mirror of https://github.com/citusdata/citus.git

Normalize output
parent 02526f1bee
commit 3eb0121bb6
@@ -5,20 +5,20 @@ ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1;
-- add the nodes to the cluster
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

-- get the active nodes
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------
+---------------------------------------------------------------------
(localhost,57638)
(localhost,57637)
(2 rows)
@@ -26,14 +26,14 @@ SELECT master_get_active_worker_nodes();
-- try to add a node that is already in the cluster
SELECT * FROM master_add_node('localhost', :worker_1_port);
 master_add_node
------------------
+---------------------------------------------------------------------
 1
(1 row)

-- get the active nodes
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------
+---------------------------------------------------------------------
(localhost,57638)
(localhost,57637)
(2 rows)
@@ -41,39 +41,39 @@ SELECT master_get_active_worker_nodes();
-- try to remove a node (with no placements)
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

-- verify that the node has been deleted
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------
+---------------------------------------------------------------------
(localhost,57637)
(1 row)

-- try to disable a node with no placements see that node is s=removed
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

SELECT citus_disable_node('localhost', :worker_2_port);
 citus_disable_node
---------------------
+---------------------------------------------------------------------

(1 row)

SELECT public.wait_until_metadata_sync();
 wait_until_metadata_sync
---------------------------
+---------------------------------------------------------------------

(1 row)

SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------
+---------------------------------------------------------------------
(localhost,57637)
(1 row)

@@ -89,12 +89,12 @@ ROLLBACK;
-- check that the rebalancer works even if there are no distributed tables
SELECT * FROM get_rebalance_table_shards_plan();
 table_name | shardid | shard_size | sourcename | sourceport | targetname | targetport
-------------+---------+------------+------------+------------+------------+------------
+---------------------------------------------------------------------
(0 rows)

SELECT * FROM rebalance_table_shards();
 rebalance_table_shards
-------------------------
+---------------------------------------------------------------------

(1 row)

@@ -104,21 +104,21 @@ TRUNCATE pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;
SELECT * FROM citus_activate_node('localhost', :worker_2_port);
 citus_activate_node
----------------------
+---------------------------------------------------------------------
 3
(1 row)

CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

-- see that there are some active placements in the candidate node
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename | nodeport
----------+------------+-----------+----------
+---------------------------------------------------------------------
 1220001 | 1 | localhost | 57638
 1220003 | 1 | localhost | 57638
 1220005 | 1 | localhost | 57638
@@ -131,12 +131,12 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER

-- try to remove a node with active placements and see that node removal is failed
SELECT master_remove_node('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001
+ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------
+---------------------------------------------------------------------
(localhost,57638)
(localhost,57637)
(2 rows)
@@ -145,26 +145,26 @@ SELECT master_get_active_worker_nodes();
CREATE TABLE test_reference_table (y int primary key, name text);
SELECT create_reference_table('test_reference_table');
 create_reference_table
-------------------------
+---------------------------------------------------------------------

(1 row)

INSERT INTO test_reference_table VALUES (1, '1');
-- try to remove a node with active placements and reference tables
SELECT citus_remove_node('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001
+ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
-- try to disable a node with active placements
-- which should fail because there are some placements
-- which are the only placements for a given shard
SELECT citus_disable_node('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001
+ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------
+---------------------------------------------------------------------
(localhost,57638)
(localhost,57637)
(2 rows)
@@ -175,7 +175,7 @@ ERROR: node at "localhost.noexist:2345" does not exist
-- drop the table without leaving a shard placement behind (messes up other tests)
SELECT master_activate_node('localhost', :worker_2_port);
 master_activate_node
----------------------
+---------------------------------------------------------------------
 3
(1 row)

@@ -186,7 +186,7 @@ CREATE USER non_super_user;
CREATE USER node_metadata_user;
SELECT 1 FROM run_command_on_workers('CREATE USER node_metadata_user');
 ?column?
-----------
+---------------------------------------------------------------------
 1
 1
(2 rows)
@@ -203,35 +203,35 @@ GRANT EXECUTE ON FUNCTION master_update_node(int,text,int,bool,int) TO node_meta
-- user needs permission for the pg_dist_node and pg_dist_local_group for metadata syncing
SELECT run_command_on_workers('GRANT ALL ON pg_dist_node TO node_metadata_user');
 run_command_on_workers
----------------------------
+---------------------------------------------------------------------
(localhost,57637,t,GRANT)
(localhost,57638,t,GRANT)
(2 rows)

SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO node_metadata_user');
 run_command_on_workers
----------------------------
+---------------------------------------------------------------------
(localhost,57637,t,GRANT)
(localhost,57638,t,GRANT)
(2 rows)

SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO node_metadata_user');
 run_command_on_workers
----------------------------
+---------------------------------------------------------------------
(localhost,57637,t,GRANT)
(localhost,57638,t,GRANT)
(2 rows)

SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO node_metadata_user');
 run_command_on_workers
----------------------------
+---------------------------------------------------------------------
(localhost,57637,t,GRANT)
(localhost,57638,t,GRANT)
(2 rows)

SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

@@ -262,13 +262,13 @@ HINT: Run the command with a superuser.
BEGIN;
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

@@ -285,7 +285,7 @@ SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_

SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport;
 nodename | nodeport | noderole
------------+----------+-----------
+---------------------------------------------------------------------
 localhost | 57637 | primary
 localhost | 57640 | secondary
(2 rows)
@@ -297,14 +297,14 @@ SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------
+---------------------------------------------------------------------
(localhost,57637)
(1 row)

-- restore the node for next tests
SELECT * FROM master_add_node('localhost', :worker_2_port);
 master_add_node
------------------
+---------------------------------------------------------------------
 7
(1 row)

@@ -313,13 +313,13 @@ ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 6;
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

-- try to remove a node with active placements and see that node removal is failed
SELECT master_remove_node('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001
+ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
-- mark all placements in the candidate node as inactive
@@ -328,14 +328,14 @@ UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group;
-- manual updates to pg_dist* tables are not automatically reflected to the workers, so we manually do that too
SELECT run_command_on_workers('UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=' || :'worker_2_group');
 run_command_on_workers
---------------------------------
+---------------------------------------------------------------------
(localhost,57637,t,"UPDATE 8")
(localhost,57638,t,"UPDATE 8")
(2 rows)

SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename | nodeport
----------+------------+-----------+----------
+---------------------------------------------------------------------
 1220001 | 3 | localhost | 57638
 1220003 | 3 | localhost | 57638
 1220005 | 3 | localhost | 57638
@@ -348,12 +348,12 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER

-- try to remove a node with only inactive placements and see that removal still fails
SELECT master_remove_node('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001
+ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
SELECT master_get_active_worker_nodes();
 master_get_active_worker_nodes
---------------------------------
+---------------------------------------------------------------------
(localhost,57638)
(localhost,57637)
(2 rows)
@@ -362,14 +362,14 @@ SELECT master_get_active_worker_nodes();
UPDATE pg_dist_placement SET shardstate=4 WHERE groupid=:worker_2_group;
SELECT run_command_on_workers('UPDATE pg_dist_placement SET shardstate=4 WHERE groupid=' || :'worker_2_group');
 run_command_on_workers
---------------------------------
+---------------------------------------------------------------------
(localhost,57637,t,"UPDATE 8")
(localhost,57638,t,"UPDATE 8")
(2 rows)

SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
 shardid | shardstate | nodename | nodeport
----------+------------+-----------+----------
+---------------------------------------------------------------------
 1220001 | 4 | localhost | 57638
 1220003 | 4 | localhost | 57638
 1220005 | 4 | localhost | 57638
@@ -408,14 +408,14 @@ WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220017
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

-- Check that colocated shards don't get created for shards that are to be deleted
SELECT logicalrelid, shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard ORDER BY shardstate, shardid;
 logicalrelid | shardid | shardstate | nodename | nodeport
------------------------------------+---------+------------+-----------+----------
+---------------------------------------------------------------------
 cluster_management_test | 1220000 | 1 | localhost | 57637
 cluster_management_test | 1220002 | 1 | localhost | 57637
 cluster_management_test | 1220004 | 1 | localhost | 57637
@@ -445,14 +445,14 @@ SELECT logicalrelid, shardid, shardstate, nodename, nodeport FROM pg_dist_shard_
-- clean-up
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

UPDATE pg_dist_placement SET shardstate=1 WHERE groupid=:worker_2_group;
SELECT run_command_on_workers('UPDATE pg_dist_placement SET shardstate=1 WHERE groupid=' || :'worker_2_group');
 run_command_on_workers
---------------------------------
+---------------------------------------------------------------------
(localhost,57637,t,"UPDATE 8")
(localhost,57638,t,"UPDATE 8")
(2 rows)
@@ -468,7 +468,7 @@ ERROR: node group 6 does not have a primary node
DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port;
SELECT run_command_on_workers('DELETE FROM pg_dist_node WHERE nodeport=' || :'worker_2_port');
 run_command_on_workers
---------------------------------
+---------------------------------------------------------------------
(localhost,57637,t,"DELETE 1")
(1 row)

@@ -492,31 +492,31 @@ SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset
UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group;
SELECT run_command_on_workers('UPDATE pg_dist_placement SET groupid = ' || :'new_group' || ' WHERE groupid = ' || :'worker_2_group');
 run_command_on_workers
---------------------------------
+---------------------------------------------------------------------
(localhost,57637,t,"UPDATE 8")
(localhost,57638,t,"UPDATE 0")
(2 rows)

SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
 start_metadata_sync_to_node
------------------------------
+---------------------------------------------------------------------

(1 row)

-- test that you are allowed to remove secondary nodes even if there are placements
SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary');
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

SELECT master_remove_node('localhost', :worker_2_port);
-ERROR: cannot remove or disable the node localhost:57638 because because it contains the only shard placement for shard 1220001
+ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.cluster_management_test
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
SELECT master_remove_node('localhost', 9990);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

@@ -525,40 +525,40 @@ DROP TABLE cluster_management_test;
-- check that adding/removing nodes are propagated to nodes with metadata
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
------------------------------
+---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

\c - - - :worker_1_port
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
------------+----------
+---------------------------------------------------------------------
 localhost | 57638
(1 row)

\c - - - :master_port
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

\c - - - :worker_1_port
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
-----------+----------
+---------------------------------------------------------------------
(0 rows)

\c - - - :master_port
@@ -566,20 +566,20 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
NOTICE: dropping metadata on the node (localhost,57637)
 stop_metadata_sync_to_node
-----------------------------
+---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

\c - - - :worker_1_port
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
-----------+----------
+---------------------------------------------------------------------
(0 rows)

\c - - - :master_port
@@ -588,13 +588,13 @@ SELECT
master_remove_node('localhost', :worker_1_port),
master_remove_node('localhost', :worker_2_port);
 master_remove_node | master_remove_node
---------------------+--------------------
+---------------------------------------------------------------------
 |
(1 row)

SELECT count(1) FROM pg_dist_node;
 count
--------
+---------------------------------------------------------------------
 0
(1 row)

@@ -603,13 +603,13 @@ SELECT
master_add_node('localhost', :worker_1_port),
master_add_node('localhost', :worker_2_port);
 master_add_node | master_add_node
------------------+-----------------
+---------------------------------------------------------------------
 11 | 12
(1 row)

SELECT * FROM pg_dist_node ORDER BY nodeid;
 nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------+---------+-----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------
+---------------------------------------------------------------------
 11 | 9 | localhost | 57637 | default | t | t | primary | default | t | t
 12 | 10 | localhost | 57638 | default | t | t | primary | default | t | t
(2 rows)
@@ -618,84 +618,84 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
BEGIN;
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

COMMIT;
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
-----------+----------
+---------------------------------------------------------------------
(0 rows)

SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
------------------------------
+---------------------------------------------------------------------

(1 row)

BEGIN;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

COMMIT;
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
------------+----------
+---------------------------------------------------------------------
 localhost | 57638
(1 row)

\c - - - :worker_1_port
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 nodename | nodeport
------------+----------
+---------------------------------------------------------------------
 localhost | 57638
(1 row)

\c - - - :master_port
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
 master_remove_node
---------------------
+---------------------------------------------------------------------


(2 rows)

SELECT 1 FROM master_add_node('localhost', :worker_1_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

@@ -703,21 +703,21 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SET citus.shard_count TO 4;
SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

BEGIN;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

CREATE TABLE temp(col1 text, col2 int);
SELECT create_distributed_table('temp', 'col1');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

@@ -726,7 +726,7 @@ INSERT INTO temp VALUES ('row2', 2);
COMMIT;
SELECT col1, col2 FROM temp ORDER BY col1;
 col1 | col2
-------+------
+---------------------------------------------------------------------
 row1 | 1
 row2 | 2
(2 rows)
@@ -740,7 +740,7 @@ WHERE
AND pg_dist_shard.logicalrelid = 'temp'::regclass
AND pg_dist_shard_placement.nodeport = :worker_2_port;
 count
--------
+---------------------------------------------------------------------
 4
(1 row)

@@ -754,14 +754,14 @@ DELETE FROM pg_dist_node;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
NOTICE: dropping metadata on the node (localhost,57637)
 stop_metadata_sync_to_node
-----------------------------
+---------------------------------------------------------------------

(1 row)

SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
NOTICE: dropping metadata on the node (localhost,57638)
 stop_metadata_sync_to_node
-----------------------------
+---------------------------------------------------------------------

(1 row)

@@ -776,51 +776,51 @@ ERROR: group 14 already has a primary node
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary');
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable');
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

-- add_inactive_node also works with secondaries
SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary');
 ?column?
-----------
+---------------------------------------------------------------------
 1
(1 row)

-- check that you can add a seconary to a non-default cluster, and activate it, and remove it
SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
 master_add_inactive_node
---------------------------
+---------------------------------------------------------------------
 22
(1 row)

SELECT master_activate_node('localhost', 9999);
 master_activate_node
----------------------
+---------------------------------------------------------------------
 22
(1 row)

SELECT citus_disable_node('localhost', 9999);
 citus_disable_node
---------------------
+---------------------------------------------------------------------

(1 row)

SELECT public.wait_until_metadata_sync();
 wait_until_metadata_sync
---------------------------
+---------------------------------------------------------------------

(1 row)

SELECT master_remove_node('localhost', 9999);
 master_remove_node
---------------------
+---------------------------------------------------------------------

(1 row)

@@ -828,11 +828,11 @@ SELECT master_remove_node('localhost', 9999);
INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole)
VALUES ('localhost', 5000, :worker_1_group, 'primary');
ERROR: there cannot be two primary nodes in a group
-CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line 10 at RAISE
+CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line XX at RAISE
UPDATE pg_dist_node SET noderole = 'primary'
WHERE groupid = :worker_1_group AND nodeport = 9998;
ERROR: there cannot be two primary nodes in a group
-CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line 18 at RAISE
+CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line XX at RAISE
-- check that you can't manually add a primary to a non-default cluster
INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster)
VALUES ('localhost', 5000, 1000, 'primary', 'olap');
@@ -846,7 +846,7 @@ DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary,
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap');
 master_add_node
------------------
+---------------------------------------------------------------------
 25
(1 row)

@@ -859,13 +859,13 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole =
'overflow'
);
 master_add_node
------------------
+---------------------------------------------------------------------
 26
(1 row)

SELECT * FROM pg_dist_node WHERE nodeport=8887;
 nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------+---------+-----------+----------+----------+-------------+----------+-----------+-----------------------------------------------------------------+----------------+------------------
+---------------------------------------------------------------------
 26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t
(1 row)

@@ -874,21 +874,21 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887;
-- master_add_secondary_node lets you skip looking up the groupid
SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port);
 master_add_secondary_node
----------------------------
+---------------------------------------------------------------------
 27
(1 row)

SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port);
 master_add_secondary_node
----------------------------
+---------------------------------------------------------------------
 28
(1 row)

SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000);
-ERROR: node at "localhost:2000" does not exist
+ERROR: node at "localhost:xxxxx" does not exist
SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
 master_add_secondary_node
----------------------------
+---------------------------------------------------------------------
 29
(1 row)

@@ -902,33 +902,33 @@ ERROR: there is already another node with the specified hostname and port
-- master_update_node moves a node
SELECT master_update_node(:worker_1_node, 'somehost', 9000);
 master_update_node
---------------------
+---------------------------------------------------------------------

(1 row)

SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
 nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------+---------+----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------
+---------------------------------------------------------------------
 16 | 14 | somehost | 9000 | default | f | t | primary | default | f | t
(1 row)

-- cleanup
SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port);
 master_update_node
---------------------
+---------------------------------------------------------------------

(1 row)

SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
 nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------+---------+-----------+----------+----------+-------------+----------+----------+-------------+----------------+------------------
+---------------------------------------------------------------------
 16 | 14 | localhost | 57637 | default | f | t | primary | default | f | t
(1 row)

SET client_min_messages TO ERROR;
SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
 start_metadata_sync_to_node
------------------------------
+---------------------------------------------------------------------


(2 rows)
@@ -938,14 +938,14 @@ SET citus.shard_replication_factor TO 1;
CREATE TABLE test_dist (x int, y int);
SELECT create_distributed_table('test_dist', 'x');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

-- testing behaviour when setting shouldhaveshards to false on partially empty node
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
 master_set_node_property
---------------------------
+---------------------------------------------------------------------

(1 row)

@@ -955,25 +955,25 @@ CREATE TABLE test_dist_colocated_with_non_colocated (x int, y int);
CREATE TABLE test_ref (a int, b int);
SELECT create_distributed_table('test_dist_colocated', 'x');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('test_dist_colocated_with_non_colocated', 'x', colocate_with => 'test_dist_non_colocated');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

SELECT create_reference_table('test_ref');
 create_reference_table
-------------------------
+---------------------------------------------------------------------

(1 row)

@@ -982,7 +982,7 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 2
 57638 | 2
(2 rows)
@@ -992,7 +992,7 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 4
(1 row)

@@ -1002,7 +1002,7 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_dist_colocated_with_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 4
(1 row)

@@ -1011,7 +1011,7 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 1
 57638 | 1
(2 rows)
@@ -1021,7 +1021,7 @@ DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, te
-- testing behaviour when setting shouldhaveshards to false on fully empty node
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
 master_set_node_property
---------------------------
+---------------------------------------------------------------------

(1 row)

@@ -1031,13 +1031,13 @@ CREATE TABLE test_dist_non_colocated (x int, y int);
CREATE TABLE test_ref (a int, b int);
SELECT create_distributed_table('test_dist', 'x');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

SELECT create_reference_table('test_ref');
 create_reference_table
-------------------------
+---------------------------------------------------------------------

(1 row)

@@ -1046,7 +1046,7 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 4
(1 row)

@@ -1055,14 +1055,14 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 1
 57638 | 1
(2 rows)

SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
 master_set_node_property
---------------------------
+---------------------------------------------------------------------

(1 row)

@@ -1072,7 +1072,7 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_dist'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 4
(1 row)

@@ -1081,20 +1081,20 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 1
 57638 | 1
(2 rows)

SELECT create_distributed_table('test_dist_colocated', 'x');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('test_dist_non_colocated', 'x', colocate_with => 'none');
 create_distributed_table
---------------------------
+---------------------------------------------------------------------

(1 row)

@@ -1104,7 +1104,7 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_dist_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 4
(1 row)

@@ -1114,7 +1114,7 @@ SELECT nodeport, count(*)
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
 nodeport | count
-----------+-------
+---------------------------------------------------------------------
 57637 | 2
 57638 | 2
(2 rows)
@@ -1125,7 +1125,7 @@ DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated;
-- verify that at the end of this file, all primary nodes have metadata synced
SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
 ?column?
-----------
+---------------------------------------------------------------------
 t
(1 row)

@@ -671,13 +671,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ALTER TABLE fix_idx_names.p2 OWNER TO postgres
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
-DETAIL: on server postgres@localhost:57638 connectionId: xxxxxxx
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int) FROM distributed_object_data;
-DETAIL: on server postgres@localhost:57637 connectionId: xxxxxxx
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
-DETAIL: on server postgres@localhost:57638 connectionId: xxxxxxx
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
-DETAIL: on server postgres@localhost:57637 connectionId: xxxxxxx
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's')

@@ -169,7 +169,7 @@ COMMIT;
-- TODO: Update the test once sync by default guc will be removed
SELECT run_command_on_workers($$DROP TABLE single_node.test$$);
 run_command_on_workers
-----------------------------------
+---------------------------------------------------------------------
(localhost,57637,t,"DROP TABLE")
(1 row)
