Revert "Revert "Creates new colocation for colocate_with:='none' too"" (#6227)

This reverts commit d171a736ab.
aykut-bozkurt 2022-08-24 10:54:04 +03:00 committed by GitHub
parent bad8196da3
commit 041f88d7bf
27 changed files with 442 additions and 204 deletions
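The change being re-applied makes create_distributed_table with colocate_with := 'none' create its own pg_dist_colocation entry right away (instead of only reserving the next colocation id), and makes dropping the last table of a colocation group remove that entry. As a rough illustration, using hypothetical table names (demo_a, demo_b) that are not part of the commit, the behavior exercised by the regression test added at the end of this diff looks like this:

-- hypothetical tables, mirroring the multi_colocation_utils test added below
CREATE TABLE demo_a (id int);
CREATE TABLE demo_b (id int);
SELECT create_distributed_table('demo_a', 'id');
SELECT create_distributed_table('demo_b', 'id', colocate_with := 'none');
-- demo_a and demo_b end up in different colocation groups, and each group
-- already has its own row in pg_dist_colocation on all nodes
SELECT logicalrelid, colocationid FROM pg_dist_partition
WHERE logicalrelid IN ('demo_a'::regclass, 'demo_b'::regclass);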


@@ -1000,7 +1000,9 @@ ColocationIdForNewTable(Oid relationId, Var *distributionColumn,
    }
    else if (IsColocateWithNone(colocateWithTableName))
    {
-       colocationId = GetNextColocationId();
+       colocationId = CreateColocationGroup(shardCount, ShardReplicationFactor,
+                                            distributionColumnType,
+                                            distributionColumnCollation);
        createdColocationGroup = true;
    }


@@ -12,6 +12,7 @@
 #include "miscadmin.h"
+#include "distributed/colocation_utils.h"
 #include "distributed/commands/utility_hook.h"
 #include "distributed/commands.h"
 #include "distributed/metadata_utility.h"
@@ -70,6 +71,8 @@ master_remove_partition_metadata(PG_FUNCTION_ARGS)
    char *schemaName = text_to_cstring(schemaNameText);
    char *tableName = text_to_cstring(tableNameText);
+
+   uint32 colocationId = ColocationIdViaCatalog(relationId);
    /*
     * The SQL_DROP trigger calls this function even for tables that are
     * not distributed. In that case, silently ignore. This is not very
@@ -87,6 +90,8 @@ master_remove_partition_metadata(PG_FUNCTION_ARGS)
    DeletePartitionRow(relationId);
+
+   DeleteColocationGroupIfNoTablesBelong(colocationId);
    PG_RETURN_VOID();
 }
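The two added calls wire the drop path to colocation-group cleanup: the colocation id is read before the pg_dist_partition row is deleted, and the group is dropped afterwards if no table belongs to it anymore. A minimal sketch of the observable effect, with a hypothetical table name that is not part of the commit (the test added at the end of this diff checks the same thing across all nodes):

-- hypothetical table name
CREATE TABLE solo_tbl (a int);
SELECT create_distributed_table('solo_tbl', 'a', colocate_with := 'none');
SELECT colocation_id AS solo_group FROM citus_tables WHERE table_name::text = 'solo_tbl' \gset
DROP TABLE solo_tbl;
-- the group lost its last member, so its pg_dist_colocation row is gone
SELECT count(*) FROM pg_dist_colocation WHERE colocationid = :solo_group;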


@@ -615,6 +615,45 @@ PartitionColumnViaCatalog(Oid relationId)
 }
+
+/*
+ * ColocationIdViaCatalog gets a relationId and returns the colocation
+ * id column from pg_dist_partition via reading from catalog.
+ */
+uint32
+ColocationIdViaCatalog(Oid relationId)
+{
+   HeapTuple partitionTuple = PgDistPartitionTupleViaCatalog(relationId);
+   if (!HeapTupleIsValid(partitionTuple))
+   {
+       return INVALID_COLOCATION_ID;
+   }
+
+   Datum datumArray[Natts_pg_dist_partition];
+   bool isNullArray[Natts_pg_dist_partition];
+
+   Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
+   TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
+   heap_deform_tuple(partitionTuple, tupleDescriptor, datumArray, isNullArray);
+
+   if (isNullArray[Anum_pg_dist_partition_colocationid - 1])
+   {
+       /* colocation id cannot be NULL, still let's make sure */
+       heap_freetuple(partitionTuple);
+       table_close(pgDistPartition, NoLock);
+       return INVALID_COLOCATION_ID;
+   }
+
+   Datum colocationIdDatum = datumArray[Anum_pg_dist_partition_colocationid - 1];
+   uint32 colocationId = DatumGetUInt32(colocationIdDatum);
+
+   heap_freetuple(partitionTuple);
+   table_close(pgDistPartition, NoLock);
+   return colocationId;
+}
+
 /*
  * PgDistPartitionTupleViaCatalog is a helper function that searches
  * pg_dist_partition for the given relationId. The caller is responsible
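The new helper reads the colocationid column of pg_dist_partition straight from the catalog and returns INVALID_COLOCATION_ID when the relation has no row there. In SQL terms it corresponds roughly to the query below; 'some_table' is a placeholder name, not something from the commit:

-- placeholder relation name; the C helper performs the equivalent catalog read
SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'some_table'::regclass;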


@@ -152,6 +152,7 @@ extern char PgDistPartitionViaCatalog(Oid relationId);
 extern List * LookupDistShardTuples(Oid relationId);
 extern char PartitionMethodViaCatalog(Oid relationId);
 extern Var * PartitionColumnViaCatalog(Oid relationId);
+extern uint32 ColocationIdViaCatalog(Oid relationId);
 extern bool IsCitusLocalTableByDistParams(char partitionMethod, char replicationModel);
 extern List * CitusTableList(void);
 extern ShardInterval * LoadShardInterval(uint64 shardId);


@@ -353,6 +353,8 @@ NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.pa
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
+NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(100047)
+NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(100047)
 ROLLBACK;
 NOTICE: issuing ROLLBACK
 NOTICE: issuing ROLLBACK
@@ -374,6 +376,8 @@ NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCAD
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.child1_xxxxx CASCADE
+NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(100047)
+NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(100047)
 ROLLBACK;
 NOTICE: issuing ROLLBACK
 NOTICE: issuing ROLLBACK


@@ -115,7 +115,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
---------------------------------------------------------------------
@@ -572,7 +574,7 @@ SELECT citus.mitmproxy('conn.kill()');
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+ERROR: failure on connection marked as essential: localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
---------------------------------------------------------------------
@@ -637,7 +639,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 SELECT create_distributed_table('test_table', 'id');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+ERROR: failure on connection marked as essential: localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
---------------------------------------------------------------------


@@ -25,7 +25,9 @@ SELECT citus.mitmproxy('conn.onQuery().kill()');
 (1 row)
 SELECT create_reference_table('ref_table');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard_placement;
 count
---------------------------------------------------------------------
@@ -40,7 +42,9 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()');
 (1 row)
 SELECT create_reference_table('ref_table');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard_placement;
 count
---------------------------------------------------------------------
@@ -70,7 +74,9 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()');
 (1 row)
 SELECT create_reference_table('ref_table');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 SELECT count(*) FROM pg_dist_shard_placement;
 count
---------------------------------------------------------------------
@@ -171,8 +177,8 @@ SELECT create_reference_table('ref_table');
 SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shardid, nodeport;
 shardid | nodeport | shardstate
---------------------------------------------------------------------
- 10000008 | 9060 | 1
- 10000008 | 57637 | 1
+ 10000003 | 9060 | 1
+ 10000003 | 57637 | 1
 (2 rows)
 SET client_min_messages TO NOTICE;


@@ -86,7 +86,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 SELECT create_distributed_table('test_table','id');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
---------------------------------------------------------------------
@@ -338,7 +340,9 @@ SELECT citus.mitmproxy('conn.kill()');
 BEGIN;
 SELECT create_distributed_table('test_table','id');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
@@ -372,7 +376,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 BEGIN;
 SELECT create_distributed_table('test_table','id');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
@@ -446,7 +452,9 @@ SELECT citus.mitmproxy('conn.kill()');
 BEGIN;
 SELECT create_distributed_table('test_table','id');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
@@ -510,7 +518,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 BEGIN;
 SELECT create_distributed_table('test_table','id');
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
+WARNING: connection not open
+CONTEXT: while executing command on localhost:xxxxx
+ERROR: failure on connection marked as essential: localhost:xxxxx
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
 mitmproxy


@@ -1,5 +1,6 @@
 CREATE SCHEMA function_propagation_schema;
 SET search_path TO 'function_propagation_schema';
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
 -- Check whether supported dependencies can be distributed while propagating functions
 -- Check types
 SET citus.enable_metadata_sync TO OFF;
@@ -1115,7 +1116,7 @@ SELECT create_distributed_function('func_to_colocate(int)', colocate_with:='tbl_
 SELECT distribution_argument_index, colocationid, force_delegation FROM pg_catalog.pg_dist_object WHERE objid = 'func_to_colocate'::regproc;
 distribution_argument_index | colocationid | force_delegation
---------------------------------------------------------------------
- | 10005 |
+ | 10002 |
 (1 row)
 -- convert to non-delegated
@@ -1143,7 +1144,7 @@ SELECT create_distributed_function('func_to_colocate(int)','$1','tbl_to_colocate
 SELECT distribution_argument_index, colocationid, force_delegation FROM pg_catalog.pg_dist_object WHERE objid = 'func_to_colocate'::regproc;
 distribution_argument_index | colocationid | force_delegation
---------------------------------------------------------------------
- 0 | 10004 |
+ 0 | 10001 |
 (1 row)
 -- try create or replace the same func
@@ -1152,7 +1153,7 @@ CREATE OR REPLACE FUNCTION func_to_colocate (a int) returns int as $$select 1;$$
 SELECT distribution_argument_index, colocationid, force_delegation FROM pg_catalog.pg_dist_object WHERE objid = 'func_to_colocate'::regproc;
 distribution_argument_index | colocationid | force_delegation
---------------------------------------------------------------------
- 0 | 10004 |
+ 0 | 10001 |
 (1 row)
 -- convert to non-delegated
@@ -1180,7 +1181,7 @@ SELECT create_distributed_function('func_to_colocate(int)','$1','tbl_to_colocate
 SELECT distribution_argument_index, colocationid, force_delegation FROM pg_catalog.pg_dist_object WHERE objid = 'func_to_colocate'::regproc;
 distribution_argument_index | colocationid | force_delegation
---------------------------------------------------------------------
- 0 | 10004 | t
+ 0 | 10001 | t
 (1 row)
 -- convert to non-delegated


@@ -2,10 +2,10 @@ Parsed test spec with 5 sessions
starting permutation: s2-begin s2-create_distributed_table s3-create_distributed_table s2-commit
step s2-begin:
    BEGIN;
step s2-create_distributed_table:
    SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
@@ -13,7 +13,7 @@ create_distributed_table
(1 row)
step s3-create_distributed_table:
    SELECT create_distributed_table('concurrent_table_3', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
@@ -21,15 +21,15 @@ create_distributed_table
(1 row)
step s2-commit:
    COMMIT;

starting permutation: s2-begin s2-create_distributed_table s1-move-shard-logical s2-commit s3-sanity-check s3-sanity-check-2
step s2-begin:
    BEGIN;
step s2-create_distributed_table:
    SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
@@ -37,15 +37,15 @@ create_distributed_table
(1 row)
step s1-move-shard-logical:
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid;
ERROR: could not acquire the lock required to move public.concurrent_table_1
step s2-commit:
    COMMIT;
step s3-sanity-check:
    SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
@@ -53,7 +53,7 @@ count
(1 row)
step s3-sanity-check-2:
    SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id);
count
---------------------------------------------------------------------
@@ -63,10 +63,10 @@ count

starting permutation: s2-begin s2-create_distributed_table s1-move-shard-block s2-commit s3-sanity-check s3-sanity-check-2
step s2-begin:
    BEGIN;
step s2-create_distributed_table:
    SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
@@ -74,15 +74,15 @@ create_distributed_table
(1 row)
step s1-move-shard-block:
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid;
ERROR: could not acquire the lock required to move public.concurrent_table_1
step s2-commit:
    COMMIT;
step s3-sanity-check:
    SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
@@ -90,7 +90,7 @@ count
(1 row)
step s3-sanity-check-2:
    SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id);
count
---------------------------------------------------------------------
@@ -100,10 +100,10 @@ count

starting permutation: s2-begin s2-create_distributed_table s1-split-block s2-commit s3-sanity-check s3-sanity-check-2
step s2-begin:
    BEGIN;
step s2-create_distributed_table:
    SELECT create_distributed_table('concurrent_table_2', 'id', colocate_with := 'concurrent_table_1');
create_distributed_table
---------------------------------------------------------------------
@@ -111,16 +111,16 @@ create_distributed_table
(1 row)
step s1-split-block:
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_1'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_split_shard_by_split_points(
        shardid.shardid, ARRAY['2113265921'], ARRAY[(SELECT * FROM first_node_id), (SELECT * FROM first_node_id)], 'block_writes') FROM shardid;
ERROR: could not acquire the lock required to split public.concurrent_table_1
step s2-commit:
    COMMIT;
step s3-sanity-check:
    SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
@@ -128,7 +128,7 @@ count
(1 row)
step s3-sanity-check-2:
    SELECT count(*) FROM concurrent_table_1 JOIN concurrent_table_2 USING (id);
count
---------------------------------------------------------------------
@@ -138,11 +138,11 @@ count

starting permutation: s4-begin s4-move-shard-logical s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4
step s4-begin:
    BEGIN;
step s4-move-shard-logical:
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638) FROM shardid;
citus_move_shard_placement
---------------------------------------------------------------------
@@ -150,17 +150,17 @@ citus_move_shard_placement
(1 row)
step s5-setup-rep-factor:
    SET citus.shard_replication_factor TO 1;
step s5-create_implicit_colocated_distributed_table:
    SELECT create_distributed_table('concurrent_table_5', 'id');
-ERROR: could not acquire the lock required to colocate distributed table public.concurrent_table_4
+ERROR: could not acquire the lock required to colocate distributed table public.concurrent_table_1
step s4-commit:
    commit;
step s3-sanity-check:
    SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
@@ -168,7 +168,7 @@ count
(1 row)
step s3-sanity-check-3:
    SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5');
count
---------------------------------------------------------------------
@@ -176,7 +176,7 @@ count
(1 row)
step s3-sanity-check-4:
    SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id);
count
---------------------------------------------------------------------
@@ -186,11 +186,11 @@ count

starting permutation: s4-begin s4-move-shard-block s5-setup-rep-factor s5-create_implicit_colocated_distributed_table s4-commit s3-sanity-check s3-sanity-check-3 s3-sanity-check-4
step s4-begin:
    BEGIN;
step s4-move-shard-block:
    WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'concurrent_table_4'::regclass ORDER BY shardid LIMIT 1)
    SELECT citus_move_Shard_placement(shardid.shardid, 'localhost', 57637, 'localhost', 57638, 'block_writes') FROM shardid;
citus_move_shard_placement
---------------------------------------------------------------------
@@ -198,17 +198,17 @@ citus_move_shard_placement
(1 row)
step s5-setup-rep-factor:
    SET citus.shard_replication_factor TO 1;
step s5-create_implicit_colocated_distributed_table:
    SELECT create_distributed_table('concurrent_table_5', 'id');
-ERROR: could not acquire the lock required to colocate distributed table public.concurrent_table_4
+ERROR: could not acquire the lock required to colocate distributed table public.concurrent_table_1
step s4-commit:
    commit;
step s3-sanity-check:
    SELECT count(*) FROM pg_dist_shard LEFT JOIN pg_dist_shard_placement USING(shardid) WHERE nodename IS NULL;
count
---------------------------------------------------------------------
@@ -216,7 +216,7 @@ count
(1 row)
step s3-sanity-check-3:
    SELECT count(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid IN ('concurrent_table_4', 'concurrent_table_5');
count
---------------------------------------------------------------------
@@ -224,7 +224,7 @@ count
(1 row)
step s3-sanity-check-4:
    SELECT count(*) FROM concurrent_table_4 JOIN concurrent_table_5 USING (id);
count
---------------------------------------------------------------------


@@ -1235,7 +1235,7 @@ master_remove_node
 (2 rows)
-starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas
+starting permutation: s1-print-distributed-objects s2-create-table-for-colocation s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas
 ?column?
---------------------------------------------------------------------
 1
@@ -1306,6 +1306,16 @@ master_remove_node
 (1 row)
+step s2-create-table-for-colocation:
+    CREATE SCHEMA col_schema;
+    CREATE TABLE col_schema.col_tbl (a INT, b INT);
+    SELECT create_distributed_table('col_schema.col_tbl', 'a');
+create_distributed_table
+---------------------------------------------------------------------
+(1 row)
 step s1-add-worker:
    SELECT 1 FROM master_add_node('localhost', 57638);
@@ -1372,11 +1382,13 @@ pg_identify_object_as_address
---------------------------------------------------------------------
 (database,{regression},{})
 (role,{postgres},{})
+(schema,{col_schema},{})
 (schema,{myschema},{})
 (schema,{public},{})
+(table,"{col_schema,col_tbl}",{})
 (table,"{myschema,t1}",{})
 (table,"{myschema,t2}",{})
-(6 rows)
+(8 rows)
 count
---------------------------------------------------------------------
@@ -2139,7 +2151,7 @@ master_remove_node
 (2 rows)
-starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
+starting permutation: s1-print-distributed-objects s2-create-table-for-colocation s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
 ?column?
---------------------------------------------------------------------
 1
@@ -2210,6 +2222,16 @@ master_remove_node
 (1 row)
+step s2-create-table-for-colocation:
+    CREATE SCHEMA col_schema;
+    CREATE TABLE col_schema.col_tbl (a INT, b INT);
+    SELECT create_distributed_table('col_schema.col_tbl', 'a');
+create_distributed_table
+---------------------------------------------------------------------
+(1 row)
 step s1-begin:
    BEGIN;
@@ -2269,8 +2291,10 @@ pg_identify_object_as_address
 (database,{regression},{})
 (function,"{public,add}","{integer,integer}")
 (role,{postgres},{})
+(schema,{col_schema},{})
 (schema,{public},{})
-(4 rows)
+(table,"{col_schema,col_tbl}",{})
+(6 rows)
 count
---------------------------------------------------------------------
@@ -2322,7 +2346,7 @@ master_remove_node
 (2 rows)
-starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
+starting permutation: s1-print-distributed-objects s2-create-table-for-colocation s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
 ?column?
---------------------------------------------------------------------
 1
@@ -2393,6 +2417,16 @@ master_remove_node
 (1 row)
+step s2-create-table-for-colocation:
+    CREATE SCHEMA col_schema;
+    CREATE TABLE col_schema.col_tbl (a INT, b INT);
+    SELECT create_distributed_table('col_schema.col_tbl', 'a');
+create_distributed_table
+---------------------------------------------------------------------
+(1 row)
 step s1-begin:
    BEGIN;
@@ -2459,8 +2493,10 @@ pg_identify_object_as_address
 (database,{regression},{})
 (function,"{public,add}","{integer,integer}")
 (role,{postgres},{})
+(schema,{col_schema},{})
 (schema,{public},{})
-(4 rows)
+(table,"{col_schema,col_tbl}",{})
+(6 rows)
 count
---------------------------------------------------------------------
@@ -2512,7 +2548,7 @@ master_remove_node
 (2 rows)
-starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
+starting permutation: s1-print-distributed-objects s2-create-table-for-colocation s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
 ?column?
---------------------------------------------------------------------
 1
@@ -2583,6 +2619,16 @@ master_remove_node
 (1 row)
+step s2-create-table-for-colocation:
+    CREATE SCHEMA col_schema;
+    CREATE TABLE col_schema.col_tbl (a INT, b INT);
+    SELECT create_distributed_table('col_schema.col_tbl', 'a');
+create_distributed_table
+---------------------------------------------------------------------
+(1 row)
 step s2-begin:
    BEGIN;
@@ -2650,9 +2696,11 @@ pg_identify_object_as_address
 (database,{regression},{})
 (function,"{myschema,add}","{integer,integer}")
 (role,{postgres},{})
+(schema,{col_schema},{})
 (schema,{myschema},{})
 (schema,{public},{})
+(table,"{col_schema,col_tbl}",{})
-(5 rows)
+(7 rows)
 count
---------------------------------------------------------------------


@@ -194,7 +194,7 @@ count
 (1 row)
-starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-start-session-level-connection s2-begin-on-worker s2-drop s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
+starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-drop s1-commit-worker s1-stop-connection s3-select-count
 create_reference_table
---------------------------------------------------------------------
@@ -227,22 +227,6 @@ run_commands_on_session_level_connection_to_node
 (1 row)
-step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
-start_session_level_connection_to_node
----------------------------------------------------------------------
-(1 row)
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
-run_commands_on_session_level_connection_to_node
----------------------------------------------------------------------
-(1 row)
 step s2-drop:
    DROP TABLE ref_table;
 <waiting ...>
@@ -255,14 +239,6 @@ run_commands_on_session_level_connection_to_node
 (1 row)
 step s2-drop: <... completed>
-step s2-commit-worker:
-    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
-run_commands_on_session_level_connection_to_node
----------------------------------------------------------------------
-(1 row)
 step s1-stop-connection:
    SELECT stop_session_level_connection_to_node();
@@ -271,14 +247,6 @@ stop_session_level_connection_to_node
 (1 row)
-step s2-stop-connection:
-    SELECT stop_session_level_connection_to_node();
-stop_session_level_connection_to_node
----------------------------------------------------------------------
-(1 row)
 step s3-select-count:
    SELECT COUNT(*) FROM ref_table;


@@ -508,18 +508,15 @@ DROP TABLE table2_groupA;
 SELECT * FROM pg_dist_colocation WHERE colocationid = 4;
 colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
---------------------------------------------------------------------
- 4 | 2 | 2 | 23 | 0
-(1 row)
+(0 rows)
 -- check to see whether metadata is synced
 SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
-SELECT array_agg(row_to_json(c)) FROM pg_dist_colocation c WHERE colocationid = 4
+SELECT coalesce(array_agg(row_to_json(c)), '{}') FROM pg_dist_colocation c WHERE colocationid = 4
 $$);
 nodeport | unnest
---------------------------------------------------------------------
- 57637 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
- 57638 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
-(2 rows)
+(0 rows)
 -- create dropped colocation group again
 SET citus.shard_count = 2;
@@ -606,33 +603,35 @@ SELECT * FROM pg_dist_colocation
 ORDER BY colocationid;
 colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
---------------------------------------------------------------------
- 4 | 2 | 2 | 23 | 0
 5 | 2 | 1 | 23 | 0
 6 | 2 | 2 | 25 | 100
 7 | 8 | 2 | 23 | 0
+ 8 | 2 | 2 | 23 | 0
+ 9 | 2 | 2 | 23 | 0
+ 10 | 2 | 2 | 23 | 0
 11 | 3 | 2 | 23 | 0
-(5 rows)
+(7 rows)
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid, logicalrelid;
 logicalrelid | colocationid
---------------------------------------------------------------------
-table1_groupe | 4
-table2_groupe | 4
-table3_groupe | 4
-schema_colocation.table4_groupe | 4
-table4_groupe | 4
 table1_groupb | 5
 table2_groupb | 5
 table1_groupc | 6
 table2_groupc | 6
 table1_groupd | 7
 table2_groupd | 7
-table1_group_none_1 | 8
-table2_group_none_1 | 8
-table1_group_none_2 | 9
-table1_group_none_3 | 10
+table1_groupe | 8
+table2_groupe | 8
+table3_groupe | 8
+schema_colocation.table4_groupe | 8
+table4_groupe | 8
+table1_group_none_1 | 9
+table2_group_none_1 | 9
+table1_group_none_2 | 10
+table1_group_none_3 | 11
 table1_group_default | 11
 (16 rows)
@@ -699,12 +698,15 @@ SELECT * FROM pg_dist_colocation
 ORDER BY colocationid;
 colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
---------------------------------------------------------------------
- 4 | 2 | 2 | 23 | 0
 5 | 2 | 1 | 23 | 0
 6 | 2 | 2 | 25 | 100
 7 | 8 | 2 | 23 | 0
+ 8 | 2 | 2 | 23 | 0
+ 9 | 2 | 2 | 23 | 0
+ 10 | 2 | 2 | 23 | 0
 11 | 3 | 2 | 23 | 0
-(5 rows)
+ 12 | 1 | -1 | 0 | 0
+(8 rows)
 -- cross check with internal colocation API
 SELECT
@@ -739,8 +741,9 @@ ORDER BY
 table3_groupe | table4_groupe | t
 schema_colocation.table4_groupe | table4_groupe | t
 table1_group_none_1 | table2_group_none_1 | t
+table1_group_none_3 | table1_group_default | t
 table1_groupf | table2_groupf | t
-(16 rows)
+(17 rows)
 -- check created shards
 SELECT
@@ -979,7 +982,9 @@ SELECT * FROM pg_dist_colocation
 3 | 8 | 2 | 23 | 0
 4 | 2 | 2 | 23 | 0
 5 | 2 | 2 | 23 | 0
-(5 rows)
+ 6 | 2 | 2 | 23 | 0
+ 7 | 2 | 2 | 23 | 0
+(7 rows)
 -- check to see whether metadata is synced
 SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
@@ -993,12 +998,16 @@ $$);
 57637 | {"shardcount": 8, "colocationid": 3, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57637 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57637 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+57637 | {"shardcount": 2, "colocationid": 6, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+57637 | {"shardcount": 2, "colocationid": 7, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57638 | {"shardcount": 2, "colocationid": 1, "replicationfactor": 1, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57638 | {"shardcount": 2, "colocationid": 2, "replicationfactor": 2, "distributioncolumntype": "25", "distributioncolumncollation": "100"}
 57638 | {"shardcount": 8, "colocationid": 3, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57638 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57638 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
-(10 rows)
+57638 | {"shardcount": 2, "colocationid": 6, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+57638 | {"shardcount": 2, "colocationid": 7, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+(14 rows)
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
@@ -1073,7 +1082,8 @@ SELECT * FROM pg_dist_colocation
 3 | 8 | 2 | 23 | 0
 4 | 2 | 2 | 23 | 0
 5 | 2 | 2 | 23 | 0
-(5 rows)
+ 7 | 2 | 2 | 23 | 0
+(6 rows)
 -- check to see whether metadata is synced
 SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
@@ -1087,12 +1097,14 @@ $$);
 57637 | {"shardcount": 8, "colocationid": 3, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57637 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57637 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+57637 | {"shardcount": 2, "colocationid": 7, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57638 | {"shardcount": 2, "colocationid": 1, "replicationfactor": 1, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57638 | {"shardcount": 2, "colocationid": 2, "replicationfactor": 2, "distributioncolumntype": "25", "distributioncolumncollation": "100"}
 57638 | {"shardcount": 8, "colocationid": 3, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57638 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
 57638 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
-(10 rows)
+57638 | {"shardcount": 2, "colocationid": 7, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+(12 rows)
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
@@ -1443,3 +1455,84 @@ DROP TABLE range_table;
 DROP TABLE none;
 DROP TABLE ref;
 DROP TABLE local_table;
+CREATE TABLE tbl_1 (a INT, b INT);
+CREATE TABLE tbl_2 (a INT, b INT);
+CREATE TABLE tbl_3 (a INT, b INT);
+SELECT create_distributed_table('tbl_1', 'a', shard_count:=4);
+ create_distributed_table
+---------------------------------------------------------------------
+(1 row)
+SELECT create_distributed_table('tbl_2', 'a', shard_count:=4);
+ create_distributed_table
+---------------------------------------------------------------------
+(1 row)
+SELECT create_distributed_table('tbl_3', 'a', shard_count:=4, colocate_with:='NONE');
+ create_distributed_table
+---------------------------------------------------------------------
+(1 row)
+SELECT colocation_id AS col_id_1 FROM citus_tables WHERE table_name::text = 'tbl_1' \gset
+SELECT colocation_id AS col_id_2 FROM citus_tables WHERE table_name::text = 'tbl_2' \gset
+SELECT colocation_id AS col_id_3 FROM citus_tables WHERE table_name::text = 'tbl_3' \gset
+-- check that tables are colocated correctly
+SELECT :col_id_1 = :col_id_2;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+SELECT :col_id_1 = :col_id_3;
+ ?column?
+---------------------------------------------------------------------
+ f
+(1 row)
+-- check that there are separate rows for both colocation groups in pg_dist_colocation
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_1
+);
+ result
+---------------------------------------------------------------------
+ 1
+ 1
+ 1
+(3 rows)
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_3
+);
+ result
+---------------------------------------------------------------------
+ 1
+ 1
+ 1
+(3 rows)
+DROP TABLE tbl_1, tbl_3;
+-- check that empty colocation group is dropped and non-empty is not
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_1
+);
+ result
+---------------------------------------------------------------------
+ 1
+ 1
+ 1
+(3 rows)
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_3
+);
+ result
+---------------------------------------------------------------------
+ 0
+ 0
+ 0
+(3 rows)
+DROP TABLE tbl_2;


@@ -8,6 +8,7 @@ SET citus.next_shard_id TO 910000;
 SET citus.shard_replication_factor TO 1;
 CREATE SCHEMA fix_idx_names;
 SET search_path TO fix_idx_names, public;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000;
 -- stop metadata sync for one of the worker nodes so we test both cases
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 NOTICE: dropping metadata on the node (localhost,57637)


@@ -19,6 +19,7 @@ NOTICE: dropping metadata on the node (localhost,57638)
 (1 row)
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 2;
 SET citus.replicate_reference_tables_on_activate TO off;
 SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
 \gset
@@ -146,7 +147,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
-SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 1, 's')
+SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -163,7 +164,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 UPDATE pg_dist_local_group SET groupid = 1
-WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (1, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
+WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
@@ -196,7 +197,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
-SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 1, 's')
+SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -213,7 +214,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 UPDATE pg_dist_local_group SET groupid = 1
-WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (1, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
+WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
@@ -248,7 +249,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
-SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 1, 's')
+SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -265,7 +266,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 UPDATE pg_dist_local_group SET groupid = 1
-WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (1, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
+WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
@@ -306,7 +307,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
-SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 1, 's')
+SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -323,7 +324,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 UPDATE pg_dist_local_group SET groupid = 1
-WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (1, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
+WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
@@ -357,7 +358,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
-SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 1, 's')
+SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -374,7 +375,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 UPDATE pg_dist_local_group SET groupid = 1
-WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (1, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
+WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
@@ -462,7 +463,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 ---------------------------------------------------------------------
-mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1 | s | f
+mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
 (1 row)
 SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
@@ -518,7 +519,7 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
 SELECT * FROM pg_dist_colocation ORDER BY colocationid;
 colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
 ---------------------------------------------------------------------
-1 | 8 | 1 | 23 | 0
+2 | 8 | 1 | 23 | 0
 (1 row)
 -- Make sure that truncate trigger has been set for the MX table on worker
@@ -600,7 +601,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
 ---------------------------------------------------------------------
-mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1 | s | f
+mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
 (1 row)
 SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
@@ -1887,12 +1888,12 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
-SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 3, 's')
-SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 3, 's')
-SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 1, 's')
-SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10004, 's')
-SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10005, 't')
-SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10004, 's')
+SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 5, 's')
+SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 5, 's')
+SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's')
+SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't')
+SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -1916,7 +1917,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 UPDATE pg_dist_local_group SET groupid = 1
-WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10002, 7, 1, 'integer'::regtype, NULL, NULL), (10003, 3, 1, 'integer'::regtype, NULL, NULL), (10004, 4, 1, 'integer'::regtype, NULL, NULL), (10005, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
+WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL), (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 5, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 5, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 5, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310020, 1, 0, 1, 100020), (1310021, 1, 0, 5, 100021), (1310022, 1, 0, 1, 100022), (1310023, 1, 0, 5, 100023), (1310024, 1, 0, 1, 100024)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
@@ -449,10 +449,12 @@ ORDER BY nodename, nodeport, shardid;
 nodename | nodeport | shardid | success | result
 ---------------------------------------------------------------------
 localhost | 57637 | 109102 | t | t
+localhost | 57637 | 109103 | t | t
 localhost | 57637 | 109104 | t | t
-localhost | 57638 | 109103 | t | t
+localhost | 57637 | 109106 | t | t
 localhost | 57638 | 109105 | t | t
-(4 rows)
+localhost | 57638 | 109107 | t | t
+(6 rows)
 -- revoke from multiple schemas, verify result
 REVOKE SELECT ON ALL TABLES IN SCHEMA multiuser_schema, multiuser_second_schema FROM read_access;
@@ -473,10 +475,12 @@ ORDER BY nodename, nodeport, shardid;
 nodename | nodeport | shardid | success | result
 ---------------------------------------------------------------------
 localhost | 57637 | 109102 | t | f
+localhost | 57637 | 109103 | t | f
 localhost | 57637 | 109104 | t | f
-localhost | 57638 | 109103 | t | f
+localhost | 57637 | 109106 | t | f
 localhost | 57638 | 109105 | t | f
-(4 rows)
+localhost | 57638 | 109107 | t | f
+(6 rows)
 DROP SCHEMA multiuser_schema CASCADE;
 NOTICE: drop cascades to 3 other objects
@@ -269,13 +269,13 @@ RESET client_min_messages;
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
 logicalrelid | colocationid
 ---------------------------------------------------------------------
-test_proc_colocation_0 | 1410004
+test_proc_colocation_0 | 1410005
 (1 row)
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0');
 proname | colocationid
 ---------------------------------------------------------------------
-proc_0 | 1410004
+proc_0 | 1410005
 (1 row)
 -- shardCount is not null && cascade_to_colocated is true
@@ -302,13 +302,13 @@ RESET client_min_messages;
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
 logicalrelid | colocationid
 ---------------------------------------------------------------------
-test_proc_colocation_0 | 1410003
+test_proc_colocation_0 | 1410006
 (1 row)
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0');
 proname | colocationid
 ---------------------------------------------------------------------
-proc_0 | 1410003
+proc_0 | 1410006
 (1 row)
 -- colocatewith is not null && cascade_to_colocated is true
@@ -356,13 +356,13 @@ RESET client_min_messages;
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
 logicalrelid | colocationid
 ---------------------------------------------------------------------
-test_proc_colocation_0 | 1410005
+test_proc_colocation_0 | 1410008
 (1 row)
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0');
 proname | colocationid
 ---------------------------------------------------------------------
-proc_0 | 1410005
+proc_0 | 1410008
 (1 row)
 -- try a case with more than one procedure
@@ -386,14 +386,14 @@ SELECT create_distributed_function('proc_1(float8)', 'dist_key', 'test_proc_colo
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
 logicalrelid | colocationid
 ---------------------------------------------------------------------
-test_proc_colocation_0 | 1410005
+test_proc_colocation_0 | 1410008
 (1 row)
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0', 'proc_1') ORDER BY proname;
 proname | colocationid
 ---------------------------------------------------------------------
-proc_0 | 1410005
-proc_1 | 1410005
+proc_0 | 1410008
+proc_1 | 1410008
 (2 rows)
 SET client_min_messages TO DEBUG1;
@@ -437,14 +437,14 @@ RESET client_min_messages;
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
 logicalrelid | colocationid
 ---------------------------------------------------------------------
-test_proc_colocation_0 | 1410003
+test_proc_colocation_0 | 1410009
 (1 row)
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0', 'proc_1') ORDER BY proname;
 proname | colocationid
 ---------------------------------------------------------------------
-proc_0 | 1410003
-proc_1 | 1410003
+proc_0 | 1410009
+proc_1 | 1410009
 (2 rows)
 -- case which shouldn't preserve colocation for now
@@ -462,14 +462,14 @@ NOTICE: renaming the new table to mx_alter_distributed_table.test_proc_colocati
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
 logicalrelid | colocationid
 ---------------------------------------------------------------------
-test_proc_colocation_0 | 1410006
+test_proc_colocation_0 | 1410010
 (1 row)
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0', 'proc_1') ORDER BY proname;
 proname | colocationid
 ---------------------------------------------------------------------
-proc_0 | 1410003
-proc_1 | 1410003
+proc_0 | 1410009
+proc_1 | 1410009
 (2 rows)
 SET client_min_messages TO WARNING;

View File
@@ -2,6 +2,7 @@
-- MULTI_MX_CREATE_TABLE
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
---------------------------------------------------------------------
@@ -406,29 +407,29 @@ FROM pg_dist_partition NATURAL JOIN shard_counts
ORDER BY colocationid, logicalrelid;
 logicalrelid | colocationid | shard_count | partmethod | repmodel
---------------------------------------------------------------------
- customer_mx | 1390001 | 1 | n | t
- nation_mx | 1390001 | 1 | n | t
- part_mx | 1390001 | 1 | n | t
- supplier_mx | 1390001 | 1 | n | t
- citus_mx_test_schema_join_1.nation_hash | 1390003 | 4 | h | s
- citus_mx_test_schema_join_1.nation_hash_2 | 1390003 | 4 | h | s
- citus_mx_test_schema_join_2.nation_hash | 1390003 | 4 | h | s
- citus_mx_test_schema.nation_hash_collation_search_path | 1390003 | 4 | h | s
- citus_mx_test_schema.nation_hash_composite_types | 1390003 | 4 | h | s
- mx_ddl_table | 1390003 | 4 | h | s
- app_analytics_events_mx | 1390003 | 4 | h | s
- company_employees_mx | 1390003 | 4 | h | s
- nation_hash | 1390006 | 16 | h | s
- citus_mx_test_schema.nation_hash | 1390006 | 16 | h | s
- lineitem_mx | 1390007 | 16 | h | s
- orders_mx | 1390007 | 16 | h | s
- limit_orders_mx | 1390008 | 2 | h | s
- articles_hash_mx | 1390008 | 2 | h | s
- multiple_hash_mx | 1390009 | 2 | h | s
- researchers_mx | 1390010 | 2 | h | s
- labs_mx | 1390011 | 1 | h | s
- objects_mx | 1390011 | 1 | h | s
- articles_single_shard_hash_mx | 1390011 | 1 | h | s
+ nation_hash | 1220000 | 16 | h | s
+ citus_mx_test_schema.nation_hash | 1220000 | 16 | h | s
+ citus_mx_test_schema_join_1.nation_hash | 1220001 | 4 | h | s
+ citus_mx_test_schema_join_1.nation_hash_2 | 1220001 | 4 | h | s
+ citus_mx_test_schema_join_2.nation_hash | 1220001 | 4 | h | s
+ citus_mx_test_schema.nation_hash_collation_search_path | 1220001 | 4 | h | s
+ citus_mx_test_schema.nation_hash_composite_types | 1220001 | 4 | h | s
+ mx_ddl_table | 1220001 | 4 | h | s
+ app_analytics_events_mx | 1220001 | 4 | h | s
+ company_employees_mx | 1220001 | 4 | h | s
+ lineitem_mx | 1220002 | 16 | h | s
+ orders_mx | 1220002 | 16 | h | s
+ customer_mx | 1220003 | 1 | n | t
+ nation_mx | 1220003 | 1 | n | t
+ part_mx | 1220003 | 1 | n | t
+ supplier_mx | 1220003 | 1 | n | t
+ limit_orders_mx | 1220004 | 2 | h | s
+ articles_hash_mx | 1220004 | 2 | h | s
+ multiple_hash_mx | 1220005 | 2 | h | s
+ researchers_mx | 1220006 | 2 | h | s
+ labs_mx | 1220007 | 1 | h | s
+ objects_mx | 1220007 | 1 | h | s
+ articles_single_shard_hash_mx | 1220007 | 1 | h | s
(23 rows)
-- check the citus_tables view
View File
@@ -1,6 +1,7 @@
CREATE SCHEMA start_stop_metadata_sync;
SET search_path TO "start_stop_metadata_sync";
SET citus.next_shard_id TO 980000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 980000;
SET client_min_messages TO WARNING;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
@@ -157,12 +158,12 @@ SELECT * FROM test_matview;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'events%' ORDER BY logicalrelid::text;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f
- events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f
- events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f
- events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f
- events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f
- events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f
+ events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
+ events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
+ events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
+ events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
+ events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
+ events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
(6 rows)
SELECT count(*) > 0 FROM pg_dist_node;
View File
@@ -18,6 +18,7 @@ teardown
SELECT 1 FROM master_add_node('localhost', 57638);
RESET search_path;
+DROP SCHEMA IF EXISTS col_schema CASCADE;
DROP TABLE IF EXISTS t1 CASCADE;
DROP TABLE IF EXISTS t2 CASCADE;
DROP TABLE IF EXISTS t3 CASCADE;
@@ -118,6 +119,13 @@ step "s2-commit"
COMMIT;
}
+step "s2-create-table-for-colocation"
+{
+CREATE SCHEMA col_schema;
+CREATE TABLE col_schema.col_tbl (a INT, b INT);
+SELECT create_distributed_table('col_schema.col_tbl', 'a');
+}
// prints from session 2 are run at the end when the worker has already been added by the
// test
step "s2-print-distributed-objects"
@@ -199,7 +207,7 @@ permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-sche
// concurrency tests with multi schema distribution
permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
-permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
+permutation "s1-print-distributed-objects" "s2-create-table-for-colocation" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s1-commit" "s2-create-table" "s2-commit" "s3-create-table" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
// type and schema tests
@@ -212,10 +220,10 @@ permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-sche
// s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by
// "s2-commit", because "COMMIT" syncs the messages
-permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
-permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
+permutation "s1-print-distributed-objects" "s2-create-table-for-colocation" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
+permutation "s1-print-distributed-objects" "s2-create-table-for-colocation" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
// we cannot run the following operations concurrently
// the problem is that NOTIFY event doesn't (reliably) happen before COMMIT
// so we have to commit s2 before s1 starts
-permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
+permutation "s1-print-distributed-objects" "s2-create-table-for-colocation" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas"
View File
@@ -118,6 +118,6 @@ step "s3-select-count"
permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-select-ref-table" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
-permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-drop" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
+permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-drop" "s1-commit-worker" "s1-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s2-empty" "s3-select-count" "s1-stop-connection"
View File
@@ -1,5 +1,6 @@
CREATE SCHEMA function_propagation_schema;
SET search_path TO 'function_propagation_schema';
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
-- Check whether supported dependencies can be distributed while propagating functions
View File
@@ -236,7 +236,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 4;
-- check to see whether metadata is synced
SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
-SELECT array_agg(row_to_json(c)) FROM pg_dist_colocation c WHERE colocationid = 4
+SELECT coalesce(array_agg(row_to_json(c)), '{}') FROM pg_dist_colocation c WHERE colocationid = 4
$$);
-- create dropped colocation group again
@@ -591,3 +591,40 @@ DROP TABLE range_table;
DROP TABLE none;
DROP TABLE ref;
DROP TABLE local_table;
+CREATE TABLE tbl_1 (a INT, b INT);
+CREATE TABLE tbl_2 (a INT, b INT);
+CREATE TABLE tbl_3 (a INT, b INT);
+SELECT create_distributed_table('tbl_1', 'a', shard_count:=4);
+SELECT create_distributed_table('tbl_2', 'a', shard_count:=4);
+SELECT create_distributed_table('tbl_3', 'a', shard_count:=4, colocate_with:='NONE');
+SELECT colocation_id AS col_id_1 FROM citus_tables WHERE table_name::text = 'tbl_1' \gset
+SELECT colocation_id AS col_id_2 FROM citus_tables WHERE table_name::text = 'tbl_2' \gset
+SELECT colocation_id AS col_id_3 FROM citus_tables WHERE table_name::text = 'tbl_3' \gset
+-- check that tables are colocated correctly
+SELECT :col_id_1 = :col_id_2;
+SELECT :col_id_1 = :col_id_3;
+-- check that there are separate rows for both colocation groups in pg_dist_colocation
+SELECT result FROM run_command_on_all_nodes('
+SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_1
+);
+SELECT result FROM run_command_on_all_nodes('
+SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_3
+);
+DROP TABLE tbl_1, tbl_3;
+-- check that empty colocation group is dropped and non-empty is not
+SELECT result FROM run_command_on_all_nodes('
+SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_1
+);
+SELECT result FROM run_command_on_all_nodes('
+SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_3
+);
+DROP TABLE tbl_2;
View File
@@ -9,6 +9,8 @@ SET citus.shard_replication_factor TO 1;
CREATE SCHEMA fix_idx_names;
SET search_path TO fix_idx_names, public;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000;
-- stop metadata sync for one of the worker nodes so we test both cases
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
View File
@@ -10,6 +10,7 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 2;
SET citus.replicate_reference_tables_on_activate TO off;
SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
View File
@@ -3,6 +3,7 @@
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
View File
@@ -1,6 +1,7 @@
CREATE SCHEMA start_stop_metadata_sync;
SET search_path TO "start_stop_metadata_sync";
SET citus.next_shard_id TO 980000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 980000;
SET client_min_messages TO WARNING;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;