From 51c3ed8dfd9370a020424ad1d305629647fd847f Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 26 Sep 2023 00:13:36 +0300 Subject: [PATCH] store needsisolatednode in pg_dist_shard --- .../citus_add_local_table_to_metadata.c | 3 +- .../commands/create_distributed_table.c | 30 ++- .../distributed/metadata/metadata_cache.c | 3 + .../distributed/metadata/metadata_sync.c | 96 ++++++---- .../distributed/metadata/metadata_utility.c | 93 ++++----- .../citus_split_shard_by_split_points.c | 8 +- .../distributed/operations/create_shards.c | 16 +- .../distributed/operations/isolate_shards.c | 8 +- .../rebalancer_placement_isolation.c | 9 +- .../replicate_none_dist_table_shard.c | 31 +-- .../distributed/operations/shard_rebalancer.c | 1 - .../distributed/operations/shard_split.c | 122 ++++++------ .../distributed/operations/shard_transfer.c | 45 +---- .../distributed/operations/stage_protocol.c | 18 +- .../planner/multi_physical_planner.c | 2 + .../distributed/sql/citus--12.1-1--12.2-1.sql | 13 +- .../sql/downgrades/citus--12.2-1--12.1-1.sql | 11 +- .../12.2-1.sql | 24 --- .../latest.sql | 30 +-- .../12.2-1.sql | 23 +++ .../latest.sql | 15 +- .../udfs/citus_shard_set_isolated/12.2-1.sql | 2 + .../udfs/citus_shard_set_isolated/latest.sql | 2 + .../citus_shard_unset_isolated/12.2-1.sql | 2 + .../citus_shard_unset_isolated/latest.sql | 2 + .../distributed/test/distribution_metadata.c | 3 +- src/include/distributed/metadata_sync.h | 13 +- src/include/distributed/metadata_utility.h | 12 +- src/include/distributed/pg_dist_placement.h | 4 +- src/include/distributed/pg_dist_shard.h | 4 +- .../rebalancer_placement_isolation.h | 2 +- src/include/distributed/shard_split.h | 2 +- src/test/regress/bin/normalize.sed | 2 +- ..._create_distributed_table_concurrently.out | 6 +- .../regress/expected/isolate_placement.out | 176 +++++++++--------- ..._create_distributed_table_concurrently.out | 40 ++-- .../expected/metadata_sync_helpers.out | 52 +++--- 
.../regress/expected/multi_create_shards.out | 4 - src/test/regress/expected/multi_extension.out | 15 +- .../multi_fix_partition_shard_index_names.out | 39 ++-- .../regress/expected/multi_join_pruning.out | 12 +- .../expected/multi_metadata_attributes.out | 2 +- .../regress/expected/multi_metadata_sync.out | 104 +++++------ .../expected/multi_metadata_sync_0.out | 100 +++++----- .../regress/expected/multi_row_insert.out | 6 +- src/test/regress/expected/multi_table_ddl.out | 2 +- .../expected/multi_tenant_isolation.out | 160 ++++++++-------- .../multi_tenant_isolation_nonblocking.out | 160 ++++++++-------- .../regress/expected/multi_test_helpers.out | 50 ++--- .../regress/expected/shard_rebalancer.out | 18 +- .../upgrade_isolate_placement_after.out | 10 +- .../upgrade_isolate_placement_before.out | 10 +- .../expected/upgrade_list_citus_objects.out | 5 +- src/test/regress/sql/isolate_placement.sql | 40 ++-- .../regress/sql/metadata_sync_helpers.sql | 52 +++--- src/test/regress/sql/multi_create_shards.sql | 5 - .../multi_fix_partition_shard_index_names.sql | 8 - .../regress/sql/multi_metadata_attributes.sql | 2 +- src/test/regress/sql/multi_test_helpers.sql | 50 ++--- .../sql/upgrade_isolate_placement_after.sql | 2 +- .../sql/upgrade_isolate_placement_before.sql | 2 +- 61 files changed, 858 insertions(+), 925 deletions(-) delete mode 100644 src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql create mode 100644 src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index c713ce099..da43cd3ba 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -1456,8 +1456,9 @@ InsertMetadataForCitusLocalTable(Oid citusLocalTableId, uint64 shardId, text *shardMinValue = NULL; 
text *shardMaxValue = NULL; + bool needsIsolatedNode = false; InsertShardRow(citusLocalTableId, shardId, shardStorageType, - shardMinValue, shardMaxValue); + shardMinValue, shardMaxValue, needsIsolatedNode); List *nodeList = list_make1(CoordinatorNodeIfAddedAsWorkerOrError()); diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 62e30ec18..e6fe0dd4a 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -582,7 +582,7 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName, List *workersForPlacementList; List *shardSplitPointsList; - List *needsIsolatedNodeForPlacementList; + List *needsIsolatedNodeForPlacementList; if (colocatedTableId != InvalidOid) @@ -599,10 +599,11 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName, */ workersForPlacementList = WorkerNodesForShardList(colocatedShardList); - /* - * Inherit needsisolatednode from the colocated shard placements. - */ - needsIsolatedNodeForPlacementList = NeedsIsolatedNodeForShardList(colocatedShardList); + /* + * Inherit needsisolatednode from the colocated shards. + */ + needsIsolatedNodeForPlacementList = + NeedsIsolatedNodeForShardList(colocatedShardList); } else { @@ -616,11 +617,11 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName, */ workersForPlacementList = RoundRobinWorkerNodeList(workerNodeList, shardCount); - /* - * For a new colocation group, needsisolatednode is set to false for - * all shard placements. - */ - needsIsolatedNodeForPlacementList = GenerateListFromIntElement(false, shardCount); + /* + * For a new colocation group, needsisolatednode is set to false for + * all shards. 
+ */ + needsIsolatedNodeForPlacementList = GenerateListFromIntElement(false, shardCount); } /* @@ -659,7 +660,7 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName, shardToSplit->shardId, shardSplitPointsList, workersForPlacementList, - needsIsolatedNodeForPlacementList, + needsIsolatedNodeForPlacementList, distributionColumnOverrides, sourceColocatedShardIntervalList, colocationId @@ -915,8 +916,6 @@ WorkerNodesForShardList(List *shardList) /* * NeedsIsolatedNodeForShardList returns a list of node booleans reflecting whether * each shard in the given list needs an isolated node. - * - * Each shard within given list is assumed to have a single placement. */ static List * NeedsIsolatedNodeForShardList(List *shardList) @@ -926,9 +925,8 @@ NeedsIsolatedNodeForShardList(List *shardList) ShardInterval *shardInterval = NULL; foreach_ptr(shardInterval, shardList) { - bool missingOk = false; - ShardPlacement *shardPlacement = ActiveShardPlacement(shardInterval->shardId, missingOk); - needsIsolatedNodeList = lappend_int(needsIsolatedNodeList, shardPlacement->needsIsolatedNode); + needsIsolatedNodeList = lappend_int(needsIsolatedNodeList, + shardInterval->needsIsolatedNode); } return needsIsolatedNodeList; diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 55d0f11c5..1d68861aa 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -5379,6 +5379,8 @@ DeformedDistShardTupleToShardInterval(Datum *datumArray, bool *isNullArray, char storageType = DatumGetChar(datumArray[Anum_pg_dist_shard_shardstorage - 1]); Datum minValueTextDatum = datumArray[Anum_pg_dist_shard_shardminvalue - 1]; Datum maxValueTextDatum = datumArray[Anum_pg_dist_shard_shardmaxvalue - 1]; + bool needsIsolatedNode = DatumGetBool( + datumArray[Anum_pg_dist_shard_needsisolatednode - 1]); bool minValueNull = 
isNullArray[Anum_pg_dist_shard_shardminvalue - 1]; bool maxValueNull = isNullArray[Anum_pg_dist_shard_shardmaxvalue - 1]; @@ -5415,6 +5417,7 @@ DeformedDistShardTupleToShardInterval(Datum *datumArray, bool *isNullArray, shardInterval->minValue = minValue; shardInterval->maxValue = maxValue; shardInterval->shardId = shardId; + shardInterval->needsIsolatedNode = needsIsolatedNode; return shardInterval; } diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 81dbb218c..3b2832641 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -95,6 +95,8 @@ char *EnableManualMetadataChangesForUser = ""; int MetadataSyncTransMode = METADATA_SYNC_TRANSACTIONAL; +static Datum citus_internal_add_shard_metadata_internal(PG_FUNCTION_ARGS, + bool expectNeedsIsolatedNode); static void EnsureObjectMetadataIsSane(int distributionArgumentIndex, int colocationId); static List * GetFunctionDependenciesForObjects(ObjectAddress *objectAddress); @@ -167,6 +169,7 @@ PG_FUNCTION_INFO_V1(worker_record_sequence_dependency); PG_FUNCTION_INFO_V1(citus_internal_add_partition_metadata); PG_FUNCTION_INFO_V1(citus_internal_delete_partition_metadata); PG_FUNCTION_INFO_V1(citus_internal_add_shard_metadata); +PG_FUNCTION_INFO_V1(citus_internal_add_shard_metadata_legacy); PG_FUNCTION_INFO_V1(citus_internal_add_placement_metadata); PG_FUNCTION_INFO_V1(citus_internal_delete_placement_metadata); PG_FUNCTION_INFO_V1(citus_internal_add_placement_metadata_legacy); @@ -1203,7 +1206,7 @@ ShardListInsertCommand(List *shardIntervalList) StringInfo insertPlacementCommand = makeStringInfo(); appendStringInfo(insertPlacementCommand, "WITH placement_data(shardid, " - "shardlength, groupid, placementid, needsisolatednode) AS (VALUES "); + "shardlength, groupid, placementid) AS (VALUES "); ShardInterval *shardInterval = NULL; bool firstPlacementProcessed = false; @@ -1226,12 +1229,11 @@ 
ShardListInsertCommand(List *shardIntervalList) firstPlacementProcessed = true; appendStringInfo(insertPlacementCommand, - "(%ld, %ld, %d, %ld, %s)", + "(%ld, %ld, %d, %ld)", shardId, placement->shardLength, placement->groupId, - placement->placementId, - placement->needsIsolatedNode ? "true" : "false"); + placement->placementId); } } @@ -1239,14 +1241,14 @@ ShardListInsertCommand(List *shardIntervalList) appendStringInfo(insertPlacementCommand, "SELECT citus_internal_add_placement_metadata(" - "shardid, shardlength, groupid, placementid, needsisolatednode) " + "shardid, shardlength, groupid, placementid) " "FROM placement_data;"); /* now add shards to insertShardCommand */ StringInfo insertShardCommand = makeStringInfo(); appendStringInfo(insertShardCommand, "WITH shard_data(relationname, shardid, storagetype, " - "shardminvalue, shardmaxvalue) AS (VALUES "); + "shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES "); foreach_ptr(shardInterval, shardIntervalList) { @@ -1278,12 +1280,13 @@ ShardListInsertCommand(List *shardIntervalList) } appendStringInfo(insertShardCommand, - "(%s::regclass, %ld, '%c'::\"char\", %s, %s)", + "(%s::regclass, %ld, '%c'::\"char\", %s, %s, %s)", quote_literal_cstr(qualifiedRelationName), shardId, shardInterval->storageType, minHashToken->data, - maxHashToken->data); + maxHashToken->data, + shardInterval->needsIsolatedNode ? 
"true" : "false"); if (llast(shardIntervalList) != shardInterval) { @@ -1295,7 +1298,7 @@ ShardListInsertCommand(List *shardIntervalList) appendStringInfo(insertShardCommand, "SELECT citus_internal_add_shard_metadata(relationname, shardid, " - "storagetype, shardminvalue, shardmaxvalue) " + "storagetype, shardminvalue, shardmaxvalue, needsisolatednode) " "FROM shard_data;"); /* @@ -1417,13 +1420,12 @@ ColocationIdUpdateCommand(Oid relationId, uint32 colocationId) */ char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, - uint64 shardLength, int32 groupId, - bool needsIsolatedNode) + uint64 shardLength, int32 groupId) { StringInfo command = makeStringInfo(); appendStringInfo(command, UPSERT_PLACEMENT, shardId, shardLength, - groupId, placementId, needsIsolatedNode ? "true" : "false"); + groupId, placementId); return command->data; } @@ -3224,6 +3226,33 @@ citus_internal_delete_partition_metadata(PG_FUNCTION_ARGS) */ Datum citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) +{ + bool expectNeedsIsolatedNode = true; + return citus_internal_add_shard_metadata_internal(fcinfo, expectNeedsIsolatedNode); +} + + +/* + * citus_internal_add_shard_metadata_legacy is an internal UDF to + * add a row to pg_dist_shard, but without the needs_isolated_node + * parameter. + */ +Datum +citus_internal_add_shard_metadata_legacy(PG_FUNCTION_ARGS) +{ + bool expectNeedsIsolatedNode = false; + return citus_internal_add_shard_metadata_internal(fcinfo, expectNeedsIsolatedNode); +} + + +/* + * citus_internal_add_shard_metadata_internal is a helper function for + * citus_internal_add_shard_metadata and citus_internal_add_shard_metadata_legacy + * functions. 
+ */ +static Datum +citus_internal_add_shard_metadata_internal(PG_FUNCTION_ARGS, + bool expectNeedsIsolatedNode) { CheckCitusVersion(ERROR); @@ -3248,6 +3277,13 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) shardMaxValue = PG_GETARG_TEXT_P(4); } + bool needsIsolatedNode = false; + if (expectNeedsIsolatedNode) + { + PG_ENSURE_ARGNOTNULL(5, "needs isolated node"); + needsIsolatedNode = PG_GETARG_BOOL(5); + } + /* only owner of the table (or superuser) is allowed to add the Citus metadata */ EnsureTableOwner(relationId); @@ -3268,7 +3304,8 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) shardMaxValue); } - InsertShardRow(relationId, shardId, storageType, shardMinValue, shardMaxValue); + InsertShardRow(relationId, shardId, storageType, shardMinValue, shardMaxValue, + needsIsolatedNode); PG_RETURN_VOID(); } @@ -3447,18 +3484,16 @@ citus_internal_add_placement_metadata(PG_FUNCTION_ARGS) int64 shardLength = PG_GETARG_INT64(1); int32 groupId = PG_GETARG_INT32(2); int64 placementId = PG_GETARG_INT64(3); - bool needsIsolatedNode = PG_GETARG_BOOL(4); citus_internal_add_placement_metadata_internal(shardId, shardLength, - groupId, placementId, - needsIsolatedNode); + groupId, placementId); PG_RETURN_VOID(); } /* - * citus_internal_delete_placement_metadata is an internal UDF to + * citus_internal_delete_placement_metadata is an internal UDF to * delete a row from pg_dist_placement. 
*/ Datum @@ -3488,15 +3523,12 @@ citus_internal_add_placement_metadata_legacy(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); int64 shardId = PG_GETARG_INT64(0); - int64 shardLength = PG_GETARG_INT64(1); - int32 groupId = PG_GETARG_INT32(2); - int64 placementId = PG_GETARG_INT64(3); + int64 shardLength = PG_GETARG_INT64(2); + int32 groupId = PG_GETARG_INT32(3); + int64 placementId = PG_GETARG_INT64(4); - bool needsIsolatedNode = false; citus_internal_add_placement_metadata_internal(shardId, shardLength, - groupId, placementId, - needsIsolatedNode); - + groupId, placementId); PG_RETURN_VOID(); } @@ -3507,8 +3539,7 @@ citus_internal_add_placement_metadata_legacy(PG_FUNCTION_ARGS) */ void citus_internal_add_placement_metadata_internal(int64 shardId, int64 shardLength, - int32 groupId, int64 placementId, - bool needsIsolatedNode) + int32 groupId, int64 placementId) { bool missingOk = false; Oid relationId = LookupShardRelationFromCatalog(shardId, missingOk); @@ -3533,8 +3564,7 @@ citus_internal_add_placement_metadata_internal(int64 shardId, int64 shardLength, shardLength, groupId); } - InsertShardPlacementRow(shardId, placementId, shardLength, groupId, - needsIsolatedNode); + InsertShardPlacementRow(shardId, placementId, shardLength, groupId); } @@ -3907,7 +3937,7 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS) /* * citus_internal_shard_group_set_needsisolatednode is an internal UDF to - * set needsisolatednode flag for all the placements within the shard group + * set needsisolatednode flag for all the shards within the shard group * that given shard belongs to. 
*/ Datum @@ -4153,14 +4183,12 @@ ShardGroupSetNeedsIsolatedNodeCommand(uint64 shardId, bool enabled) */ char * AddPlacementMetadataCommand(uint64 shardId, uint64 placementId, - uint64 shardLength, int32 groupId, - bool needsIsolatedNode) + uint64 shardLength, int32 groupId) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT citus_internal_add_placement_metadata(%ld, %ld, %d, %ld, %s)", - shardId, shardLength, groupId, placementId, - needsIsolatedNode ? "true" : "false"); + "SELECT citus_internal_add_placement_metadata(%ld, %ld, %d, %ld)", + shardId, shardLength, groupId, placementId); return command->data; } diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index 7ea95175d..2025fd101 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -116,7 +116,7 @@ static HeapTuple CreateDiskSpaceTuple(TupleDesc tupleDesc, uint64 availableBytes static bool GetLocalDiskSpaceStats(uint64 *availableBytes, uint64 *totalBytes); static void ErrorIfShardIsolationNotPossible(uint64 shardId); static void ShardGroupSetNeedsIsolatedNodeGlobally(uint64 shardId, bool enabled); -static void ShardPlacementSetNeedsIsolatedNode(uint64 placementId, bool enabled); +static void ShardSetNeedsIsolatedNode(uint64 shardId, bool enabled); static BackgroundTask * DeformBackgroundTaskHeapTuple(TupleDesc tupleDescriptor, HeapTuple taskTuple); @@ -362,7 +362,7 @@ citus_relation_size(PG_FUNCTION_ARGS) /* * citus_shard_set_isolated sets the needsisolatednode flag to true for all - * the placements within the shard group that given shard belongs to. + * the shards within the shard group that given shard belongs to. 
*/ Datum citus_shard_set_isolated(PG_FUNCTION_ARGS) @@ -384,7 +384,7 @@ citus_shard_set_isolated(PG_FUNCTION_ARGS) /* * citus_shard_unset_isolated sets the needsisolatednode flag to false for all - * the placements within the shard group that given shard belongs to. + * the shards within the shard group that given shard belongs to. */ Datum citus_shard_unset_isolated(PG_FUNCTION_ARGS) @@ -440,7 +440,7 @@ ShardGroupSetNeedsIsolatedNodeGlobally(uint64 shardId, bool enabled) /* * ShardGroupSetNeedsIsolatedNode sets the needsisolatednode flag to desired - * value for all the placements within the shard group that given shard belongs + * value for all the shards within the shard group that given shard belongs * to. */ void @@ -452,75 +452,63 @@ ShardGroupSetNeedsIsolatedNode(uint64 shardId, bool enabled) ShardInterval *colocatedShardInterval = NULL; foreach_ptr(colocatedShardInterval, colocatedShardIntervalList) { - List *activeShardPlacementList = ActiveShardPlacementList( - colocatedShardInterval->shardId); - ShardPlacement *activeShardPlacement = NULL; - foreach_ptr(activeShardPlacement, activeShardPlacementList) - { - ShardPlacementSetNeedsIsolatedNode(activeShardPlacement->placementId, - enabled); - } + ShardSetNeedsIsolatedNode(colocatedShardInterval->shardId, + enabled); } } /* - * ShardPlacementSetNeedsIsolatedNode sets the needsisolatednode flag to desired - * value for the given placement. + * ShardSetNeedsIsolatedNode sets the needsisolatednode flag to desired + * value for the given shard. 
*/ static void -ShardPlacementSetNeedsIsolatedNode(uint64 placementId, bool enabled) +ShardSetNeedsIsolatedNode(uint64 shardId, bool enabled) { - Relation pgDistPlacement = table_open(DistPlacementRelationId(), RowExclusiveLock); + Relation pgDistShard = table_open(DistShardRelationId(), RowExclusiveLock); ScanKeyData scanKey[1]; int scanKeyCount = 1; - ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid, - BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(placementId)); + ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid, + BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); bool indexOK = true; - Oid indexId = DistPlacementPlacementidIndexId(); - SysScanDesc scanDescriptor = systable_beginscan(pgDistPlacement, + Oid indexId = DistShardShardidIndexId(); + SysScanDesc scanDescriptor = systable_beginscan(pgDistShard, indexId, indexOK, NULL, scanKeyCount, scanKey); HeapTuple heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { - ereport(ERROR, (errmsg("could not find valid entry for shard placement " + ereport(ERROR, (errmsg("could not find valid entry for shard " UINT64_FORMAT, - placementId))); + shardId))); } - Datum values[Natts_pg_dist_placement]; - bool isnull[Natts_pg_dist_placement]; - bool replace[Natts_pg_dist_placement]; + Datum values[Natts_pg_dist_shard]; + bool isnull[Natts_pg_dist_shard]; + bool replace[Natts_pg_dist_shard]; memset(values, 0, sizeof(values)); memset(isnull, false, sizeof(isnull)); memset(replace, false, sizeof(replace)); - values[Anum_pg_dist_placement_needsisolatednode - 1] = BoolGetDatum(enabled); - isnull[Anum_pg_dist_placement_needsisolatednode - 1] = false; - replace[Anum_pg_dist_placement_needsisolatednode - 1] = true; + values[Anum_pg_dist_shard_needsisolatednode - 1] = BoolGetDatum(enabled); + isnull[Anum_pg_dist_shard_needsisolatednode - 1] = false; + replace[Anum_pg_dist_shard_needsisolatednode - 1] = true; - TupleDesc tupleDescriptor = RelationGetDescr(pgDistPlacement); + TupleDesc 
tupleDescriptor = RelationGetDescr(pgDistShard); heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace); - CatalogTupleUpdate(pgDistPlacement, &heapTuple->t_self, heapTuple); - - bool shardIdIsNull = false; - uint64 shardId = DatumGetInt64(heap_getattr(heapTuple, - Anum_pg_dist_placement_shardid, - tupleDescriptor, &shardIdIsNull)); - Assert(!shardIdIsNull); + CatalogTupleUpdate(pgDistShard, &heapTuple->t_self, heapTuple); CitusInvalidateRelcacheByShardId(shardId); CommandCounterIncrement(); systable_endscan(scanDescriptor); - table_close(pgDistPlacement, NoLock); + table_close(pgDistShard, NoLock); } @@ -1467,6 +1455,7 @@ CopyShardInterval(ShardInterval *srcInterval) destInterval->maxValueExists = srcInterval->maxValueExists; destInterval->shardId = srcInterval->shardId; destInterval->shardIndex = srcInterval->shardIndex; + destInterval->needsIsolatedNode = srcInterval->needsIsolatedNode; destInterval->minValue = 0; if (destInterval->minValueExists) @@ -1568,7 +1557,7 @@ NodeGroupGetIsolatedShardPlacementGroup(int32 groupId) } nodeShardPlacementGroup = shardPlacementGroup; - shardPlacementGroupNeedsIsolatedNode = placement->needsIsolatedNode; + shardPlacementGroupNeedsIsolatedNode = shardInterval->needsIsolatedNode; heapTuple = systable_getnext(scanDescriptor); } @@ -1947,10 +1936,9 @@ TupleToGroupShardPlacement(TupleDesc tupleDescriptor, HeapTuple heapTuple) { bool isNullArray[Natts_pg_dist_placement]; Datum datumArray[Natts_pg_dist_placement]; - memset(datumArray, 0, sizeof(datumArray)); - memset(isNullArray, false, sizeof(isNullArray)); - if (HeapTupleHasNulls(heapTuple)) + if (HeapTupleHeaderGetNatts(heapTuple->t_data) != Natts_pg_dist_placement || + HeapTupleHasNulls(heapTuple)) { ereport(ERROR, (errmsg("unexpected null in pg_dist_placement tuple"))); } @@ -1970,8 +1958,6 @@ TupleToGroupShardPlacement(TupleDesc tupleDescriptor, HeapTuple heapTuple) datumArray[Anum_pg_dist_placement_shardlength - 1]); shardPlacement->groupId = 
DatumGetInt32( datumArray[Anum_pg_dist_placement_groupid - 1]); - shardPlacement->needsIsolatedNode = DatumGetBool( - datumArray[Anum_pg_dist_placement_needsisolatednode - 1]); return shardPlacement; } @@ -2026,7 +2012,8 @@ IsDummyPlacement(ShardPlacement *taskPlacement) */ void InsertShardRow(Oid relationId, uint64 shardId, char storageType, - text *shardMinValue, text *shardMaxValue) + text *shardMinValue, text *shardMaxValue, + bool needsIsolatedNode) { Datum values[Natts_pg_dist_shard]; bool isNulls[Natts_pg_dist_shard]; @@ -2038,6 +2025,7 @@ InsertShardRow(Oid relationId, uint64 shardId, char storageType, values[Anum_pg_dist_shard_logicalrelid - 1] = ObjectIdGetDatum(relationId); values[Anum_pg_dist_shard_shardid - 1] = Int64GetDatum(shardId); values[Anum_pg_dist_shard_shardstorage - 1] = CharGetDatum(storageType); + values[Anum_pg_dist_shard_needsisolatednode - 1] = BoolGetDatum(needsIsolatedNode); /* dropped shardalias column must also be set; it is still part of the tuple */ isNulls[Anum_pg_dist_shard_shardalias_DROPPED - 1] = true; @@ -2076,15 +2064,12 @@ InsertShardRow(Oid relationId, uint64 shardId, char storageType, */ ShardPlacement * InsertShardPlacementRowGlobally(uint64 shardId, uint64 placementId, - uint64 shardLength, int32 groupId, - bool needsIsolatedNode) + uint64 shardLength, int32 groupId) { - InsertShardPlacementRow(shardId, placementId, shardLength, groupId, - needsIsolatedNode); + InsertShardPlacementRow(shardId, placementId, shardLength, groupId); char *insertPlacementCommand = - AddPlacementMetadataCommand(shardId, placementId, shardLength, groupId, - needsIsolatedNode); + AddPlacementMetadataCommand(shardId, placementId, shardLength, groupId); SendCommandToWorkersWithMetadata(insertPlacementCommand); return LoadShardPlacement(shardId, placementId); @@ -2099,8 +2084,7 @@ InsertShardPlacementRowGlobally(uint64 shardId, uint64 placementId, */ uint64 InsertShardPlacementRow(uint64 shardId, uint64 placementId, - uint64 shardLength, int32 
groupId, - bool needsIsolatedNode) + uint64 shardLength, int32 groupId) { Datum values[Natts_pg_dist_placement]; bool isNulls[Natts_pg_dist_placement]; @@ -2118,8 +2102,6 @@ InsertShardPlacementRow(uint64 shardId, uint64 placementId, values[Anum_pg_dist_placement_shardstate - 1] = Int32GetDatum(1); values[Anum_pg_dist_placement_shardlength - 1] = Int64GetDatum(shardLength); values[Anum_pg_dist_placement_groupid - 1] = Int32GetDatum(groupId); - values[Anum_pg_dist_placement_needsisolatednode - 1] = - BoolGetDatum(needsIsolatedNode); /* open shard placement relation and insert new tuple */ Relation pgDistPlacement = table_open(DistPlacementRelationId(), RowExclusiveLock); @@ -2357,7 +2339,8 @@ DeleteShardPlacementRow(uint64 placementId) uint64 shardId = heap_getattr(heapTuple, Anum_pg_dist_placement_shardid, tupleDescriptor, &isNull); - if (HeapTupleHasNulls(heapTuple)) + if (HeapTupleHeaderGetNatts(heapTuple->t_data) != Natts_pg_dist_placement || + HeapTupleHasNulls(heapTuple)) { ereport(ERROR, (errmsg("unexpected null in pg_dist_placement tuple"))); } diff --git a/src/backend/distributed/operations/citus_split_shard_by_split_points.c b/src/backend/distributed/operations/citus_split_shard_by_split_points.c index ea48cd87b..1ba9ffa48 100644 --- a/src/backend/distributed/operations/citus_split_shard_by_split_points.c +++ b/src/backend/distributed/operations/citus_split_shard_by_split_points.c @@ -54,9 +54,9 @@ citus_split_shard_by_split_points(PG_FUNCTION_ARGS) Oid shardTransferModeOid = PG_GETARG_OID(3); SplitMode shardSplitMode = LookupSplitMode(shardTransferModeOid); - /* we don't inherit needsisolatednode for new placements */ - List *needsIsolatedNodeForPlacementList = - GenerateListFromIntElement(false, list_length(nodeIdsForPlacementList)); + /* we don't inherit needsisolatednode for new shards */ + List *needsIsolatedNodeForPlacementList = + GenerateListFromIntElement(false, list_length(nodeIdsForPlacementList)); DistributionColumnMap 
*distributionColumnOverrides = NULL; List *sourceColocatedShardIntervalList = NIL; @@ -66,7 +66,7 @@ citus_split_shard_by_split_points(PG_FUNCTION_ARGS) shardIdToSplit, shardSplitPointsList, nodeIdsForPlacementList, - needsIsolatedNodeForPlacementList, + needsIsolatedNodeForPlacementList, distributionColumnOverrides, sourceColocatedShardIntervalList, INVALID_COLOCATION_ID); diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c index 191c48b4f..eb7a2fde4 100644 --- a/src/backend/distributed/operations/create_shards.c +++ b/src/backend/distributed/operations/create_shards.c @@ -190,8 +190,9 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, text *minHashTokenText = IntegerToText(shardMinHashToken); text *maxHashTokenText = IntegerToText(shardMaxHashToken); + bool needsIsolatedNode = false; InsertShardRow(distributedTableId, *shardIdPtr, shardStorageType, - minHashTokenText, maxHashTokenText); + minHashTokenText, maxHashTokenText, needsIsolatedNode); InsertShardPlacementRows(distributedTableId, *shardIdPtr, @@ -288,8 +289,10 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool List *sourceShardPlacementList = ShardPlacementListSortedByWorker( sourceShardId); + /* inherit from the colocated shard */ + bool needsIsolatedNode = sourceShardInterval->needsIsolatedNode; InsertShardRow(targetRelationId, *newShardIdPtr, targetShardStorageType, - shardMinValueText, shardMaxValueText); + shardMinValueText, shardMaxValueText, needsIsolatedNode); ShardPlacement *sourcePlacement = NULL; foreach_ptr(sourcePlacement, sourceShardPlacementList) @@ -300,8 +303,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool InsertShardPlacementRow(*newShardIdPtr, INVALID_PLACEMENT_ID, shardSize, - groupId, - sourcePlacement->needsIsolatedNode); + groupId); } } @@ -372,8 +374,9 @@ CreateReferenceTableShard(Oid distributedTableId) /* get the next shard id */ uint64 
shardId = GetNextShardId(); + bool needsIsolatedNode = false; InsertShardRow(distributedTableId, shardId, shardStorageType, shardMinValue, - shardMaxValue); + shardMaxValue, needsIsolatedNode); InsertShardPlacementRows(distributedTableId, shardId, @@ -428,8 +431,9 @@ CreateSingleShardTableShardWithRoundRobinPolicy(Oid relationId, uint32 colocatio text *minHashTokenText = NULL; text *maxHashTokenText = NULL; uint64 shardId = GetNextShardId(); + bool needsIsolatedNode = false; InsertShardRow(relationId, shardId, shardStorageType, - minHashTokenText, maxHashTokenText); + minHashTokenText, maxHashTokenText, needsIsolatedNode); int replicationFactor = 1; InsertShardPlacementRows(relationId, diff --git a/src/backend/distributed/operations/isolate_shards.c b/src/backend/distributed/operations/isolate_shards.c index 6cf9cc8c7..5bd798d07 100644 --- a/src/backend/distributed/operations/isolate_shards.c +++ b/src/backend/distributed/operations/isolate_shards.c @@ -166,9 +166,9 @@ isolate_tenant_to_new_shard(PG_FUNCTION_ARGS) nodeIdsForPlacementList = lappend_int(nodeIdsForPlacementList, sourceNodeId); } - /* we don't inherit needsisolatednode for new placements */ - List *needsIsolatedNodeForPlacementList = - GenerateListFromIntElement(false, list_length(nodeIdsForPlacementList)); + /* we don't inherit needsisolatednode for new shards */ + List *needsIsolatedNodeForPlacementList = + GenerateListFromIntElement(false, list_length(nodeIdsForPlacementList)); DistributionColumnMap *distributionColumnOverrides = NULL; List *sourceColocatedShardIntervalList = NIL; @@ -178,7 +178,7 @@ isolate_tenant_to_new_shard(PG_FUNCTION_ARGS) sourceShard->shardId, shardSplitPointsList, nodeIdsForPlacementList, - needsIsolatedNodeForPlacementList, + needsIsolatedNodeForPlacementList, distributionColumnOverrides, sourceColocatedShardIntervalList, INVALID_COLOCATION_ID); diff --git a/src/backend/distributed/operations/rebalancer_placement_isolation.c 
b/src/backend/distributed/operations/rebalancer_placement_isolation.c index cfa057991..623459545 100644 --- a/src/backend/distributed/operations/rebalancer_placement_isolation.c +++ b/src/backend/distributed/operations/rebalancer_placement_isolation.c @@ -227,7 +227,8 @@ NodePlacementGroupHashAssignNodes(HTAB *nodePlacementGroupHash, ShardPlacement *shardPlacement = NULL; foreach_ptr(shardPlacement, shardPlacementList) { - if (!shardPlacement->needsIsolatedNode) + ShardInterval *shardInterval = LoadShardInterval(shardPlacement->shardId); + if (!shardInterval->needsIsolatedNode) { continue; } @@ -258,8 +259,6 @@ NodePlacementGroupHashAssignNodes(HTAB *nodePlacementGroupHash, ShardPlacement *unassignedShardPlacement = NULL; foreach_ptr(unassignedShardPlacement, unassignedShardPlacementList) { - Assert(unassignedShardPlacement->needsIsolatedNode); - bool isolated = false; while (!isolated && availableNodeIdx < list_length(availableWorkerList)) { @@ -351,8 +350,8 @@ RebalancerPlacementIsolationContextPlacementIsAllowedOnWorker( NodePlacementGroupHashGetNodeWithGroupId(nodePlacementGroupHash, workerNode->groupId); - ShardPlacement *shardPlacement = LoadShardPlacement(shardId, placementId); - if (!shardPlacement->needsIsolatedNode) + ShardInterval *shardInterval = LoadShardInterval(shardId); + if (!shardInterval->needsIsolatedNode) { /* * It doesn't need an isolated node, but is the node used to isolate diff --git a/src/backend/distributed/operations/replicate_none_dist_table_shard.c b/src/backend/distributed/operations/replicate_none_dist_table_shard.c index 667b4f3c6..c28490367 100644 --- a/src/backend/distributed/operations/replicate_none_dist_table_shard.c +++ b/src/backend/distributed/operations/replicate_none_dist_table_shard.c @@ -4,11 +4,6 @@ * Routines to replicate shard of none-distributed table to * a remote node. 
* - * Procedures defined in this file assume that given none-distributed - * table was a Citus local table, caller updated the metadata to convert - * it to another none-distributed table type, and now wants to replicate - * the shard of the table to a remote node as part of the conversion. - * * Copyright (c) Citus Data, Inc. * *------------------------------------------------------------------------- @@ -63,25 +58,14 @@ NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId, uint64 shardLength = ShardLength(shardId); - /* we've already verified that table has a coordinator placement */ - ShardPlacement *coordinatorPlacement = - linitial(ActiveShardPlacementListOnGroup(shardId, COORDINATOR_GROUP_ID)); - /* insert new placements to pg_dist_placement */ List *insertedPlacementList = NIL; WorkerNode *targetNode = NULL; foreach_ptr(targetNode, targetNodeList) { - /* - * needsIsolatedNode cannot be true because the input table was - * originally a Citus local table. - */ - Assert(!coordinatorPlacement->needsIsolatedNode); - ShardPlacement *shardPlacement = InsertShardPlacementRowGlobally(shardId, GetNextPlacementId(), - shardLength, targetNode->groupId, - coordinatorPlacement->needsIsolatedNode); + shardLength, targetNode->groupId); /* and save the placement for shard creation on workers */ insertedPlacementList = lappend(insertedPlacementList, shardPlacement); @@ -94,6 +78,8 @@ NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId, /* fetch coordinator placement before deleting it */ Oid localPlacementTableId = GetTableLocalShardOid(noneDistTableId, shardId); + ShardPlacement *coordinatorPlacement = + linitial(ActiveShardPlacementListOnGroup(shardId, COORDINATOR_GROUP_ID)); /* * CreateForeignKeysFromReferenceTablesOnShards and CopyFromLocalTableIntoDistTable @@ -120,16 +106,9 @@ NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId, */ CreateForeignKeysFromReferenceTablesOnShards(noneDistTableId); - /* - * Using the same placement id, 
re-insert the deleted placement. - * - * needsIsolatedNode cannot be true because the input table was originally - * a Citus local table. - */ - Assert(!coordinatorPlacement->needsIsolatedNode); + /* using the same placement id, re-insert the deleted placement */ InsertShardPlacementRowGlobally(shardId, coordinatorPlacement->placementId, - shardLength, COORDINATOR_GROUP_ID, - coordinatorPlacement->needsIsolatedNode); + shardLength, COORDINATOR_GROUP_ID); } diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index 567614db2..14f927fe6 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -472,7 +472,6 @@ FullShardPlacementList(Oid relationId, ArrayType *excludedShardArray) placement->shardLength = groupPlacement->shardLength; placement->groupId = groupPlacement->groupId; placement->nodeId = worker->nodeId; - placement->needsIsolatedNode = groupPlacement->needsIsolatedNode; placement->nodeName = pstrdup(worker->workerName); placement->nodePort = worker->workerPort; placement->placementId = groupPlacement->placementId; diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index 8556e3f81..d7bf1bc62 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -92,24 +92,26 @@ static void CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitInterval static void CreateReplicaIdentitiesForDummyShards(HTAB *mapOfPlacementToDummyShardList); static void CreateObjectOnPlacement(List *objectCreationCommandList, WorkerNode *workerNode); -static List * CreateSplitIntervalsForShardGroup(List *sourceColocatedShardList, - List *splitPointsForShard); +static List * CreateSplitIntervalsForShardGroup(List *sourceColocatedShardList, + List *splitPointsForShard, + List *needsIsolatedNodeForShardList); static void 
CreateSplitIntervalsForShard(ShardInterval *sourceShard, List *splitPointsForShard, + List *needsIsolatedNodeForShardList, List **shardSplitChildrenIntervalList); static void BlockingShardSplit(SplitOperation splitOperation, uint64 splitWorkflowId, List *sourceColocatedShardIntervalList, List *shardSplitPointsList, List *workersForPlacementList, - List *needsIsolatedNodeForPlacementList, + List *needsIsolatedNodeForShardList, DistributionColumnMap *distributionColumnOverrides); static void NonBlockingShardSplit(SplitOperation splitOperation, uint64 splitWorkflowId, List *sourceColocatedShardIntervalList, List *shardSplitPointsList, List *workersForPlacementList, - List *needsIsolatedNodeForPlacementList, + List *needsIsolatedNodeForShardList, DistributionColumnMap *distributionColumnOverrides, uint32 targetColocationId); static void DoSplitCopy(WorkerNode *sourceShardNode, @@ -130,8 +132,7 @@ static void UpdateDistributionColumnsForShardGroup(List *colocatedShardList, int shardCount, uint32 colocationId); static void InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, - List *workersForPlacementList, - List *needsIsolatedNodeForPlacementList); + List *workersForPlacementList); static void CreatePartitioningHierarchyForBlockingSplit( List *shardGroupSplitIntervalListList, List *workersForPlacementList); @@ -423,19 +424,19 @@ GetWorkerNodesFromWorkerIds(List *nodeIdsForPlacementList) /* * SplitShard API to split a given shard (or shard group) based on specified split points * to a set of destination nodes. - * 'splitMode' : Mode of split operation. - * 'splitOperation' : Customer operation that triggered split. - * 'shardInterval' : Source shard interval to be split. - * 'shardSplitPointsList' : Split Points list for the source 'shardInterval'. - * 'nodeIdsForPlacementList' : Placement list corresponding to split children. - * 'needsIsolatedNodeForPlacementList' : Whether each split children needs an isolated node. 
- * 'distributionColumnOverrides' : Maps relation IDs to distribution columns. - * If not specified, the distribution column is read - * from the metadata. - * 'colocatedShardIntervalList' : Shard interval list for colocation group. (only used for - * create_distributed_table_concurrently). - * 'targetColocationId' : Specifies the colocation ID (only used for - * create_distributed_table_concurrently). + * 'splitMode' : Mode of split operation. + * 'splitOperation' : Customer operation that triggered split. + * 'shardInterval' : Source shard interval to be split. + * 'shardSplitPointsList' : Split Points list for the source 'shardInterval'. + * 'nodeIdsForPlacementList' : Placement list corresponding to split children. + * 'needsIsolatedNodeForShardList' : Whether each split children needs an isolated node. + * 'distributionColumnOverrides' : Maps relation IDs to distribution columns. + * If not specified, the distribution column is read + * from the metadata. + * 'colocatedShardIntervalList' : Shard interval list for colocation group. (only used for + * create_distributed_table_concurrently). + * 'targetColocationId' : Specifies the colocation ID (only used for + * create_distributed_table_concurrently). 
*/ void SplitShard(SplitMode splitMode, @@ -443,7 +444,7 @@ SplitShard(SplitMode splitMode, uint64 shardIdToSplit, List *shardSplitPointsList, List *nodeIdsForPlacementList, - List *needsIsolatedNodeForPlacementList, + List *needsIsolatedNodeForShardList, DistributionColumnMap *distributionColumnOverrides, List *colocatedShardIntervalList, uint32 targetColocationId) @@ -517,7 +518,7 @@ SplitShard(SplitMode splitMode, sourceColocatedShardIntervalList, shardSplitPointsList, workersForPlacementList, - needsIsolatedNodeForPlacementList, + needsIsolatedNodeForShardList, distributionColumnOverrides); } else @@ -530,7 +531,7 @@ SplitShard(SplitMode splitMode, sourceColocatedShardIntervalList, shardSplitPointsList, workersForPlacementList, - needsIsolatedNodeForPlacementList, + needsIsolatedNodeForShardList, distributionColumnOverrides, targetColocationId); @@ -547,12 +548,12 @@ SplitShard(SplitMode splitMode, /* * SplitShard API to split a given shard (or shard group) in blocking fashion * based on specified split points to a set of destination nodes. - * splitOperation : Customer operation that triggered split. - * splitWorkflowId : Number used to identify split workflow in names. - * sourceColocatedShardIntervalList : Source shard group to be split. - * shardSplitPointsList : Split Points list for the source 'shardInterval'. - * workersForPlacementList : Placement list corresponding to split children. - * needsIsolatedNodeForPlacementList : Whether each split children needs an isolated node. + * splitOperation : Customer operation that triggered split. + * splitWorkflowId : Number used to identify split workflow in names. + * sourceColocatedShardIntervalList : Source shard group to be split. + * shardSplitPointsList : Split Points list for the source 'shardInterval'. + * workersForPlacementList : Placement list corresponding to split children. + * needsIsolatedNodeForShardList : Whether each split children needs an isolated node. 
*/ static void BlockingShardSplit(SplitOperation splitOperation, @@ -560,7 +561,7 @@ BlockingShardSplit(SplitOperation splitOperation, List *sourceColocatedShardIntervalList, List *shardSplitPointsList, List *workersForPlacementList, - List *needsIsolatedNodeForPlacementList, + List *needsIsolatedNodeForShardList, DistributionColumnMap *distributionColumnOverrides) { const char *operationName = SplitOperationAPIName[splitOperation]; @@ -570,7 +571,8 @@ BlockingShardSplit(SplitOperation splitOperation, /* First create shard interval metadata for split children */ List *shardGroupSplitIntervalListList = CreateSplitIntervalsForShardGroup( sourceColocatedShardIntervalList, - shardSplitPointsList); + shardSplitPointsList, + needsIsolatedNodeForShardList); /* Only single placement allowed (already validated RelationReplicationFactor = 1) */ ShardInterval *firstShard = linitial(sourceColocatedShardIntervalList); @@ -624,8 +626,7 @@ BlockingShardSplit(SplitOperation splitOperation, /* Insert new shard and placement metdata */ InsertSplitChildrenShardMetadata(shardGroupSplitIntervalListList, - workersForPlacementList, - needsIsolatedNodeForPlacementList); + workersForPlacementList); /* create partitioning hierarchy, if any */ CreatePartitioningHierarchyForBlockingSplit( @@ -1029,10 +1030,12 @@ CreateObjectOnPlacement(List *objectCreationCommandList, * [ S1_1(-2147483648, 0), S1_2(1, 2147483647) ], // Split Interval List for S1. * [ S2_1(-2147483648, 0), S2_2(1, 2147483647) ] // Split Interval List for S2. 
* ] + * 'needsIsolatedNodeForShardList': Whether each split children needs an isolated node */ static List * CreateSplitIntervalsForShardGroup(List *sourceColocatedShardIntervalList, - List *splitPointsForShard) + List *splitPointsForShard, + List *needsIsolatedNodeForShardList) { List *shardGroupSplitIntervalListList = NIL; @@ -1041,6 +1044,7 @@ CreateSplitIntervalsForShardGroup(List *sourceColocatedShardIntervalList, { List *shardSplitIntervalList = NIL; CreateSplitIntervalsForShard(shardToSplitInterval, splitPointsForShard, + needsIsolatedNodeForShardList, &shardSplitIntervalList); shardGroupSplitIntervalListList = lappend(shardGroupSplitIntervalListList, @@ -1059,6 +1063,7 @@ CreateSplitIntervalsForShardGroup(List *sourceColocatedShardIntervalList, static void CreateSplitIntervalsForShard(ShardInterval *sourceShard, List *splitPointsForShard, + List *needsIsolatedNodeForShardList, List **shardSplitChildrenIntervalList) { /* For 'N' split points, we will have N+1 shard intervals created. 
*/ @@ -1083,7 +1088,8 @@ CreateSplitIntervalsForShard(ShardInterval *sourceShard, ShardInterval *splitChildShardInterval = CopyShardInterval(sourceShard); splitChildShardInterval->shardIndex = -1; splitChildShardInterval->shardId = GetNextShardIdForSplitChild(); - + splitChildShardInterval->needsIsolatedNode = + list_nth_int(needsIsolatedNodeForShardList, index); splitChildShardInterval->minValueExists = true; splitChildShardInterval->minValue = currentSplitChildMinValue; splitChildShardInterval->maxValueExists = true; @@ -1162,8 +1168,7 @@ UpdateDistributionColumnsForShardGroup(List *colocatedShardList, */ static void InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, - List *workersForPlacementList, - List *needsIsolatedNodeForPlacementList) + List *workersForPlacementList) { List *shardIntervalList = NIL; List *syncedShardList = NIL; @@ -1176,25 +1181,24 @@ InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, /* * Iterate on split shards list for a given shard and insert metadata. 
*/ - for (int i = 0; i < list_length(shardIntervalList); i++) + ShardInterval *shardInterval = NULL; + WorkerNode *workerPlacementNode = NULL; + forboth_ptr(shardInterval, shardIntervalList, workerPlacementNode, + workersForPlacementList) { - ShardInterval *shardInterval = list_nth(shardIntervalList, i); - WorkerNode *workerPlacementNode = list_nth(workersForPlacementList, i); - bool needsIsolatedNode = list_nth_int(needsIsolatedNodeForPlacementList, i); - InsertShardRow( shardInterval->relationId, shardInterval->shardId, shardInterval->storageType, IntegerToText(DatumGetInt32(shardInterval->minValue)), - IntegerToText(DatumGetInt32(shardInterval->maxValue))); + IntegerToText(DatumGetInt32(shardInterval->maxValue)), + shardInterval->needsIsolatedNode); InsertShardPlacementRow( shardInterval->shardId, INVALID_PLACEMENT_ID, /* triggers generation of new id */ 0, /* shard length (zero for HashDistributed Table) */ - workerPlacementNode->groupId, - needsIsolatedNode); + workerPlacementNode->groupId); if (ShouldSyncTableMetadata(shardInterval->relationId)) { @@ -1379,17 +1383,17 @@ AcquireNonblockingSplitLock(Oid relationId) /* * SplitShard API to split a given shard (or shard group) in non-blocking fashion * based on specified split points to a set of destination nodes. - * splitOperation : Customer operation that triggered split. - * splitWorkflowId : Number used to identify split workflow in names. - * sourceColocatedShardIntervalList : Source shard group to be split. - * shardSplitPointsList : Split Points list for the source 'shardInterval'. - * workersForPlacementList : Placement list corresponding to split children. - * needsIsolatedNodeForPlacementList : Whether each split children needs an isolated node. - * distributionColumnList : Maps relation IDs to distribution columns. - * If not specified, the distribution column is read - * from the metadata. - * targetColocationId : Specifies the colocation ID (only used for - * create_distributed_table_concurrently). 
+ * splitOperation : Customer operation that triggered split. + * splitWorkflowId : Number used to identify split workflow in names. + * sourceColocatedShardIntervalList : Source shard group to be split. + * shardSplitPointsList : Split Points list for the source 'shardInterval'. + * workersForPlacementList : Placement list corresponding to split children. + * needsIsolatedNodeForShardList : Whether each split children needs an isolated node. + * distributionColumnList : Maps relation IDs to distribution columns. + * If not specified, the distribution column is read + * from the metadata. + * targetColocationId : Specifies the colocation ID (only used for + * create_distributed_table_concurrently). */ void NonBlockingShardSplit(SplitOperation splitOperation, @@ -1397,7 +1401,7 @@ NonBlockingShardSplit(SplitOperation splitOperation, List *sourceColocatedShardIntervalList, List *shardSplitPointsList, List *workersForPlacementList, - List *needsIsolatedNodeForPlacementList, + List *needsIsolatedNodeForShardList, DistributionColumnMap *distributionColumnOverrides, uint32 targetColocationId) { @@ -1411,7 +1415,8 @@ NonBlockingShardSplit(SplitOperation splitOperation, /* First create shard interval metadata for split children */ List *shardGroupSplitIntervalListList = CreateSplitIntervalsForShardGroup( sourceColocatedShardIntervalList, - shardSplitPointsList); + shardSplitPointsList, + needsIsolatedNodeForShardList); ShardInterval *firstShard = linitial(sourceColocatedShardIntervalList); @@ -1614,8 +1619,7 @@ NonBlockingShardSplit(SplitOperation splitOperation, /* 12) Insert new shard and placement metdata */ InsertSplitChildrenShardMetadata(shardGroupSplitIntervalListList, - workersForPlacementList, - needsIsolatedNodeForPlacementList); + workersForPlacementList); /* 13) create partitioning hierarchy, if any, this needs to be done * after the metadata is correct, because it fails for some diff --git a/src/backend/distributed/operations/shard_transfer.c 
b/src/backend/distributed/operations/shard_transfer.c index 952fc1b48..23925a315 100644 --- a/src/backend/distributed/operations/shard_transfer.c +++ b/src/backend/distributed/operations/shard_transfer.c @@ -122,9 +122,7 @@ static void EnsureShardCanBeCopied(int64 shardId, const char *sourceNodeName, static List * RecreateTableDDLCommandList(Oid relationId); static void EnsureTableListOwner(List *tableIdList); static void ErrorIfReplicatingDistributedTableWithFKeys(List *tableIdList); -static bool NewPlacementNeedsIsolatedNode(uint64 shardId, - char *sourceNodeName, - int32 sourceNodePort); + static void DropShardPlacementsFromMetadata(List *shardList, char *nodeName, int32 nodePort); @@ -526,34 +524,15 @@ TransferShards(int64 shardId, char *sourceNodeName, uint32 groupId = GroupForNode(targetNodeName, targetNodePort); uint64 placementId = GetNextPlacementId(); - /* - * Decide whether the new placement needs isolated node or not. - * - * Note that even if the new placement needs isolated node, we don't - * enforce it here because we assume that user is aware of what they're - * doing if this shard transfer operation is initiated by the user. - * Consequence of this assumption is that if user is transferring a - * placement that needs isolated node to a node that has some other - * placements, then the next call made to rebalancer would overwrite - * this operation (by moving the placement to an appropriate node). - * - * Otherwise, i.e., if this is initiated by the rebalancer, rebalancer - * anyway enforces isolation by choosing an appropriate node. 
- */ - bool newPlacementNeedsIsolatedNode = NewPlacementNeedsIsolatedNode( - colocatedShardId, - sourceNodeName, - sourceNodePort); InsertShardPlacementRow(colocatedShardId, placementId, ShardLength(colocatedShardId), - groupId, newPlacementNeedsIsolatedNode); + groupId); if (transferType == SHARD_TRANSFER_COPY && ShouldSyncTableMetadata(colocatedShard->relationId)) { char *placementCommand = PlacementUpsertCommand(colocatedShardId, placementId, - 0, groupId, - newPlacementNeedsIsolatedNode); + 0, groupId); SendCommandToWorkersWithMetadata(placementCommand); } @@ -2000,24 +1979,6 @@ RecreateTableDDLCommandList(Oid relationId) } -/* - * NewPlacementNeedsIsolatedNode if the placement we're creating based on the - * placement we're replicating from sourceNodeName/sourceNodePort needs - * isolation. - */ -static bool -NewPlacementNeedsIsolatedNode(uint64 shardId, char *sourceNodeName, int32 sourceNodePort) -{ - /* assume we're transferring the first placement */ - uint32 groupId = GroupForNode(sourceNodeName, sourceNodePort); - - List *activeShardPlacementListOnGroup = ActiveShardPlacementListOnGroup(shardId, - groupId); - ShardPlacement *firstPlacementOnGroup = linitial(activeShardPlacementListOnGroup); - return firstPlacementOnGroup->needsIsolatedNode; -} - - /* * DropShardPlacementsFromMetadata drops the shard placement metadata for * the shard placements of given shard interval list from pg_dist_placement. 
diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c index 6660b260a..793a8f9c4 100644 --- a/src/backend/distributed/operations/stage_protocol.c +++ b/src/backend/distributed/operations/stage_protocol.c @@ -198,7 +198,9 @@ master_create_empty_shard(PG_FUNCTION_ARGS) candidateNodeIndex++; } - InsertShardRow(relationId, shardId, storageType, nullMinValue, nullMaxValue); + bool needsIsolatedNode = false; + InsertShardRow(relationId, shardId, storageType, nullMinValue, nullMaxValue, + needsIsolatedNode); CreateAppendDistributedShardPlacements(relationId, shardId, candidateNodeList, ShardReplicationFactor); @@ -369,9 +371,8 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, ExecuteCriticalRemoteCommandList(connection, commandList); - bool needsIsolatedNode = false; InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID, shardSize, - nodeGroupId, needsIsolatedNode); + nodeGroupId); placementsCreated++; if (placementsCreated >= replicationFactor) @@ -392,9 +393,6 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, /* * InsertShardPlacementRows inserts shard placements to the metadata table on * the coordinator node. - * - * This function assumes that the caller is inserting the placements for a - * newly created shard. As a result, always sets needsisolatednode to false. 
*/ void InsertShardPlacementRows(Oid relationId, int64 shardId, List *workerNodeList, @@ -409,12 +407,10 @@ InsertShardPlacementRows(Oid relationId, int64 shardId, List *workerNodeList, uint32 nodeGroupId = workerNode->groupId; const uint64 shardSize = 0; - bool needsIsolatedNode = false; InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID, shardSize, - nodeGroupId, - needsIsolatedNode); + nodeGroupId); } } @@ -824,11 +820,9 @@ UpdateShardSize(uint64 shardId, ShardInterval *shardInterval, Oid relationId, { uint64 placementId = placement->placementId; int32 groupId = placement->groupId; - bool needsIsolatedNode = placement->needsIsolatedNode; DeleteShardPlacementRow(placementId); - InsertShardPlacementRow(shardId, placementId, shardSize, groupId, - needsIsolatedNode); + InsertShardPlacementRow(shardId, placementId, shardSize, groupId); } } diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index 21befa6f2..6c0ba5333 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -4521,6 +4521,8 @@ GenerateSyntheticShardIntervalArray(int partitionCount) shardInterval->shardId = INVALID_SHARD_ID; shardInterval->valueTypeId = INT4OID; + shardInterval->needsIsolatedNode = false; + shardIntervalArray[shardIndex] = shardInterval; } diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql index 22b1634f3..8e93dfeea 100644 --- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql +++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql @@ -4,18 +4,9 @@ #include "udfs/citus_add_rebalance_strategy/12.2-1.sql" -ALTER TABLE pg_dist_placement ADD COLUMN needsisolatednode boolean NOT NULL DEFAULT false; +ALTER TABLE pg_dist_shard ADD COLUMN needsisolatednode boolean NOT NULL DEFAULT false; --- Drop the legacy one that survived from 10.2-1, not the one created 
in 11.2-1. --- --- And as we did when upgrading from 10.2-1 to 11.2-1, citus_internal_add_placement_metadata/12.2-1.sql --- preserves the one created in 11.2-1 as the "new legacy" one. -DROP FUNCTION pg_catalog.citus_internal_add_placement_metadata( - shard_id bigint, shard_state integer, - shard_length bigint, group_id integer, - placement_id bigint); - -#include "udfs/citus_internal_add_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_shard_metadata/12.2-1.sql" #include "udfs/citus_internal_shard_group_set_needsisolatednode/12.2-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql index 973bbba55..032fc1b76 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql @@ -2,15 +2,10 @@ #include "../udfs/citus_add_rebalance_strategy/10.1-1.sql" -ALTER TABLE pg_dist_placement DROP COLUMN needsisolatednode; +ALTER TABLE pg_dist_shard DROP COLUMN needsisolatednode; -DROP FUNCTION pg_catalog.citus_internal_add_placement_metadata( - shard_id bigint, - shard_length bigint, group_id integer, - placement_id bigint, - needs_isolated_node boolean); - -#include "../udfs/citus_internal_add_placement_metadata/11.2-1.sql" +DROP FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text, boolean); +#include "../udfs/citus_internal_add_shard_metadata/10.2-1.sql" DROP FUNCTION pg_catalog.citus_shard_set_isolated(shard_id bigint); DROP FUNCTION pg_catalog.citus_shard_unset_isolated(shard_id bigint); diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql deleted file mode 100644 index 00e65cf37..000000000 --- a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql +++ /dev/null @@ -1,24 +0,0 @@ --- 
create a new function, with needs_isolated_node -CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( - shard_id bigint, - shard_length bigint, group_id integer, - placement_id bigint, - needs_isolated_node boolean) - RETURNS void - LANGUAGE C STRICT - AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; - -COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint, boolean) IS - 'Inserts into pg_dist_shard_placement with user checks'; - --- replace the old one so it would call the old C function without needs_isolated_node -CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( - shard_id bigint, - shard_length bigint, group_id integer, - placement_id bigint) - RETURNS void - LANGUAGE C STRICT - AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata_legacy$$; - -COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint) IS - 'Inserts into pg_dist_shard_placement with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql index 00e65cf37..9d1dd4ffa 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql @@ -1,24 +1,24 @@ --- create a new function, with needs_isolated_node -CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( - shard_id bigint, - shard_length bigint, group_id integer, - placement_id bigint, - needs_isolated_node boolean) - RETURNS void - LANGUAGE C STRICT - AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; - -COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint, boolean) IS - 'Inserts into pg_dist_shard_placement with user checks'; - --- replace the old one 
so it would call the old C function without needs_isolated_node +-- create a new function, without shardstate CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( shard_id bigint, shard_length bigint, group_id integer, placement_id bigint) RETURNS void LANGUAGE C STRICT - AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata_legacy$$; + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint) IS 'Inserts into pg_dist_shard_placement with user checks'; + +-- replace the old one so it would call the old C function with shard_state +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( + shard_id bigint, shard_state integer, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata_legacy$$; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, integer, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql new file mode 100644 index 000000000..482d93df4 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql @@ -0,0 +1,23 @@ +-- create a new function, with needs_isolated_node +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text, needs_isolated_node boolean + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text, boolean) IS + 'Inserts into pg_dist_shard with user 
checks'; + +-- replace the old one so it would call the old C function without needs_isolated_node +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata_legacy$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql index 7411d9179..482d93df4 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql @@ -1,3 +1,16 @@ +-- create a new function, with needs_isolated_node +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text, needs_isolated_node boolean + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text, boolean) IS + 'Inserts into pg_dist_shard with user checks'; + +-- replace the old one so it would call the old C function without needs_isolated_node CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( relation_id regclass, shard_id bigint, storage_type "char", shard_min_value text, @@ -5,6 +18,6 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( ) RETURNS void LANGUAGE C - AS 'MODULE_PATHNAME'; + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata_legacy$$; COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", 
text, text) IS 'Inserts into pg_dist_shard with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_shard_set_isolated/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_shard_set_isolated/12.2-1.sql index 7c3fd3d1b..2607305ad 100644 --- a/src/backend/distributed/sql/udfs/citus_shard_set_isolated/12.2-1.sql +++ b/src/backend/distributed/sql/udfs/citus_shard_set_isolated/12.2-1.sql @@ -2,3 +2,5 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_set_isolated(shard_id bigint) RETURNS void LANGUAGE C VOLATILE AS 'MODULE_PATHNAME', $$citus_shard_set_isolated$$; +COMMENT ON FUNCTION pg_catalog.citus_shard_set_isolated(bigint) IS + 'Sets the needsisolatednode flag to true for all the shards in the shard group of the given shard.'; diff --git a/src/backend/distributed/sql/udfs/citus_shard_set_isolated/latest.sql b/src/backend/distributed/sql/udfs/citus_shard_set_isolated/latest.sql index 7c3fd3d1b..2607305ad 100644 --- a/src/backend/distributed/sql/udfs/citus_shard_set_isolated/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_shard_set_isolated/latest.sql @@ -2,3 +2,5 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_set_isolated(shard_id bigint) RETURNS void LANGUAGE C VOLATILE AS 'MODULE_PATHNAME', $$citus_shard_set_isolated$$; +COMMENT ON FUNCTION pg_catalog.citus_shard_set_isolated(bigint) IS + 'Sets the needsisolatednode flag to true for all the shards in the shard group of the given shard.'; diff --git a/src/backend/distributed/sql/udfs/citus_shard_unset_isolated/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_shard_unset_isolated/12.2-1.sql index 5502b2315..0d30215e5 100644 --- a/src/backend/distributed/sql/udfs/citus_shard_unset_isolated/12.2-1.sql +++ b/src/backend/distributed/sql/udfs/citus_shard_unset_isolated/12.2-1.sql @@ -2,3 +2,5 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_unset_isolated(shard_id bigint RETURNS void LANGUAGE C VOLATILE AS 'MODULE_PATHNAME', $$citus_shard_unset_isolated$$; +COMMENT ON FUNCTION 
pg_catalog.citus_shard_unset_isolated(bigint) IS + 'Sets the needsisolatednode flag to false for all the shards in the shard group of the given shard.'; diff --git a/src/backend/distributed/sql/udfs/citus_shard_unset_isolated/latest.sql b/src/backend/distributed/sql/udfs/citus_shard_unset_isolated/latest.sql index 5502b2315..0d30215e5 100644 --- a/src/backend/distributed/sql/udfs/citus_shard_unset_isolated/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_shard_unset_isolated/latest.sql @@ -2,3 +2,5 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_unset_isolated(shard_id bigint RETURNS void LANGUAGE C VOLATILE AS 'MODULE_PATHNAME', $$citus_shard_unset_isolated$$; +COMMENT ON FUNCTION pg_catalog.citus_shard_unset_isolated(bigint) IS + 'Sets the needsisolatednode flag to false for all the shards in the shard group of the given shard.'; diff --git a/src/backend/distributed/test/distribution_metadata.c b/src/backend/distributed/test/distribution_metadata.c index c3bc7fb51..f322e149c 100644 --- a/src/backend/distributed/test/distribution_metadata.c +++ b/src/backend/distributed/test/distribution_metadata.c @@ -228,8 +228,9 @@ create_monolithic_shard_row(PG_FUNCTION_ARGS) text *minInfoText = cstring_to_text(minInfo->data); text *maxInfoText = cstring_to_text(maxInfo->data); + bool needsIsolatedNode = false; InsertShardRow(distributedTableId, newShardId, SHARD_STORAGE_TABLE, minInfoText, - maxInfoText); + maxInfoText, needsIsolatedNode); PG_RETURN_INT64(newShardId); } diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index 897f44e30..8b0f87854 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -73,8 +73,7 @@ typedef struct SequenceInfo extern void citus_internal_add_placement_metadata_internal(int64 shardId, int64 shardLength, int32 groupId, - int64 placementId, - bool needsIsolatedNode); + int64 placementId); extern void SyncCitusTableMetadata(Oid relationId); extern 
void EnsureSequentialModeMetadataOperations(void); extern bool ClusterHasKnownMetadataWorkers(void); @@ -113,8 +112,7 @@ extern List * GenerateGrantOnForeignServerQueriesFromAclItem(Oid serverId, AclItem *aclItem); extern List * GenerateGrantOnFDWQueriesFromAclItem(Oid serverId, AclItem *aclItem); extern char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, - uint64 shardLength, int32 groupId, - bool needsIsolatedNode); + uint64 shardLength, int32 groupId); extern TableDDLCommand * TruncateTriggerCreateCommand(Oid relationId); extern void CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId); extern List * InterTableRelationshipOfRelationCommandList(Oid relationId); @@ -145,8 +143,7 @@ extern char * UpdateNoneDistTableMetadataCommand(Oid relationId, char replicatio uint32 colocationId, bool autoConverted); extern char * ShardGroupSetNeedsIsolatedNodeCommand(uint64 shardId, bool enabled); extern char * AddPlacementMetadataCommand(uint64 shardId, uint64 placementId, - uint64 shardLength, int32 groupId, - bool needsIsolatedNode); + uint64 shardLength, int32 groupId); extern char * DeletePlacementMetadataCommand(uint64 placementId); extern MetadataSyncContext * CreateMetadataSyncContext(List *nodeList, @@ -207,10 +204,10 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context); #define UPSERT_PLACEMENT \ "INSERT INTO pg_dist_placement " \ "(shardid, shardstate, shardlength, " \ - "groupid, placementid, needsisolatednode) " \ + "groupid, placementid) " \ "VALUES (" UINT64_FORMAT ", 1, " UINT64_FORMAT \ ", %d, " UINT64_FORMAT \ - ", %s) " \ + ") " \ "ON CONFLICT (shardid, groupid) DO UPDATE SET " \ "shardstate = EXCLUDED.shardstate, " \ "shardlength = EXCLUDED.shardlength, " \ diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index 65f261607..8ae377877 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -67,6 +67,7 @@ 
typedef struct ShardInterval Datum maxValue; /* a shard's typed max value datum */ uint64 shardId; int shardIndex; + bool needsIsolatedNode; } ShardInterval; @@ -78,7 +79,6 @@ typedef struct GroupShardPlacement uint64 shardId; uint64 shardLength; int32 groupId; - bool needsIsolatedNode; } GroupShardPlacement; @@ -93,7 +93,6 @@ typedef struct ShardPlacement uint64 shardId; uint64 shardLength; int32 groupId; - bool needsIsolatedNode; /* the rest of the fields aren't from pg_dist_placement */ char *nodeName; @@ -363,16 +362,15 @@ extern List * RemoveCoordinatorPlacementIfNotSingleNode(List *placementList); /* Function declarations to modify shard and shard placement data */ extern void InsertShardRow(Oid relationId, uint64 shardId, char storageType, - text *shardMinValue, text *shardMaxValue); + text *shardMinValue, text *shardMaxValue, + bool needsIsolatedNode); extern void DeleteShardRow(uint64 shardId); extern ShardPlacement * InsertShardPlacementRowGlobally(uint64 shardId, uint64 placementId, uint64 shardLength, - int32 groupId, - bool needsIsolatedNode); + int32 groupId); extern uint64 InsertShardPlacementRow(uint64 shardId, uint64 placementId, - uint64 shardLength, int32 groupId, - bool needsIsolatedNode); + uint64 shardLength, int32 groupId); extern void InsertIntoPgDistPartition(Oid relationId, char distributionMethod, Var *distributionColumn, uint32 colocationId, char replicationModel, bool autoConverted); diff --git a/src/include/distributed/pg_dist_placement.h b/src/include/distributed/pg_dist_placement.h index 5affde1a5..6aecdbf21 100644 --- a/src/include/distributed/pg_dist_placement.h +++ b/src/include/distributed/pg_dist_placement.h @@ -28,7 +28,6 @@ typedef struct FormData_pg_dist_placement int32 shardstate; /* shard state on remote node; see ShardState */ int64 shardlength; /* shard length on remote node; stored as bigint */ int32 groupid; /* the group the shard is placed on */ - bool needsisolatednode; /* whether the placement group needs to be 
isolated from others */ } FormData_pg_dist_placement; /* ---------------- @@ -42,13 +41,12 @@ typedef FormData_pg_dist_placement *Form_pg_dist_placement; * compiler constants for pg_dist_placement * ---------------- */ -#define Natts_pg_dist_placement 6 +#define Natts_pg_dist_placement 5 #define Anum_pg_dist_placement_placementid 1 #define Anum_pg_dist_placement_shardid 2 #define Anum_pg_dist_placement_shardstate 3 #define Anum_pg_dist_placement_shardlength 4 #define Anum_pg_dist_placement_groupid 5 -#define Anum_pg_dist_placement_needsisolatednode 6 #endif /* PG_DIST_PLACEMENT_H */ diff --git a/src/include/distributed/pg_dist_shard.h b/src/include/distributed/pg_dist_shard.h index 5c98b755f..f676074d9 100644 --- a/src/include/distributed/pg_dist_shard.h +++ b/src/include/distributed/pg_dist_shard.h @@ -30,6 +30,7 @@ typedef struct FormData_pg_dist_shard text shardminvalue; /* partition key's minimum value in shard */ text shardmaxvalue; /* partition key's maximum value in shard */ #endif + bool needsIsolatedNode; /* whether the placements of this shard need to be isolated */ } FormData_pg_dist_shard; /* ---------------- @@ -43,13 +44,14 @@ typedef FormData_pg_dist_shard *Form_pg_dist_shard; * compiler constants for pg_dist_shards * ---------------- */ -#define Natts_pg_dist_shard 6 +#define Natts_pg_dist_shard 7 #define Anum_pg_dist_shard_logicalrelid 1 #define Anum_pg_dist_shard_shardid 2 #define Anum_pg_dist_shard_shardstorage 3 #define Anum_pg_dist_shard_shardalias_DROPPED 4 #define Anum_pg_dist_shard_shardminvalue 5 #define Anum_pg_dist_shard_shardmaxvalue 6 +#define Anum_pg_dist_shard_needsisolatednode 7 /* * Valid values for shard storage types include foreign table, (standard) table diff --git a/src/include/distributed/rebalancer_placement_isolation.h b/src/include/distributed/rebalancer_placement_isolation.h index 7c3127f9e..e84ac0561 100644 --- a/src/include/distributed/rebalancer_placement_isolation.h +++ 
b/src/include/distributed/rebalancer_placement_isolation.h @@ -2,7 +2,7 @@ * * rebalancer_placement_isolation.h * Routines to determine which worker node should be used to isolate - * a colocated set of shard placements that needs isolation. + * a colocated set of shard placements that need isolation. * * Copyright (c) Citus Data, Inc. * diff --git a/src/include/distributed/shard_split.h b/src/include/distributed/shard_split.h index da1fe088d..c69db61c8 100644 --- a/src/include/distributed/shard_split.h +++ b/src/include/distributed/shard_split.h @@ -43,7 +43,7 @@ extern void SplitShard(SplitMode splitMode, uint64 shardIdToSplit, List *shardSplitPointsList, List *nodeIdsForPlacementList, - List *needsIsolatedNodeForPlacementList, + List *needsIsolatedNodeForPlacementList, DistributionColumnMap *distributionColumnOverrides, List *colocatedShardIntervalList, uint32 targetColocationId); diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 50cf018aa..efa9e310f 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -232,7 +232,7 @@ s/CREATE TABLESPACE test_tablespace LOCATION.*/CREATE TABLESPACE test_tablespace s/(.*absolute correlation \()([0,1]\.[0-9]+)(\) of var attribute [0-9]+ is smaller than.*)/\1X\.YZ\3/g # normalize differences in multi_fix_partition_shard_index_names test -s/NOTICE: issuing WITH placement_data\(shardid, shardlength, groupid, placementid, needsisolatednode\) AS \(VALUES \([0-9]+, [0-9]+, [0-9]+, [0-9]+, (true|false)\)\)/NOTICE: issuing WITH placement_data\(shardid, shardlength, groupid, placementid, needsisolatednode\) AS \(VALUES \(xxxxxx, xxxxxx, xxxxxx, xxxxxx, xxxxxx\)\)/g +s/NOTICE: issuing WITH placement_data\(shardid, shardlength, groupid, placementid\) AS \(VALUES \([0-9]+, [0-9]+, [0-9]+, [0-9]+\)\)/NOTICE: issuing WITH placement_data\(shardid, shardlength, groupid, placementid\) AS \(VALUES \(xxxxxx, xxxxxx, xxxxxx, xxxxxx\)\)/g # global_pid when 
pg_cancel_backend is sent to workers s/pg_cancel_backend\('[0-9]+'::bigint\)/pg_cancel_backend('xxxxx'::bigint)/g diff --git a/src/test/regress/expected/failure_create_distributed_table_concurrently.out b/src/test/regress/expected/failure_create_distributed_table_concurrently.out index cf1df651a..d557a0a47 100644 --- a/src/test/regress/expected/failure_create_distributed_table_concurrently.out +++ b/src/test/regress/expected/failure_create_distributed_table_concurrently.out @@ -189,10 +189,10 @@ SELECT create_distributed_table_concurrently('table_1', 'id'); (1 row) SELECT * FROM pg_dist_shard WHERE logicalrelid = 'table_1'::regclass; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - table_1 | 222247 | t | -2147483648 | -1 - table_1 | 222248 | t | 0 | 2147483647 + table_1 | 222247 | t | -2147483648 | -1 | f + table_1 | 222248 | t | 0 | 2147483647 | f (2 rows) DROP SCHEMA create_dist_tbl_con CASCADE; diff --git a/src/test/regress/expected/isolate_placement.out b/src/test/regress/expected/isolate_placement.out index ae546fac0..64bcadb07 100644 --- a/src/test/regress/expected/isolate_placement.out +++ b/src/test/regress/expected/isolate_placement.out @@ -89,7 +89,7 @@ SELECT create_distributed_table('dist_3', 'a', colocate_with=>'dist_1'); SET citus.shard_replication_factor TO 1; -- none of the placements have been marked as needsisolatednode yet SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; result @@ -118,14 +118,14 @@ SELECT citus_shard_set_isolated(:shardgroup_10_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM 
public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}, {"10": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}, {"10": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}, {"10": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}, {"10": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}, {"10": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}, {"10": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] (3 rows) SELECT shardids[1] AS shardgroup_3_shardid @@ -138,14 +138,14 @@ SELECT citus_shard_unset_isolated(:shardgroup_3_shardid); (1 row) SELECT 
result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}, {"10": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}, {"10": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}, {"10": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}, {"10": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}, {"10": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}, {"10": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] (3 rows) SELECT shardids[1] AS shardgroup_10_shardid @@ -158,14 +158,14 @@ SELECT 
citus_shard_unset_isolated(:shardgroup_10_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] (3 rows) SELECT shardids[1] AS shardgroup_5_shardid @@ -178,14 +178,14 @@ SELECT citus_shard_set_isolated(:shardgroup_5_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, 
true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] (3 rows) -- test metadata sync @@ -234,14 +234,14 @@ SELECT citus_shard_set_isolated(:shardgroup_5_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"5": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_3": [true]}]}] - [{"5": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_3": [true]}]}] - [{"5": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_3": [true]}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] (3 rows) CREATE TABLE dist_4(a int); @@ -263,14 +263,14 @@ DETAIL: UPDATE and DELETE commands on the relation will error out during create -- Placements of a new distributed table created within the same colocated -- group inherit needsisolatednode from the colocated placements too. 
SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"5": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_3": [true]}, {"isolate_placement.dist_4": [true]}, {"isolate_placement.dist_4_concurrently": [true]}]}] - [{"5": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_3": [true]}, {"isolate_placement.dist_4": [true]}, {"isolate_placement.dist_4_concurrently": [true]}]}] - [{"5": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_3": [true]}, {"isolate_placement.dist_4": [true]}, {"isolate_placement.dist_4_concurrently": [true]}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}, {"isolate_placement.dist_4": true}, {"isolate_placement.dist_4_concurrently": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}, {"isolate_placement.dist_4": true}, {"isolate_placement.dist_4_concurrently": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}, {"isolate_placement.dist_4": true}, {"isolate_placement.dist_4_concurrently": true}]}] (3 rows) DROP TABLE dist_4, dist_4_concurrently; @@ -344,14 +344,14 @@ FROM get_candidate_node_for_shard_transfer(:shardgroup_15_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - 
result + result --------------------------------------------------------------------- - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] - [{"5": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}, {"isolate_placement.dist_3": [true, true]}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] + [{"5": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_3": true}]}] (3 rows) DROP TABLE dist_1, dist_2, dist_3; @@ -379,14 +379,14 @@ SELECT citus_shard_set_isolated(:shardgroup_3_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"3": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] - [{"3": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] - [{"3": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] + [{"3": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"3": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"3": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] (3 rows) -- so that replicate_table_shards works @@ -400,14 +400,14 @@ SELECT replicate_table_shards('isolate_placement.dist_1', 
shard_replication_fact SET client_min_messages TO NOTICE; SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"3": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}]}] - [{"3": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}]}] - [{"3": [{"isolate_placement.dist_1": [true, true]}, {"isolate_placement.dist_2": [true, true]}]}] + [{"3": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"3": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"3": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] (3 rows) DROP TABLE dist_1, dist_2; @@ -435,14 +435,14 @@ SELECT citus_shard_set_isolated(:shardgroup_9_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"9": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] - [{"9": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] - [{"9": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] + [{"9": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"9": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"9": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] (3 rows) SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE 
nodeport=:worker_1_port \gset @@ -462,7 +462,7 @@ WHERE shardid = :shardgroup_9_shardid; -- We shouldn't see shard group 9 because shard-split operation doesn't -- preserve needsisolatednode flag when splitting the shard. SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; result @@ -482,14 +482,14 @@ SELECT citus_shard_set_isolated(:shardgroup_12_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"12": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] - [{"12": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] - [{"12": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] + [{"12": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"12": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"12": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] (3 rows) SELECT shardids[1] AS shardgroup_10_shardid @@ -510,14 +510,14 @@ WHERE shardid = :shardgroup_10_shardid; -- We should see old shard group 12 (now as 13 due to split -- of a prior shard) because it's not the one we splitted. 
SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"13": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] - [{"13": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] - [{"13": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}]}] + [{"13": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"13": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] + [{"13": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}]}] (3 rows) CREATE TABLE dist_3(a int); @@ -537,14 +537,14 @@ SELECT citus_shard_set_isolated(:shardgroup_17_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_3') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_3') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"17": [{"isolate_placement.dist_3": [true]}]}] - [{"17": [{"isolate_placement.dist_3": [true]}]}] - [{"17": [{"isolate_placement.dist_3": [true]}]}] + [{"17": [{"isolate_placement.dist_3": true}]}] + [{"17": [{"isolate_placement.dist_3": true}]}] + [{"17": [{"isolate_placement.dist_3": true}]}] (3 rows) -- verify that shard key value 100 is stored on shard group 17 @@ -563,7 +563,7 @@ SELECT 1 FROM isolate_tenant_to_new_shard('dist_3', 100, shard_transfer_mode => -- We shouldn't see shard group 17 because isolate_tenant_to_new_shard doesn't -- preserve needsisolatednode flag when splitting the shard. 
SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_3') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_3') $$) ORDER BY nodeid; result @@ -583,14 +583,14 @@ SELECT citus_shard_set_isolated(:shardgroup_18_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_3') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_3') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"18": [{"isolate_placement.dist_3": [true]}]}] - [{"18": [{"isolate_placement.dist_3": [true]}]}] - [{"18": [{"isolate_placement.dist_3": [true]}]}] + [{"18": [{"isolate_placement.dist_3": true}]}] + [{"18": [{"isolate_placement.dist_3": true}]}] + [{"18": [{"isolate_placement.dist_3": true}]}] (3 rows) -- verify that shard key value 1000 is _not_ stored on shard group 18 @@ -609,14 +609,14 @@ SELECT 1 FROM isolate_tenant_to_new_shard('dist_3', 1000, shard_transfer_mode => -- We should see shard group 18 (now as 20 due to split of a prior shard) -- because it's not the one we splitted. 
SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_3') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_3') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"20": [{"isolate_placement.dist_3": [true]}]}] - [{"20": [{"isolate_placement.dist_3": [true]}]}] - [{"20": [{"isolate_placement.dist_3": [true]}]}] + [{"20": [{"isolate_placement.dist_3": true}]}] + [{"20": [{"isolate_placement.dist_3": true}]}] + [{"20": [{"isolate_placement.dist_3": true}]}] (3 rows) CREATE TABLE single_shard_1(a int); @@ -643,14 +643,14 @@ SELECT create_distributed_table('single_shard_2', null, colocate_with=>'single_s (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.single_shard_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.single_shard_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"1": [{"isolate_placement.single_shard_1": [true]}, {"isolate_placement.single_shard_2": [true]}]}] - [{"1": [{"isolate_placement.single_shard_1": [true]}, {"isolate_placement.single_shard_2": [true]}]}] - [{"1": [{"isolate_placement.single_shard_1": [true]}, {"isolate_placement.single_shard_2": [true]}]}] + [{"1": [{"isolate_placement.single_shard_1": true}, {"isolate_placement.single_shard_2": true}]}] + [{"1": [{"isolate_placement.single_shard_1": true}, {"isolate_placement.single_shard_2": true}]}] + [{"1": [{"isolate_placement.single_shard_1": true}, {"isolate_placement.single_shard_2": true}]}] (3 rows) -- test invalid input @@ -910,14 +910,14 @@ SELECT create_distributed_table('range_table_post', 'a', 'range'); CALL public.create_range_partitioned_shards('range_table_post', '{"0","25"}','{"26","50"}'); SELECT result FROM 
run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"1": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_post_colocated": [true]}, {"isolate_placement.dist_post_concurrently_colocated": [true]}]}] - [{"1": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_post_colocated": [true]}, {"isolate_placement.dist_post_concurrently_colocated": [true]}]}] - [{"1": [{"isolate_placement.dist_1": [true]}, {"isolate_placement.dist_2": [true]}, {"isolate_placement.dist_post_colocated": [true]}, {"isolate_placement.dist_post_concurrently_colocated": [true]}]}] + [{"1": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_post_colocated": true}, {"isolate_placement.dist_post_concurrently_colocated": true}]}] + [{"1": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_post_colocated": true}, {"isolate_placement.dist_post_concurrently_colocated": true}]}] + [{"1": [{"isolate_placement.dist_1": true}, {"isolate_placement.dist_2": true}, {"isolate_placement.dist_post_colocated": true}, {"isolate_placement.dist_post_concurrently_colocated": true}]}] (3 rows) -- Make sure that the node that contains shard-group 1 of isolate_placement.dist_1 diff --git a/src/test/regress/expected/isolation_create_distributed_table_concurrently.out b/src/test/regress/expected/isolation_create_distributed_table_concurrently.out index d15e119c0..edc232518 100644 --- a/src/test/regress/expected/isolation_create_distributed_table_concurrently.out +++ b/src/test/regress/expected/isolation_create_distributed_table_concurrently.out @@ -57,12 +57,12 @@ 
step s2-print-status: -- sanity check on total elements in the table SELECT COUNT(*) FROM table_1; -logicalrelid|shardid|shardstorage|shardminvalue|shardmaxvalue +logicalrelid|shardid|shardstorage|shardminvalue|shardmaxvalue|needsisolatednode --------------------------------------------------------------------- -table_1 |1400294|t | -2147483648| -1073741825 -table_1 |1400295|t | -1073741824| -1 -table_1 |1400296|t | 0| 1073741823 -table_1 |1400297|t | 1073741824| 2147483647 +table_1 |1400294|t | -2147483648| -1073741825|f +table_1 |1400295|t | -1073741824| -1|f +table_1 |1400296|t | 0| 1073741823|f +table_1 |1400297|t | 1073741824| 2147483647|f (4 rows) count @@ -125,12 +125,12 @@ step s2-print-status: -- sanity check on total elements in the table SELECT COUNT(*) FROM table_1; -logicalrelid|shardid|shardstorage|shardminvalue|shardmaxvalue +logicalrelid|shardid|shardstorage|shardminvalue|shardmaxvalue|needsisolatednode --------------------------------------------------------------------- -table_1 |1400299|t | -2147483648| -1073741825 -table_1 |1400300|t | -1073741824| -1 -table_1 |1400301|t | 0| 1073741823 -table_1 |1400302|t | 1073741824| 2147483647 +table_1 |1400299|t | -2147483648| -1073741825|f +table_1 |1400300|t | -1073741824| -1|f +table_1 |1400301|t | 0| 1073741823|f +table_1 |1400302|t | 1073741824| 2147483647|f (4 rows) count @@ -193,12 +193,12 @@ step s2-print-status: -- sanity check on total elements in the table SELECT COUNT(*) FROM table_1; -logicalrelid|shardid|shardstorage|shardminvalue|shardmaxvalue +logicalrelid|shardid|shardstorage|shardminvalue|shardmaxvalue|needsisolatednode --------------------------------------------------------------------- -table_1 |1400304|t | -2147483648| -1073741825 -table_1 |1400305|t | -1073741824| -1 -table_1 |1400306|t | 0| 1073741823 -table_1 |1400307|t | 1073741824| 2147483647 +table_1 |1400304|t | -2147483648| -1073741825|f +table_1 |1400305|t | -1073741824| -1|f +table_1 |1400306|t | 0| 1073741823|f +table_1 
|1400307|t | 1073741824| 2147483647|f (4 rows) count @@ -261,12 +261,12 @@ step s2-print-status: -- sanity check on total elements in the table SELECT COUNT(*) FROM table_1; -logicalrelid|shardid|shardstorage|shardminvalue|shardmaxvalue +logicalrelid|shardid|shardstorage|shardminvalue|shardmaxvalue|needsisolatednode --------------------------------------------------------------------- -table_1 |1400309|t | -2147483648| -1073741825 -table_1 |1400310|t | -1073741824| -1 -table_1 |1400311|t | 0| 1073741823 -table_1 |1400312|t | 1073741824| 2147483647 +table_1 |1400309|t | -2147483648| -1073741825|f +table_1 |1400310|t | -1073741824| -1|f +table_1 |1400311|t | 0| 1073741823|f +table_1 |1400312|t | 1073741824| 2147483647|f (4 rows) count diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 85eff3c86..60d95be24 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -467,7 +467,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: must be owner of table super_user_table ROLLBACK; -- the user is only allowed to add a shard for add a table which is in pg_dist_partition @@ -482,7 +482,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT 
citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition. ROLLBACK; -- ok, now add the table to the pg_dist_partition @@ -541,7 +541,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: Invalid shard id: -1 ROLLBACK; -- invalid storage types are not allowed @@ -556,7 +556,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: Invalid shard storage type: X ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables @@ -571,7 +571,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) 
FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: Shards of has distributed table "test_2" cannot have NULL shard ranges ROLLBACK; -- non-integer shard ranges are not allowed @@ -586,7 +586,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: invalid input syntax for type integer: "non-int" ROLLBACK; -- shardMinValue should be smaller than shardMaxValue @@ -601,7 +601,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: shardMinValue=-1610612737 is greater than shardMaxValue=-2147483648 for table "test_2", which is not allowed ROLLBACK; -- we do not allow overlapping shards for the same table @@ -618,7 +618,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text), ('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text)) - SELECT 
citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: Shard intervals overlap for table "test_2": 1420001 and 1420000 ROLLBACK; -- Now let's check valid pg_dist_object updates @@ -777,7 +777,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: Metadata syncing is only allowed for hash, reference and local tables: X ROLLBACK; -- we do not allow NULL shardMinMax values @@ -794,7 +794,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; citus_internal_add_shard_metadata --------------------------------------------------------------------- @@ -804,7 +804,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", 
'20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: Shards of has distributed table "test_2" cannot have NULL shard ranges ROLLBACK; \c - metadata_sync_helper_role - :worker_1_port @@ -827,7 +827,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text), ('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text), ('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; citus_internal_add_shard_metadata --------------------------------------------------------------------- @@ -868,7 +868,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text), ('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text), ('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; citus_internal_add_shard_metadata --------------------------------------------------------------------- @@ -891,7 +891,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, 
NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: Shards of reference or local table "test_ref" should have NULL shard ranges ROLLBACK; -- reference tables cannot have multiple shards @@ -907,7 +907,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), ('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ERROR: relation "test_ref" has already at least one shard, adding more is not allowed ROLLBACK; -- finally, add a shard for reference tables @@ -922,7 +922,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; citus_internal_add_shard_metadata --------------------------------------------------------------------- @@ -943,7 +943,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT 
citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; citus_internal_add_shard_metadata --------------------------------------------------------------------- @@ -963,9 +963,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - WITH placement_data(shardid, shardlength, groupid, placementid) AS - (VALUES (-10, 0::bigint, 1::int, 1500000::bigint)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS + (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) + SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- invalid placementid @@ -980,7 +980,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, 1::int, -10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: Shard placement has invalid placement id (-10) for shard(1420000) ROLLBACK; -- non-existing shard @@ -995,7 +995,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1430100, 0::bigint, 1::int, 10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, 
shardlength, groupid, placementid) FROM placement_data; ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- non-existing node with non-existing node-id 123123123 @@ -1010,7 +1010,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES ( 1420000, 0::bigint, 123123123::int, 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: Node with group id 123123123 for shard placement xxxxx does not exist ROLLBACK; -- create a volatile function that returns the local node id @@ -1041,7 +1041,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, get_node_id(), 1500000), (1420000, 0::bigint, get_node_id(), 1500001)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: duplicate key value violates unique constraint "placement_shardid_groupid_unique_index" ROLLBACK; -- shard is not owned by us @@ -1056,7 +1056,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420007, 0::bigint, get_node_id(), 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: must be owner of table super_user_table ROLLBACK; -- sucessfully add placements @@ -1082,7 +1082,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1420011, 0::bigint, 
get_node_id(), 1500009), (1420012, 0::bigint, get_node_id(), 1500010), (1420013, 0::bigint, get_node_id(), 1500011)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; citus_internal_add_placement_metadata --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_create_shards.out b/src/test/regress/expected/multi_create_shards.out index 917369de2..e271f3dfe 100644 --- a/src/test/regress/expected/multi_create_shards.out +++ b/src/test/regress/expected/multi_create_shards.out @@ -1,5 +1,3 @@ -CREATE SCHEMA multi_create_shards; -SET search_path TO multi_create_shards; SET citus.next_shard_id TO 370000; -- =================================================================== -- create test functions and types needed for tests @@ -165,5 +163,3 @@ SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size 613566759 (7 rows) -SET client_min_messages TO WARNING; -DROP SCHEMA multi_create_shards CASCADE; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 94b97171e..f5ebd68fb 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -60,7 +60,7 @@ BEGIN FROM current_objects c FULL JOIN prev_objects p ON p.description = c.description WHERE (p.description is null OR c.description is null) - AND c.description IS DISTINCT FROM 'function any_value(anyelement) anyelement' + AND c.description IS DISTINCT FROM 'function any_value(anyelement) anyelement' AND c.description IS DISTINCT FROM 'function any_value_agg(anyelement,anyelement) anyelement'; DROP TABLE prev_objects; @@ -1420,14 +1420,13 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 12.2-1 ALTER EXTENSION citus UPDATE TO '12.2-1'; 
SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object + previous_object | current_object --------------------------------------------------------------------- - function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint) void | - | function citus_internal_add_placement_metadata(bigint,bigint,integer,bigint,boolean) void - | function citus_internal_shard_group_set_needsisolatednode(bigint,boolean) void - | function citus_shard_set_isolated(bigint) void - | function citus_shard_unset_isolated(bigint) void -(5 rows) + | function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text,boolean) void + | function citus_internal_shard_group_set_needsisolatednode(bigint,boolean) void + | function citus_shard_set_isolated(bigint) void + | function citus_shard_unset_isolated(bigint) void +(4 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index ebad0ecac..a9e856785 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -5,7 +5,6 @@ -- https://github.com/citusdata/citus/issues/5138 --------------------------------------------------------------------- SET citus.next_shard_id TO 910000; -SET citus.next_placement_id TO 910000; SET citus.shard_replication_factor TO 1; CREATE SCHEMA fix_idx_names; SET search_path TO fix_idx_names, public; @@ -455,7 +454,6 @@ SET ROLE user1; SELECT fix_partition_shard_index_names('fix_idx_names.dist_partitioned_table'::regclass); ERROR: permission denied for schema fix_idx_names RESET ROLE; -DROP ROLE user1; SET search_path TO fix_idx_names, public; DROP TABLE dist_partitioned_table; -- We can do any further operations (e.g. 
rename) on the indexes of partitions because @@ -493,7 +491,6 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' A (2 rows) \c - - - :master_port -SET citus.next_placement_id TO 920000; SET search_path TO fix_idx_names, public; DROP TABLE dist_partitioned_table; SET citus.next_shard_id TO 910040; @@ -637,24 +634,40 @@ ALTER INDEX p1_dist_col_idx3 RENAME TO p1_dist_col_idx3_renamed; ALTER INDEX p1_pkey RENAME TO p1_pkey_renamed; ALTER INDEX p1_dist_col_partition_col_key RENAME TO p1_dist_col_partition_col_key_renamed; ALTER INDEX p1_dist_col_idx RENAME TO p1_dist_col_idx_renamed; --- create columnar extension idempotently -SET client_min_messages TO WARNING; -CREATE EXTENSION IF NOT EXISTS citus_columnar; -RESET client_min_messages; -- should be able to create a new partition that is columnar SET citus.log_remote_commands TO ON; CREATE TABLE p2(dist_col int NOT NULL, another_col int, partition_col timestamp NOT NULL, name text) USING columnar; ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM ('2019-01-01') TO ('2020-01-01'); NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION "x.y-z"; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing CREATE 
EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION "x.y-z"; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names', 'CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, another_col integer, partition_col timestamp without time zone NOT NULL, name text) USING columnar') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing ALTER TABLE fix_idx_names.p2_915002 SET (columnar.chunk_group_row_limit = 10000, columnar.stripe_row_limit = 150000, columnar.compression_level = 3, 
columnar.compression = 'zstd'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names', 'ALTER TABLE fix_idx_names.p2 OWNER TO postgres') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' @@ -683,13 +696,13 @@ NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; +NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT 
citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; +NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; +NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; +NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx diff --git a/src/test/regress/expected/multi_join_pruning.out b/src/test/regress/expected/multi_join_pruning.out index 27fdc3980..6a948c81e 100644 --- 
a/src/test/regress/expected/multi_join_pruning.out +++ b/src/test/regress/expected/multi_join_pruning.out @@ -45,12 +45,12 @@ select * from pg_dist_shard where logicalrelid='lineitem'::regclass or logicalrelid='orders'::regclass order by shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - lineitem | 360000 | t | -2147483648 | -1 - lineitem | 360001 | t | 0 | 2147483647 - orders | 360002 | t | -2147483648 | -1 - orders | 360003 | t | 0 | 2147483647 + lineitem | 360000 | t | -2147483648 | -1 | f + lineitem | 360001 | t | 0 | 2147483647 | f + orders | 360002 | t | -2147483648 | -1 | f + orders | 360003 | t | 0 | 2147483647 | f (4 rows) set client_min_messages to debug3; @@ -140,7 +140,7 @@ DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and [AA1000U explain statements for distributed queries are not enabled (3 rows) -SET client_min_messages TO WARNING; +SET client_min_messages TO WARNING; DROP TABLE varchar_partitioned_table; DROP TABLE array_partitioned_table; DROP TABLE composite_partitioned_table; diff --git a/src/test/regress/expected/multi_metadata_attributes.out b/src/test/regress/expected/multi_metadata_attributes.out index 0e290cc68..897980f6d 100644 --- a/src/test/regress/expected/multi_metadata_attributes.out +++ b/src/test/regress/expected/multi_metadata_attributes.out @@ -11,7 +11,7 @@ WHERE atthasmissing AND attrelid NOT IN ('pg_dist_node'::regclass, 'pg_dist_partition'::regclass, 'pg_dist_object'::regclass, 'pg_dist_background_task'::regclass, - 'pg_dist_placement'::regclass) + 'pg_dist_shard'::regclass) ORDER BY attrelid, attname; attrelid | attname | atthasmissing | attmissingval --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_metadata_sync.out 
b/src/test/regress/expected/multi_metadata_sync.out index 0e5f8b950..5925ed4bb 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -133,7 +133,7 @@ INSERT INTO single_shard_tbl VALUES (1); reset citus.shard_replication_factor; -- Show that the created MX table is and its sequences are included in the activate node snapshot SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres @@ -192,10 +192,10 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, 
false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310008, 0, 2, 100008, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), 
(1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL, false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (61 rows) -- Drop single shard table @@ -203,7 +203,7 @@ DROP TABLE single_shard_tbl; -- Show that CREATE INDEX commands are included in the activate node snapshot CREATE INDEX mx_index ON mx_test_table(col_2); SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest 
--------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres @@ -255,15 +255,15 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 
1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) 
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (52 rows) -- Show that schema changes are included in the activate node snapshot CREATE SCHEMA mx_testing_schema; ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres @@ -317,8 +317,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), 
(1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), 
('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (54 rows) -- Show that append distributed tables are not included in the activate node snapshot @@ -331,7 +331,7 @@ SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append'); UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres @@ -385,14 +385,14 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM 
shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (54 rows) -- Show that range distributed tables are not included in the activate node snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres @@ -446,8 +446,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH 
distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", 
'-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, 
shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (54 rows) -- Test start_metadata_sync_to_node and citus_activate_node UDFs @@ -531,22 +531,22 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; (5 rows) SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted + logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted --------------------------------------------------------------------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f (1 row) SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 - mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 - mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 - mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 - mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 - mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 - mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 - mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 + mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 | f + mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 | f + mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 | f + mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 | f + 
mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 | f + mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 | f + mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 | f + mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 | f (8 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport; @@ -670,22 +670,22 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; (5 rows) SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted + logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted --------------------------------------------------------------------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f (1 row) SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 - mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 - mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 - mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 - mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 - mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 - mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 - mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 
| 2147483647 + mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 | f + mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 | f + mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 | f + mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 | f + mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 | f + mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 | f + mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 | f + mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 | f (8 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport; @@ -1021,7 +1021,7 @@ SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_test_schema%'; (0 rows) SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_test_schema%'; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- (0 rows) @@ -1681,7 +1681,7 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; ERROR: relation "mx_ref_index" does not exist SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- (0 rows) @@ -1942,7 +1942,7 @@ ALTER TABLE test_table ADD COLUMN id2 int DEFAULT nextval('mx_test_sequence_1'); ALTER TABLE test_table ALTER COLUMN id2 DROP DEFAULT; ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1'); SELECT unnest(activate_node_snapshot()) order 
by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres @@ -2050,18 +2050,18 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 5, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 5, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 5, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 5, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310022, 0, 1, 100022, 
false), (1310023, 0, 5, 100023, false), (1310024, 0, 1, 100024, false), (1310025, 0, 5, 100025, false), (1310026, 0, 1, 100026, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310027, 0, 1, 100027, false), (1310028, 0, 5, 100028, false), (1310029, 0, 1, 100029, false), (1310030, 0, 5, 100030, false), (1310031, 0, 1, 100031, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310075, 0, 0, 100077, false), (1310075, 0, 1, 100078, false), (1310075, 0, 5, 100079, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310076, 0, 1, 100080, false), (1310077, 0, 5, 100081, false), (1310078, 0, 1, 100082, false), (1310079, 0, 5, 100083, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310085, 0, 1, 100091, false), (1310086, 0, 5, 100092, false), (1310087, 0, 1, 100093, false), (1310088, 0, 5, 100094, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), 
('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, 
shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), 
(1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190', false), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731', false), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728', false), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187', false), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, 
shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190', false), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731', false), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728', false), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187', false), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES 
('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825', false), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1', false), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823', false), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL, false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825', false), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1', false), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823', false), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (118 rows) -- shouldn't work since test_table is MX diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out index d3e448c47..3334cdc5b 100644 --- a/src/test/regress/expected/multi_metadata_sync_0.out +++ b/src/test/regress/expected/multi_metadata_sync_0.out @@ -133,7 +133,7 @@ INSERT INTO single_shard_tbl VALUES (1); reset citus.shard_replication_factor; -- Show that the created MX table is and its sequences are included in the activate node snapshot SELECT unnest(activate_node_snapshot()) order by 1; - 
unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres @@ -192,10 +192,10 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310008, 0, 2, 100008, 
false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, 
shardmaxvalue, needsisolatednode) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL, false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (61 rows) -- Drop single shard table @@ -203,7 +203,7 @@ DROP TABLE single_shard_tbl; -- Show that CREATE INDEX commands are included in the activate node snapshot CREATE INDEX mx_index ON mx_test_table(col_2); SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres @@ -255,15 +255,15 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, 
false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', 
'1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (52 rows) -- Show that schema changes are included in the activate node snapshot CREATE SCHEMA mx_testing_schema; ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- 
ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres @@ -317,8 +317,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", 
'-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", 
'1073741824', '1610612735', false), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (54 rows) -- Show that append distributed tables are not included in the activate node snapshot @@ -331,7 +331,7 @@ SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append'); UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres @@ -385,14 +385,14 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, 
colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, 
shardmaxvalue, needsisolatednode) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (54 rows) -- Show that range distributed tables are not included in the activate node snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres @@ -446,8 +446,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, 
colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 2, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 2, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 2, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 2, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", 
'1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823', false), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (54 rows) -- Test start_metadata_sync_to_node and citus_activate_node UDFs @@ -537,16 +537,16 @@ SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema (1 row) SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | 
shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 - mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 - mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 - mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 - mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 - mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 - mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 - mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 + mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 | f + mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 | f + mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 | f + mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 | f + mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 | f + mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 | f + mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 | f + mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 | f (8 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport; @@ -676,16 +676,16 @@ SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema (1 row) SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - mx_testing_schema.mx_test_table | 1310000 | 
t | -2147483648 | -1610612737 - mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 - mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 - mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 - mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 - mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 - mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 - mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 + mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 | f + mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 | f + mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 | f + mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 | f + mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 | f + mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 | f + mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 | f + mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 | f (8 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport; @@ -1021,7 +1021,7 @@ SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_test_schema%'; (0 rows) SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_test_schema%'; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- (0 rows) @@ -1681,7 +1681,7 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_ref_index'::regclass; ERROR: relation "mx_ref_index" does not exist SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid; - 
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- (0 rows) @@ -1942,7 +1942,7 @@ ALTER TABLE test_table ADD COLUMN id2 int DEFAULT nextval('mx_test_sequence_1'); ALTER TABLE test_table ALTER COLUMN id2 DROP DEFAULT; ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1'); SELECT unnest(activate_node_snapshot()) order by 1; - unnest + unnest --------------------------------------------------------------------- ALTER DATABASE regression OWNER TO postgres; ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres @@ -2050,18 +2050,18 @@ SELECT unnest(activate_node_snapshot()) order by 1; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid, 
needsisolatednode) AS (VALUES (1310000, 0, 1, 100000, false), (1310001, 0, 5, 100001, false), (1310002, 0, 1, 100002, false), (1310003, 0, 5, 100003, false), (1310004, 0, 1, 100004, false), (1310005, 0, 5, 100005, false), (1310006, 0, 1, 100006, false), (1310007, 0, 5, 100007, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310022, 0, 1, 100022, false), (1310023, 0, 5, 100023, false), (1310024, 0, 1, 100024, false), (1310025, 0, 5, 100025, false), (1310026, 0, 1, 100026, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310027, 0, 1, 100027, false), (1310028, 0, 5, 100028, false), (1310029, 0, 1, 100029, false), (1310030, 0, 5, 100030, false), (1310031, 0, 1, 100031, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310075, 0, 0, 100077, false), (1310075, 0, 1, 100078, false), (1310075, 0, 5, 100079, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310076, 0, 1, 100080, false), (1310077, 0, 5, 100081, false), (1310078, 0, 1, 100082, false), (1310079, 0, 5, 100083, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid, needsisolatednode) AS (VALUES (1310085, 0, 1, 100091, false), (1310086, 0, 
5, 100092, false), (1310087, 0, 1, 100093, false), (1310088, 0, 5, 100094, false)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, needsisolatednode) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), 
('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH placement_data(shardid, shardlength, 
groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', 
'-1288490190', false), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731', false), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728', false), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187', false), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190', false), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731', false), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728', false), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187', false), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737', false), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825', false), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913', false), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1', false), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911', false), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 
't'::"char", '536870912', '1073741823', false), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735', false), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825', false), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1', false), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823', false), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL, false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825', false), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1', false), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823', false), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647', false)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, needsisolatednode) FROM shard_data; (118 rows) -- shouldn't work since test_table is MX diff 
--git a/src/test/regress/expected/multi_row_insert.out b/src/test/regress/expected/multi_row_insert.out index caa149ce2..da9c73ff6 100644 --- a/src/test/regress/expected/multi_row_insert.out +++ b/src/test/regress/expected/multi_row_insert.out @@ -12,10 +12,10 @@ SELECT create_distributed_table('source_table_xyz', 'key', 'range'); CALL public.create_range_partitioned_shards('source_table_xyz', '{"(0,a)","(25,z)"}','{"(24,a)","(49,z)"}'); SELECT * FROM pg_dist_shard WHERE logicalrelid='source_table_xyz'::regclass::oid ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - source_table_xyz | 4213581 | t | (0,a) | (24,a) - source_table_xyz | 4213582 | t | (25,z) | (49,z) + source_table_xyz | 4213581 | t | (0,a) | (24,a) | f + source_table_xyz | 4213582 | t | (25,z) | (49,z) | f (2 rows) SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement WHERE EXISTS(SELECT shardid FROM pg_dist_shard WHERE shardid=pg_dist_shard_placement.shardid AND logicalrelid='source_table_xyz'::regclass::oid) ORDER BY 1, 2, 3; diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out index 2db4a7797..9fa53faef 100644 --- a/src/test/regress/expected/multi_table_ddl.out +++ b/src/test/regress/expected/multi_table_ddl.out @@ -65,7 +65,7 @@ SELECT * FROM pg_dist_partition; (0 rows) SELECT * FROM pg_dist_shard; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/multi_tenant_isolation.out b/src/test/regress/expected/multi_tenant_isolation.out index 5af7acac8..b3310e4c7 100644 --- 
a/src/test/regress/expected/multi_tenant_isolation.out +++ b/src/test/regress/expected/multi_tenant_isolation.out @@ -168,12 +168,12 @@ SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; SELECT * FROM pg_dist_shard WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - lineitem_streaming | 1230000 | t | -2147483648 | -1 - orders_streaming | 1230002 | t | -2147483648 | -1 - lineitem_streaming | 1230001 | t | 0 | 2147483647 - orders_streaming | 1230003 | t | 0 | 2147483647 + lineitem_streaming | 1230000 | t | -2147483648 | -1 | f + orders_streaming | 1230002 | t | -2147483648 | -1 | f + lineitem_streaming | 1230001 | t | 0 | 2147483647 | f + orders_streaming | 1230003 | t | 0 | 2147483647 | f (4 rows) -- check without cascade option @@ -387,32 +387,32 @@ SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; SELECT * FROM pg_dist_shard WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 - orders_streaming | 1230042 | t | -2147483648 | -2147483648 - lineitem_streaming | 1230041 | t | -2147483647 | -136164586 - orders_streaming | 1230043 | t | -2147483647 | -136164586 - lineitem_streaming | 1230035 | t | -136164585 | -136164585 - orders_streaming | 1230038 | t | -136164585 | -136164585 - lineitem_streaming | 1230036 | t | -136164584 | -85071815 - 
orders_streaming | 1230039 | t | -136164584 | -85071815 - lineitem_streaming | 1230011 | t | -85071814 | -85071814 - orders_streaming | 1230014 | t | -85071814 | -85071814 - lineitem_streaming | 1230012 | t | -85071813 | -1 - orders_streaming | 1230015 | t | -85071813 | -1 - lineitem_streaming | 1230004 | t | 0 | 108199380 - orders_streaming | 1230007 | t | 0 | 108199380 - lineitem_streaming | 1230005 | t | 108199381 | 108199381 - orders_streaming | 1230008 | t | 108199381 | 108199381 - lineitem_streaming | 1230028 | t | 108199382 | 412880111 - orders_streaming | 1230031 | t | 108199382 | 412880111 - lineitem_streaming | 1230029 | t | 412880112 | 412880112 - orders_streaming | 1230032 | t | 412880112 | 412880112 - lineitem_streaming | 1230044 | t | 412880113 | 2147483646 - orders_streaming | 1230046 | t | 412880113 | 2147483646 - lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 - orders_streaming | 1230047 | t | 2147483647 | 2147483647 + lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 | f + orders_streaming | 1230042 | t | -2147483648 | -2147483648 | f + lineitem_streaming | 1230041 | t | -2147483647 | -136164586 | f + orders_streaming | 1230043 | t | -2147483647 | -136164586 | f + lineitem_streaming | 1230035 | t | -136164585 | -136164585 | f + orders_streaming | 1230038 | t | -136164585 | -136164585 | f + lineitem_streaming | 1230036 | t | -136164584 | -85071815 | f + orders_streaming | 1230039 | t | -136164584 | -85071815 | f + lineitem_streaming | 1230011 | t | -85071814 | -85071814 | f + orders_streaming | 1230014 | t | -85071814 | -85071814 | f + lineitem_streaming | 1230012 | t | -85071813 | -1 | f + orders_streaming | 1230015 | t | -85071813 | -1 | f + lineitem_streaming | 1230004 | t | 0 | 108199380 | f + orders_streaming | 1230007 | t | 0 | 108199380 | f + lineitem_streaming | 1230005 | t | 108199381 | 108199381 | f + orders_streaming | 1230008 | t | 108199381 | 108199381 | f + lineitem_streaming | 1230028 | t | 108199382 | 
412880111 | f + orders_streaming | 1230031 | t | 108199382 | 412880111 | f + lineitem_streaming | 1230029 | t | 412880112 | 412880112 | f + orders_streaming | 1230032 | t | 412880112 | 412880112 | f + lineitem_streaming | 1230044 | t | 412880113 | 2147483646 | f + orders_streaming | 1230046 | t | 412880113 | 2147483646 | f + lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 | f + orders_streaming | 1230047 | t | 2147483647 | 2147483647 | f (24 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid BETWEEN 1230000 AND 1399999 ORDER BY nodeport, shardid; @@ -541,32 +541,32 @@ SET search_path to "Tenant Isolation"; SELECT * FROM pg_dist_shard WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 - orders_streaming | 1230042 | t | -2147483648 | -2147483648 - lineitem_streaming | 1230041 | t | -2147483647 | -136164586 - orders_streaming | 1230043 | t | -2147483647 | -136164586 - lineitem_streaming | 1230035 | t | -136164585 | -136164585 - orders_streaming | 1230038 | t | -136164585 | -136164585 - lineitem_streaming | 1230036 | t | -136164584 | -85071815 - orders_streaming | 1230039 | t | -136164584 | -85071815 - lineitem_streaming | 1230011 | t | -85071814 | -85071814 - orders_streaming | 1230014 | t | -85071814 | -85071814 - lineitem_streaming | 1230012 | t | -85071813 | -1 - orders_streaming | 1230015 | t | -85071813 | -1 - lineitem_streaming | 1230004 | t | 0 | 108199380 - orders_streaming | 1230007 | t | 0 | 108199380 - lineitem_streaming | 1230005 | t | 108199381 | 108199381 - orders_streaming | 1230008 | t | 108199381 | 108199381 - lineitem_streaming | 1230028 | t | 
108199382 | 412880111 - orders_streaming | 1230031 | t | 108199382 | 412880111 - lineitem_streaming | 1230029 | t | 412880112 | 412880112 - orders_streaming | 1230032 | t | 412880112 | 412880112 - lineitem_streaming | 1230044 | t | 412880113 | 2147483646 - orders_streaming | 1230046 | t | 412880113 | 2147483646 - lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 - orders_streaming | 1230047 | t | 2147483647 | 2147483647 + lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 | f + orders_streaming | 1230042 | t | -2147483648 | -2147483648 | f + lineitem_streaming | 1230041 | t | -2147483647 | -136164586 | f + orders_streaming | 1230043 | t | -2147483647 | -136164586 | f + lineitem_streaming | 1230035 | t | -136164585 | -136164585 | f + orders_streaming | 1230038 | t | -136164585 | -136164585 | f + lineitem_streaming | 1230036 | t | -136164584 | -85071815 | f + orders_streaming | 1230039 | t | -136164584 | -85071815 | f + lineitem_streaming | 1230011 | t | -85071814 | -85071814 | f + orders_streaming | 1230014 | t | -85071814 | -85071814 | f + lineitem_streaming | 1230012 | t | -85071813 | -1 | f + orders_streaming | 1230015 | t | -85071813 | -1 | f + lineitem_streaming | 1230004 | t | 0 | 108199380 | f + orders_streaming | 1230007 | t | 0 | 108199380 | f + lineitem_streaming | 1230005 | t | 108199381 | 108199381 | f + orders_streaming | 1230008 | t | 108199381 | 108199381 | f + lineitem_streaming | 1230028 | t | 108199382 | 412880111 | f + orders_streaming | 1230031 | t | 108199382 | 412880111 | f + lineitem_streaming | 1230029 | t | 412880112 | 412880112 | f + orders_streaming | 1230032 | t | 412880112 | 412880112 | f + lineitem_streaming | 1230044 | t | 412880113 | 2147483646 | f + orders_streaming | 1230046 | t | 412880113 | 2147483646 | f + lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 | f + orders_streaming | 1230047 | t | 2147483647 | 2147483647 | f (24 rows) -- return to master node @@ -684,32 +684,32 @@ ERROR: cannot isolate 
tenant because tenant isolation is only support for hash SELECT * FROM pg_dist_shard WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 - orders_streaming | 1230042 | t | -2147483648 | -2147483648 - lineitem_streaming | 1230041 | t | -2147483647 | -136164586 - orders_streaming | 1230043 | t | -2147483647 | -136164586 - lineitem_streaming | 1230035 | t | -136164585 | -136164585 - orders_streaming | 1230038 | t | -136164585 | -136164585 - lineitem_streaming | 1230036 | t | -136164584 | -85071815 - orders_streaming | 1230039 | t | -136164584 | -85071815 - lineitem_streaming | 1230011 | t | -85071814 | -85071814 - orders_streaming | 1230014 | t | -85071814 | -85071814 - lineitem_streaming | 1230012 | t | -85071813 | -1 - orders_streaming | 1230015 | t | -85071813 | -1 - lineitem_streaming | 1230004 | t | 0 | 108199380 - orders_streaming | 1230007 | t | 0 | 108199380 - lineitem_streaming | 1230005 | t | 108199381 | 108199381 - orders_streaming | 1230008 | t | 108199381 | 108199381 - lineitem_streaming | 1230028 | t | 108199382 | 412880111 - orders_streaming | 1230031 | t | 108199382 | 412880111 - lineitem_streaming | 1230029 | t | 412880112 | 412880112 - orders_streaming | 1230032 | t | 412880112 | 412880112 - lineitem_streaming | 1230044 | t | 412880113 | 2147483646 - orders_streaming | 1230046 | t | 412880113 | 2147483646 - lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 - orders_streaming | 1230047 | t | 2147483647 | 2147483647 + lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 | f + orders_streaming | 1230042 | t | -2147483648 | -2147483648 | f + 
lineitem_streaming | 1230041 | t | -2147483647 | -136164586 | f + orders_streaming | 1230043 | t | -2147483647 | -136164586 | f + lineitem_streaming | 1230035 | t | -136164585 | -136164585 | f + orders_streaming | 1230038 | t | -136164585 | -136164585 | f + lineitem_streaming | 1230036 | t | -136164584 | -85071815 | f + orders_streaming | 1230039 | t | -136164584 | -85071815 | f + lineitem_streaming | 1230011 | t | -85071814 | -85071814 | f + orders_streaming | 1230014 | t | -85071814 | -85071814 | f + lineitem_streaming | 1230012 | t | -85071813 | -1 | f + orders_streaming | 1230015 | t | -85071813 | -1 | f + lineitem_streaming | 1230004 | t | 0 | 108199380 | f + orders_streaming | 1230007 | t | 0 | 108199380 | f + lineitem_streaming | 1230005 | t | 108199381 | 108199381 | f + orders_streaming | 1230008 | t | 108199381 | 108199381 | f + lineitem_streaming | 1230028 | t | 108199382 | 412880111 | f + orders_streaming | 1230031 | t | 108199382 | 412880111 | f + lineitem_streaming | 1230029 | t | 412880112 | 412880112 | f + orders_streaming | 1230032 | t | 412880112 | 412880112 | f + lineitem_streaming | 1230044 | t | 412880113 | 2147483646 | f + orders_streaming | 1230046 | t | 412880113 | 2147483646 | f + lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 | f + orders_streaming | 1230047 | t | 2147483647 | 2147483647 | f (24 rows) \c - postgres - :master_port diff --git a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out index 3ec16e6ee..3e6506c21 100644 --- a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out +++ b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out @@ -168,12 +168,12 @@ SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; SELECT * FROM pg_dist_shard WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | 
shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - lineitem_streaming | 1230000 | t | -2147483648 | -1 - orders_streaming | 1230002 | t | -2147483648 | -1 - lineitem_streaming | 1230001 | t | 0 | 2147483647 - orders_streaming | 1230003 | t | 0 | 2147483647 + lineitem_streaming | 1230000 | t | -2147483648 | -1 | f + orders_streaming | 1230002 | t | -2147483648 | -1 | f + lineitem_streaming | 1230001 | t | 0 | 2147483647 | f + orders_streaming | 1230003 | t | 0 | 2147483647 | f (4 rows) -- check without cascade option @@ -399,32 +399,32 @@ SELECT count(*) FROM orders_streaming WHERE o_orderkey = 103; SELECT * FROM pg_dist_shard WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 - orders_streaming | 1230042 | t | -2147483648 | -2147483648 - lineitem_streaming | 1230041 | t | -2147483647 | -136164586 - orders_streaming | 1230043 | t | -2147483647 | -136164586 - lineitem_streaming | 1230035 | t | -136164585 | -136164585 - orders_streaming | 1230038 | t | -136164585 | -136164585 - lineitem_streaming | 1230036 | t | -136164584 | -85071815 - orders_streaming | 1230039 | t | -136164584 | -85071815 - lineitem_streaming | 1230011 | t | -85071814 | -85071814 - orders_streaming | 1230014 | t | -85071814 | -85071814 - lineitem_streaming | 1230012 | t | -85071813 | -1 - orders_streaming | 1230015 | t | -85071813 | -1 - lineitem_streaming | 1230004 | t | 0 | 108199380 - orders_streaming | 1230007 | t | 0 | 108199380 - lineitem_streaming | 
1230005 | t | 108199381 | 108199381 - orders_streaming | 1230008 | t | 108199381 | 108199381 - lineitem_streaming | 1230028 | t | 108199382 | 412880111 - orders_streaming | 1230031 | t | 108199382 | 412880111 - lineitem_streaming | 1230029 | t | 412880112 | 412880112 - orders_streaming | 1230032 | t | 412880112 | 412880112 - lineitem_streaming | 1230044 | t | 412880113 | 2147483646 - orders_streaming | 1230046 | t | 412880113 | 2147483646 - lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 - orders_streaming | 1230047 | t | 2147483647 | 2147483647 + lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 | f + orders_streaming | 1230042 | t | -2147483648 | -2147483648 | f + lineitem_streaming | 1230041 | t | -2147483647 | -136164586 | f + orders_streaming | 1230043 | t | -2147483647 | -136164586 | f + lineitem_streaming | 1230035 | t | -136164585 | -136164585 | f + orders_streaming | 1230038 | t | -136164585 | -136164585 | f + lineitem_streaming | 1230036 | t | -136164584 | -85071815 | f + orders_streaming | 1230039 | t | -136164584 | -85071815 | f + lineitem_streaming | 1230011 | t | -85071814 | -85071814 | f + orders_streaming | 1230014 | t | -85071814 | -85071814 | f + lineitem_streaming | 1230012 | t | -85071813 | -1 | f + orders_streaming | 1230015 | t | -85071813 | -1 | f + lineitem_streaming | 1230004 | t | 0 | 108199380 | f + orders_streaming | 1230007 | t | 0 | 108199380 | f + lineitem_streaming | 1230005 | t | 108199381 | 108199381 | f + orders_streaming | 1230008 | t | 108199381 | 108199381 | f + lineitem_streaming | 1230028 | t | 108199382 | 412880111 | f + orders_streaming | 1230031 | t | 108199382 | 412880111 | f + lineitem_streaming | 1230029 | t | 412880112 | 412880112 | f + orders_streaming | 1230032 | t | 412880112 | 412880112 | f + lineitem_streaming | 1230044 | t | 412880113 | 2147483646 | f + orders_streaming | 1230046 | t | 412880113 | 2147483646 | f + lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 | f + 
orders_streaming | 1230047 | t | 2147483647 | 2147483647 | f (24 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid BETWEEN 1230000 AND 1399999 ORDER BY nodeport, shardid; @@ -553,32 +553,32 @@ SET search_path to "Tenant Isolation"; SELECT * FROM pg_dist_shard WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode --------------------------------------------------------------------- - lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 - orders_streaming | 1230042 | t | -2147483648 | -2147483648 - lineitem_streaming | 1230041 | t | -2147483647 | -136164586 - orders_streaming | 1230043 | t | -2147483647 | -136164586 - lineitem_streaming | 1230035 | t | -136164585 | -136164585 - orders_streaming | 1230038 | t | -136164585 | -136164585 - lineitem_streaming | 1230036 | t | -136164584 | -85071815 - orders_streaming | 1230039 | t | -136164584 | -85071815 - lineitem_streaming | 1230011 | t | -85071814 | -85071814 - orders_streaming | 1230014 | t | -85071814 | -85071814 - lineitem_streaming | 1230012 | t | -85071813 | -1 - orders_streaming | 1230015 | t | -85071813 | -1 - lineitem_streaming | 1230004 | t | 0 | 108199380 - orders_streaming | 1230007 | t | 0 | 108199380 - lineitem_streaming | 1230005 | t | 108199381 | 108199381 - orders_streaming | 1230008 | t | 108199381 | 108199381 - lineitem_streaming | 1230028 | t | 108199382 | 412880111 - orders_streaming | 1230031 | t | 108199382 | 412880111 - lineitem_streaming | 1230029 | t | 412880112 | 412880112 - orders_streaming | 1230032 | t | 412880112 | 412880112 - lineitem_streaming | 1230044 | t | 412880113 | 2147483646 - orders_streaming | 1230046 | t | 412880113 | 2147483646 - lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 - orders_streaming | 
1230047 | t | 2147483647 | 2147483647 + lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 | f + orders_streaming | 1230042 | t | -2147483648 | -2147483648 | f + lineitem_streaming | 1230041 | t | -2147483647 | -136164586 | f + orders_streaming | 1230043 | t | -2147483647 | -136164586 | f + lineitem_streaming | 1230035 | t | -136164585 | -136164585 | f + orders_streaming | 1230038 | t | -136164585 | -136164585 | f + lineitem_streaming | 1230036 | t | -136164584 | -85071815 | f + orders_streaming | 1230039 | t | -136164584 | -85071815 | f + lineitem_streaming | 1230011 | t | -85071814 | -85071814 | f + orders_streaming | 1230014 | t | -85071814 | -85071814 | f + lineitem_streaming | 1230012 | t | -85071813 | -1 | f + orders_streaming | 1230015 | t | -85071813 | -1 | f + lineitem_streaming | 1230004 | t | 0 | 108199380 | f + orders_streaming | 1230007 | t | 0 | 108199380 | f + lineitem_streaming | 1230005 | t | 108199381 | 108199381 | f + orders_streaming | 1230008 | t | 108199381 | 108199381 | f + lineitem_streaming | 1230028 | t | 108199382 | 412880111 | f + orders_streaming | 1230031 | t | 108199382 | 412880111 | f + lineitem_streaming | 1230029 | t | 412880112 | 412880112 | f + orders_streaming | 1230032 | t | 412880112 | 412880112 | f + lineitem_streaming | 1230044 | t | 412880113 | 2147483646 | f + orders_streaming | 1230046 | t | 412880113 | 2147483646 | f + lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 | f + orders_streaming | 1230047 | t | 2147483647 | 2147483647 | f (24 rows) -- return to master node @@ -728,32 +728,32 @@ ERROR: cannot isolate tenant because tenant isolation is only support for hash SELECT * FROM pg_dist_shard WHERE logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass ORDER BY shardminvalue::BIGINT, logicalrelid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue + logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | needsisolatednode 
--------------------------------------------------------------------- - lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 - orders_streaming | 1230042 | t | -2147483648 | -2147483648 - lineitem_streaming | 1230041 | t | -2147483647 | -136164586 - orders_streaming | 1230043 | t | -2147483647 | -136164586 - lineitem_streaming | 1230035 | t | -136164585 | -136164585 - orders_streaming | 1230038 | t | -136164585 | -136164585 - lineitem_streaming | 1230036 | t | -136164584 | -85071815 - orders_streaming | 1230039 | t | -136164584 | -85071815 - lineitem_streaming | 1230011 | t | -85071814 | -85071814 - orders_streaming | 1230014 | t | -85071814 | -85071814 - lineitem_streaming | 1230012 | t | -85071813 | -1 - orders_streaming | 1230015 | t | -85071813 | -1 - lineitem_streaming | 1230004 | t | 0 | 108199380 - orders_streaming | 1230007 | t | 0 | 108199380 - lineitem_streaming | 1230005 | t | 108199381 | 108199381 - orders_streaming | 1230008 | t | 108199381 | 108199381 - lineitem_streaming | 1230028 | t | 108199382 | 412880111 - orders_streaming | 1230031 | t | 108199382 | 412880111 - lineitem_streaming | 1230029 | t | 412880112 | 412880112 - orders_streaming | 1230032 | t | 412880112 | 412880112 - lineitem_streaming | 1230044 | t | 412880113 | 2147483646 - orders_streaming | 1230046 | t | 412880113 | 2147483646 - lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 - orders_streaming | 1230047 | t | 2147483647 | 2147483647 + lineitem_streaming | 1230040 | t | -2147483648 | -2147483648 | f + orders_streaming | 1230042 | t | -2147483648 | -2147483648 | f + lineitem_streaming | 1230041 | t | -2147483647 | -136164586 | f + orders_streaming | 1230043 | t | -2147483647 | -136164586 | f + lineitem_streaming | 1230035 | t | -136164585 | -136164585 | f + orders_streaming | 1230038 | t | -136164585 | -136164585 | f + lineitem_streaming | 1230036 | t | -136164584 | -85071815 | f + orders_streaming | 1230039 | t | -136164584 | -85071815 | f + lineitem_streaming | 
1230011 | t | -85071814 | -85071814 | f + orders_streaming | 1230014 | t | -85071814 | -85071814 | f + lineitem_streaming | 1230012 | t | -85071813 | -1 | f + orders_streaming | 1230015 | t | -85071813 | -1 | f + lineitem_streaming | 1230004 | t | 0 | 108199380 | f + orders_streaming | 1230007 | t | 0 | 108199380 | f + lineitem_streaming | 1230005 | t | 108199381 | 108199381 | f + orders_streaming | 1230008 | t | 108199381 | 108199381 | f + lineitem_streaming | 1230028 | t | 108199382 | 412880111 | f + orders_streaming | 1230031 | t | 108199382 | 412880111 | f + lineitem_streaming | 1230029 | t | 412880112 | 412880112 | f + orders_streaming | 1230032 | t | 412880112 | 412880112 | f + lineitem_streaming | 1230044 | t | 412880113 | 2147483646 | f + orders_streaming | 1230046 | t | 412880113 | 2147483646 | f + lineitem_streaming | 1230045 | t | 2147483647 | 2147483647 | f + orders_streaming | 1230047 | t | 2147483647 | 2147483647 | f (24 rows) \c - postgres - :master_port diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index ce95fd15f..d60f5bc8f 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -547,31 +547,29 @@ AS $func$ END; $func$ LANGUAGE plpgsql; -- Takes a table name and returns a json object for each shard group that --- contains a shard placement that needs an isolated node. +-- contains a shard whose placements need isolation. -- --- This does not only return the placements of input relation but also considers +-- This does not only return the shards of input relation but also considers -- all colocated relations. 
-- -- An example output is as follows: -- -- [ --- {"10": [{"dist_1": [true,true]},{"dist_2": [false,false]}]}, --- {"15": [{"dist_1": [false,false]},{"dist_3": [true,false]}]} +-- {"10": [{"dist_1": true},{"dist_2": false}]}, +-- {"15": [{"dist_1": false},{"dist_3": true}]} -- ] -- -- It only returned shard groups 10 and 15 because they are the only shard groups --- that contain at least one shard placement that needs an isolated node. +-- that contain at least one shard whose placements need an isolation. -- --- (Innermost) Boolean arrays represent needsisolatednode values for different --- placements of given shard. For example, +-- (Innermost) Boolean values represent needsisolatednode value for given +-- shard. For example, -- --- {"15": [{"dist_1": [false,false]},{"dist_3": [true,false]}]} +-- {"15": [{"dist_1": false},{"dist_3": true}]} -- --- means that the first shard placement of dist_3 within shard group 15 needs --- to be isolated but the other placement doesn't. Also, the first placement --- is on the node that has a lower groupid than the second one because we order --- them by groupid. -CREATE OR REPLACE FUNCTION get_colocated_placements_needisolatednode( +-- means that the placements of dist_3 within shard group 15 needs +-- to be isolated. 
+CREATE OR REPLACE FUNCTION get_colocated_shards_needisolatednode( qualified_table_name text) RETURNS SETOF jsonb AS $func$ BEGIN @@ -583,25 +581,17 @@ RETURNS SETOF jsonb AS $func$ ) AS result FROM ( SELECT shardgroupindex, - jsonb_agg(jsonb_build_object(logicalrelid, needsisolatednodearr) ORDER BY logicalrelid::text) AS needsisolatednodejson + jsonb_agg(jsonb_build_object(logicalrelid, needsisolatednode) ORDER BY logicalrelid::text) AS needsisolatednodejson FROM ( - SELECT logicalrelid, - shardgroupindex, - array_agg(needsisolatednode ORDER BY shardgroupnodegroupid) AS needsisolatednodearr - FROM ( - SELECT shardgroupindex, - groupid AS shardgroupnodegroupid, - logicalrelid, - needsisolatednode - FROM public.get_enumerated_shard_groups(qualified_table_name) AS shardgroups - JOIN pg_dist_placement - ON shardid = ANY(shardids) - JOIN pg_dist_shard USING(shardid) - ) q1 - GROUP BY logicalrelid, shardgroupindex - ) q2 + SELECT shardgroupindex, + logicalrelid, + needsisolatednode + FROM public.get_enumerated_shard_groups(qualified_table_name) AS shardgroups + JOIN pg_dist_shard + ON shardid = ANY(shardids) + ) q1 GROUP BY shardgroupindex - ) q3 + ) q2 WHERE needsisolatednodejson::text LIKE '%true%'; END; $func$ LANGUAGE plpgsql; diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index d15798a83..f5b76c14c 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -1153,16 +1153,16 @@ SELECT * FROM public.table_placements_per_node; CALL citus_cleanup_orphaned_resources(); select * from pg_dist_placement ORDER BY placementid; - placementid | shardid | shardstate | shardlength | groupid | needsisolatednode + placementid | shardid | shardstate | shardlength | groupid --------------------------------------------------------------------- - 138 | 123023 | 1 | 0 | 14 | f - 141 | 123024 | 1 | 0 | 14 | f - 144 | 123027 | 1 | 0 | 14 | f - 145 | 123028 | 1 | 0 | 14 | 
f - 146 | 123021 | 1 | 0 | 16 | f - 147 | 123025 | 1 | 0 | 16 | f - 148 | 123022 | 1 | 0 | 16 | f - 149 | 123026 | 1 | 0 | 16 | f + 138 | 123023 | 1 | 0 | 14 + 141 | 123024 | 1 | 0 | 14 + 144 | 123027 | 1 | 0 | 14 + 145 | 123028 | 1 | 0 | 14 + 146 | 123021 | 1 | 0 | 16 + 147 | 123025 | 1 | 0 | 16 + 148 | 123022 | 1 | 0 | 16 + 149 | 123026 | 1 | 0 | 16 (8 rows) -- Move all shards to worker1 again diff --git a/src/test/regress/expected/upgrade_isolate_placement_after.out b/src/test/regress/expected/upgrade_isolate_placement_after.out index 97e7a2b2d..655dcde46 100644 --- a/src/test/regress/expected/upgrade_isolate_placement_after.out +++ b/src/test/regress/expected/upgrade_isolate_placement_after.out @@ -5,14 +5,14 @@ ALTER SCHEMA public RENAME TO old_public; ALTER SCHEMA citus_schema RENAME TO public; SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('upgrade_isolate_placement_before.table_with_isolated_placements') + SELECT * FROM public.get_colocated_shards_needisolatednode('upgrade_isolate_placement_before.table_with_isolated_placements') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": [true]}]}] - [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": [true]}]}] - [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": [true]}]}] + [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": true}]}] + [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": true}]}] + [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": true}]}] (3 rows) ALTER SCHEMA public RENAME TO citus_schema; diff --git a/src/test/regress/expected/upgrade_isolate_placement_before.out b/src/test/regress/expected/upgrade_isolate_placement_before.out index 6225e0c29..4eaaae631 100644 --- 
a/src/test/regress/expected/upgrade_isolate_placement_before.out +++ b/src/test/regress/expected/upgrade_isolate_placement_before.out @@ -20,13 +20,13 @@ SELECT citus_shard_set_isolated(:shardgroup_5_shardid); (1 row) SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('upgrade_isolate_placement_before.table_with_isolated_placements') + SELECT * FROM public.get_colocated_shards_needisolatednode('upgrade_isolate_placement_before.table_with_isolated_placements') $$) ORDER BY nodeid; - result + result --------------------------------------------------------------------- - [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": [true]}]}] - [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": [true]}]}] - [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": [true]}]}] + [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": true}]}] + [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": true}]}] + [{"5": [{"upgrade_isolate_placement_before.table_with_isolated_placements": true}]}] (3 rows) diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index af1fb9df4..939023d33 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -67,8 +67,9 @@ ORDER BY 1; function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") function citus_internal_add_placement_metadata(bigint,bigint,integer,bigint) - function citus_internal_add_placement_metadata(bigint,bigint,integer,bigint,boolean) + function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint) function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text) + 
function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text,boolean) function citus_internal_add_tenant_schema(oid,integer) function citus_internal_adjust_local_clock_to_remote(cluster_clock) function citus_internal_delete_colocation_metadata(integer) @@ -346,5 +347,5 @@ ORDER BY 1; view citus_stat_tenants_local view pg_dist_shard_placement view time_partitions -(336 rows) +(337 rows) diff --git a/src/test/regress/sql/isolate_placement.sql b/src/test/regress/sql/isolate_placement.sql index 375df24ce..2a88d729f 100644 --- a/src/test/regress/sql/isolate_placement.sql +++ b/src/test/regress/sql/isolate_placement.sql @@ -59,7 +59,7 @@ SET citus.shard_replication_factor TO 1; -- none of the placements have been marked as needsisolatednode yet SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -76,7 +76,7 @@ WHERE shardgroupindex = 10 \gset SELECT citus_shard_set_isolated(:shardgroup_10_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -87,7 +87,7 @@ WHERE shardgroupindex = 3 \gset SELECT citus_shard_unset_isolated(:shardgroup_3_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -98,7 +98,7 @@ WHERE shardgroupindex = 10 \gset SELECT citus_shard_unset_isolated(:shardgroup_10_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM 
public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -109,7 +109,7 @@ WHERE shardgroupindex = 5 \gset SELECT citus_shard_set_isolated(:shardgroup_5_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -137,7 +137,7 @@ WHERE shardgroupindex = 5 \gset SELECT citus_shard_set_isolated(:shardgroup_5_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -150,7 +150,7 @@ SELECT create_distributed_table_concurrently('dist_4_concurrently', 'a', colocat -- Placements of a new distributed table created within the same colocated -- group inherit needsisolatednode from the colocated placements too. 
SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -213,7 +213,7 @@ SELECT citus_copy_shard_placement(:shardgroup_15_shardid, source_nodeid, target_ FROM get_candidate_node_for_shard_transfer(:shardgroup_15_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -231,7 +231,7 @@ WHERE shardgroupindex = 3 \gset SELECT citus_shard_set_isolated(:shardgroup_3_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -243,7 +243,7 @@ SELECT replicate_table_shards('isolate_placement.dist_1', shard_replication_fact SET client_min_messages TO NOTICE; SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -261,7 +261,7 @@ WHERE shardgroupindex = 9 \gset SELECT citus_shard_set_isolated(:shardgroup_9_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -279,7 +279,7 @@ WHERE shardid = :shardgroup_9_shardid; -- We shouldn't see shard group 9 because shard-split operation doesn't -- preserve needsisolatednode flag when splitting the shard. 
SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -290,7 +290,7 @@ WHERE shardgroupindex = 12 \gset SELECT citus_shard_set_isolated(:shardgroup_12_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -309,7 +309,7 @@ WHERE shardid = :shardgroup_10_shardid; -- We should see old shard group 12 (now as 13 due to split -- of a prior shard) because it's not the one we splitted. SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; @@ -323,7 +323,7 @@ WHERE shardgroupindex = 17 \gset SELECT citus_shard_set_isolated(:shardgroup_17_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_3') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_3') $$) ORDER BY nodeid; @@ -335,7 +335,7 @@ SELECT 1 FROM isolate_tenant_to_new_shard('dist_3', 100, shard_transfer_mode => -- We shouldn't see shard group 17 because isolate_tenant_to_new_shard doesn't -- preserve needsisolatednode flag when splitting the shard. 
SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_3') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_3') $$) ORDER BY nodeid; @@ -346,7 +346,7 @@ WHERE shardgroupindex = 18 \gset SELECT citus_shard_set_isolated(:shardgroup_18_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_3') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_3') $$) ORDER BY nodeid; @@ -358,7 +358,7 @@ SELECT 1 FROM isolate_tenant_to_new_shard('dist_3', 1000, shard_transfer_mode => -- We should see shard group 18 (now as 20 due to split of a prior shard) -- because it's not the one we splitted. SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_3') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_3') $$) ORDER BY nodeid; @@ -376,7 +376,7 @@ CREATE TABLE single_shard_2(a int); SELECT create_distributed_table('single_shard_2', null, colocate_with=>'single_shard_1'); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.single_shard_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.single_shard_1') $$) ORDER BY nodeid; @@ -499,7 +499,7 @@ SELECT create_distributed_table('range_table_post', 'a', 'range'); CALL public.create_range_partitioned_shards('range_table_post', '{"0","25"}','{"26","50"}'); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('isolate_placement.dist_1') + SELECT * FROM public.get_colocated_shards_needisolatednode('isolate_placement.dist_1') $$) ORDER BY nodeid; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql 
b/src/test/regress/sql/metadata_sync_helpers.sql index c2009fd2c..1c7cbd153 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -277,7 +277,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- the user is only allowed to add a shard for add a table which is in pg_dist_partition @@ -287,7 +287,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- ok, now add the table to the pg_dist_partition @@ -313,7 +313,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- invalid storage types are not 
allowed @@ -323,7 +323,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables @@ -333,7 +333,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- non-integer shard ranges are not allowed @@ -343,7 +343,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- shardMinValue should be smaller than shardMaxValue @@ -353,7 +353,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 
't'::"char", '-1610612737'::text, '-2147483648'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- we do not allow overlapping shards for the same table @@ -365,7 +365,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text), ('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- Now let's check valid pg_dist_object updates @@ -483,7 +483,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- we do not allow NULL shardMinMax values @@ -495,12 +495,12 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, 
shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; -- manually ingest NULL values, otherwise not likely unless metadata is corrupted UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; \c - metadata_sync_helper_role - :worker_1_port @@ -519,7 +519,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text), ('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text), ('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; COMMIT; -- we cannot mark these two tables colocated because they are not colocated @@ -540,7 +540,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text), ('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text), ('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM 
shard_data; COMMIT; -- shardMin/MaxValues should be NULL for reference tables @@ -550,7 +550,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- reference tables cannot have multiple shards @@ -561,7 +561,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), ('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; ROLLBACK; -- finally, add a shard for reference tables @@ -571,7 +571,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; COMMIT; \c - postgres - :worker_1_port @@ -584,7 +584,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS 
(VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue, false) FROM shard_data; COMMIT; \c - metadata_sync_helper_role - :worker_1_port @@ -597,9 +597,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - WITH placement_data(shardid, shardlength, groupid, placementid) AS - (VALUES (-10, 0::bigint, 1::int, 1500000::bigint)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS + (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) + SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- invalid placementid @@ -609,7 +609,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, 1::int, -10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- non-existing shard @@ -619,7 +619,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1430100, 0::bigint, 1::int, 10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT 
citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- non-existing node with non-existing node-id 123123123 @@ -629,7 +629,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES ( 1420000, 0::bigint, 123123123::int, 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- create a volatile function that returns the local node id @@ -656,7 +656,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, get_node_id(), 1500000), (1420000, 0::bigint, get_node_id(), 1500001)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- shard is not owned by us @@ -666,7 +666,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420007, 0::bigint, get_node_id(), 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- sucessfully add placements @@ -687,7 +687,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1420011, 0::bigint, get_node_id(), 1500009), (1420012, 0::bigint, get_node_id(), 1500010), (1420013, 0::bigint, get_node_id(), 1500011)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid, false) FROM 
placement_data; + SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; COMMIT; -- we should be able to colocate both tables now diff --git a/src/test/regress/sql/multi_create_shards.sql b/src/test/regress/sql/multi_create_shards.sql index 74029e947..160dd8511 100644 --- a/src/test/regress/sql/multi_create_shards.sql +++ b/src/test/regress/sql/multi_create_shards.sql @@ -1,5 +1,3 @@ -CREATE SCHEMA multi_create_shards; -SET search_path TO multi_create_shards; SET citus.next_shard_id TO 370000; @@ -117,6 +115,3 @@ SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size FROM pg_dist_shard WHERE logicalrelid = 'weird_shard_count'::regclass ORDER BY shardminvalue::integer ASC; - -SET client_min_messages TO WARNING; -DROP SCHEMA multi_create_shards CASCADE; diff --git a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql index f43c8ae01..d0f789cd9 100644 --- a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql +++ b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql @@ -5,7 +5,6 @@ -- https://github.com/citusdata/citus/issues/5138 ---------------------------------------------------- SET citus.next_shard_id TO 910000; -SET citus.next_placement_id TO 910000; SET citus.shard_replication_factor TO 1; CREATE SCHEMA fix_idx_names; SET search_path TO fix_idx_names, public; @@ -241,7 +240,6 @@ SET ROLE user1; SELECT fix_partition_shard_index_names('fix_idx_names.dist_partitioned_table'::regclass); RESET ROLE; -DROP ROLE user1; SET search_path TO fix_idx_names, public; DROP TABLE dist_partitioned_table; @@ -275,7 +273,6 @@ DROP INDEX p_another_col_partition_col_idx; SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' AND tablename SIMILAR TO '%\_\d*' ORDER BY 1, 2; \c - - - :master_port -SET citus.next_placement_id TO 920000; SET search_path TO fix_idx_names, public; DROP TABLE 
dist_partitioned_table; SET citus.next_shard_id TO 910040; @@ -332,11 +329,6 @@ ALTER INDEX p1_pkey RENAME TO p1_pkey_renamed; ALTER INDEX p1_dist_col_partition_col_key RENAME TO p1_dist_col_partition_col_key_renamed; ALTER INDEX p1_dist_col_idx RENAME TO p1_dist_col_idx_renamed; --- create columnar extension idempotently -SET client_min_messages TO WARNING; -CREATE EXTENSION IF NOT EXISTS citus_columnar; -RESET client_min_messages; - -- should be able to create a new partition that is columnar SET citus.log_remote_commands TO ON; CREATE TABLE p2(dist_col int NOT NULL, another_col int, partition_col timestamp NOT NULL, name text) USING columnar; diff --git a/src/test/regress/sql/multi_metadata_attributes.sql b/src/test/regress/sql/multi_metadata_attributes.sql index cdb9fee3b..e2723574e 100644 --- a/src/test/regress/sql/multi_metadata_attributes.sql +++ b/src/test/regress/sql/multi_metadata_attributes.sql @@ -12,5 +12,5 @@ WHERE atthasmissing AND attrelid NOT IN ('pg_dist_node'::regclass, 'pg_dist_partition'::regclass, 'pg_dist_object'::regclass, 'pg_dist_background_task'::regclass, - 'pg_dist_placement'::regclass) + 'pg_dist_shard'::regclass) ORDER BY attrelid, attname; diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index f942bbc98..7a7e3d5b3 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -575,31 +575,29 @@ $func$ LANGUAGE plpgsql; -- Takes a table name and returns a json object for each shard group that --- contains a shard placement that needs an isolated node. +-- contains a shard whose placements need isolation. -- --- This does not only return the placements of input relation but also considers +-- This does not only return the shards of input relation but also considers -- all colocated relations. 
-- -- An example output is as follows: -- -- [ --- {"10": [{"dist_1": [true,true]},{"dist_2": [false,false]}]}, --- {"15": [{"dist_1": [false,false]},{"dist_3": [true,false]}]} +-- {"10": [{"dist_1": true},{"dist_2": false}]}, +-- {"15": [{"dist_1": false},{"dist_3": true}]} -- ] -- -- It only returned shard groups 10 and 15 because they are the only shard groups --- that contain at least one shard placement that needs an isolated node. +-- that contain at least one shard whose placements need an isolation. -- --- (Innermost) Boolean arrays represent needsisolatednode values for different --- placements of given shard. For example, +-- (Innermost) Boolean values represent needsisolatednode value for given +-- shard. For example, -- --- {"15": [{"dist_1": [false,false]},{"dist_3": [true,false]}]} +-- {"15": [{"dist_1": false},{"dist_3": true}]} -- --- means that the first shard placement of dist_3 within shard group 15 needs --- to be isolated but the other placement doesn't. Also, the first placement --- is on the node that has a lower groupid than the second one because we order --- them by groupid. -CREATE OR REPLACE FUNCTION get_colocated_placements_needisolatednode( +-- means that the placements of dist_3 within shard group 15 needs +-- to be isolated. 
+CREATE OR REPLACE FUNCTION get_colocated_shards_needisolatednode( qualified_table_name text) RETURNS SETOF jsonb AS $func$ BEGIN @@ -611,25 +609,17 @@ RETURNS SETOF jsonb AS $func$ ) AS result FROM ( SELECT shardgroupindex, - jsonb_agg(jsonb_build_object(logicalrelid, needsisolatednodearr) ORDER BY logicalrelid::text) AS needsisolatednodejson + jsonb_agg(jsonb_build_object(logicalrelid, needsisolatednode) ORDER BY logicalrelid::text) AS needsisolatednodejson FROM ( - SELECT logicalrelid, - shardgroupindex, - array_agg(needsisolatednode ORDER BY shardgroupnodegroupid) AS needsisolatednodearr - FROM ( - SELECT shardgroupindex, - groupid AS shardgroupnodegroupid, - logicalrelid, - needsisolatednode - FROM public.get_enumerated_shard_groups(qualified_table_name) AS shardgroups - JOIN pg_dist_placement - ON shardid = ANY(shardids) - JOIN pg_dist_shard USING(shardid) - ) q1 - GROUP BY logicalrelid, shardgroupindex - ) q2 + SELECT shardgroupindex, + logicalrelid, + needsisolatednode + FROM public.get_enumerated_shard_groups(qualified_table_name) AS shardgroups + JOIN pg_dist_shard + ON shardid = ANY(shardids) + ) q1 GROUP BY shardgroupindex - ) q3 + ) q2 WHERE needsisolatednodejson::text LIKE '%true%'; END; $func$ LANGUAGE plpgsql; diff --git a/src/test/regress/sql/upgrade_isolate_placement_after.sql b/src/test/regress/sql/upgrade_isolate_placement_after.sql index 129cf116b..b096ee9aa 100644 --- a/src/test/regress/sql/upgrade_isolate_placement_after.sql +++ b/src/test/regress/sql/upgrade_isolate_placement_after.sql @@ -6,7 +6,7 @@ ALTER SCHEMA public RENAME TO old_public; ALTER SCHEMA citus_schema RENAME TO public; SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('upgrade_isolate_placement_before.table_with_isolated_placements') + SELECT * FROM public.get_colocated_shards_needisolatednode('upgrade_isolate_placement_before.table_with_isolated_placements') $$) ORDER BY nodeid; diff --git 
a/src/test/regress/sql/upgrade_isolate_placement_before.sql b/src/test/regress/sql/upgrade_isolate_placement_before.sql index 21c531b1e..ba6884ed5 100644 --- a/src/test/regress/sql/upgrade_isolate_placement_before.sql +++ b/src/test/regress/sql/upgrade_isolate_placement_before.sql @@ -16,6 +16,6 @@ WHERE shardgroupindex = 5 \gset SELECT citus_shard_set_isolated(:shardgroup_5_shardid); SELECT result FROM run_command_on_all_nodes($$ - SELECT * FROM public.get_colocated_placements_needisolatednode('upgrade_isolate_placement_before.table_with_isolated_placements') + SELECT * FROM public.get_colocated_shards_needisolatednode('upgrade_isolate_placement_before.table_with_isolated_placements') $$) ORDER BY nodeid;