From e989e5b00edfac99d98e2370b0e6727bb8c094e0 Mon Sep 17 00:00:00 2001
From: Vinod Sridharan <14185211+visridha@users.noreply.github.com>
Date: Wed, 3 Apr 2024 23:09:52 +0000
Subject: [PATCH] more

---
 .../distributed/operations/create_shards.c     | 10 ++-
 .../expected/multi_mx_create_table.out         | 20 +-----
 .../regress/expected/multi_router_planner.out  | 67 ++++++++++++++++---
 3 files changed, 68 insertions(+), 29 deletions(-)

diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c
index 27fb8c04f..fe26d9ed4 100644
--- a/src/backend/distributed/operations/create_shards.c
+++ b/src/backend/distributed/operations/create_shards.c
@@ -163,7 +163,15 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 	/* set shard storage type according to relation type */
 	char shardStorageType = ShardStorageType(distributedTableId);
 
-	int64 shardOffset = shardCount == 1 ? colocationId : 0;
+
+	int64 shardOffset = 0;
+	if (shardCount == 1 && shardStorageType == SHARD_STORAGE_TABLE)
+	{
+		/* For single shard distributed tables, use the colocationId to offset
+		 * where the shard is placed.
+		 */
+		shardOffset = colocationId;
+	}
 
 	for (int64 shardIndex = 0; shardIndex < shardCount; shardIndex++)
 	{
diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out
index b9d3f7faa..29b1d0430 100644
--- a/src/test/regress/expected/multi_mx_create_table.out
+++ b/src/test/regress/expected/multi_mx_create_table.out
@@ -540,12 +540,6 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
  articles_hash_mx_1220104 | articles_hash_mx | distributed | 0
  articles_hash_mx_1220105 | articles_hash_mx | distributed | 0
  articles_single_shard_hash_mx_1220106 | articles_single_shard_hash_mx | distributed | 0
- articles_single_shard_hash_mx_1220106 | articles_single_shard_hash_mx | distributed | 0
- articles_single_shard_hash_mx_1220106 | articles_single_shard_hash_mx | distributed | 0
- articles_single_shard_hash_mx_1220106 | articles_single_shard_hash_mx | distributed | 0
- articles_single_shard_hash_mx_1220106 | articles_single_shard_hash_mx | distributed | 0
- articles_single_shard_hash_mx_1220106 | articles_single_shard_hash_mx | distributed | 0
- articles_single_shard_hash_mx_1220106 | articles_single_shard_hash_mx | distributed | 0
 citus_mx_test_schema.nation_hash_1220016 | citus_mx_test_schema.nation_hash | distributed | 0
 citus_mx_test_schema.nation_hash_1220016 | citus_mx_test_schema.nation_hash | distributed | 0
 citus_mx_test_schema.nation_hash_1220016 | citus_mx_test_schema.nation_hash | distributed | 0
@@ -715,12 +709,6 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
 customer_mx_1220084 | customer_mx | reference | 0
 customer_mx_1220084 | customer_mx | reference | 0
 labs_mx_1220102 | labs_mx | distributed | 8192
- labs_mx_1220102 | labs_mx | distributed | 8192
- labs_mx_1220102 | labs_mx | distributed | 8192
- labs_mx_1220102 | labs_mx | distributed | 8192
- labs_mx_1220102 | labs_mx | distributed | 8192
- labs_mx_1220102 | labs_mx | distributed | 8192
- labs_mx_1220102 | labs_mx | distributed | 8192
 limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
 limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
 limit_orders_mx_1220092 | limit_orders_mx | distributed | 16384
@@ -890,12 +878,6 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
 nation_mx_1220085 | nation_mx | reference | 0
 nation_mx_1220085 | nation_mx | reference | 0
 objects_mx_1220103 | objects_mx | distributed | 16384
- objects_mx_1220103 | objects_mx | distributed | 16384
- objects_mx_1220103 | objects_mx | distributed | 16384
- objects_mx_1220103 | objects_mx | distributed | 16384
- objects_mx_1220103 | objects_mx | distributed | 16384
- objects_mx_1220103 | objects_mx | distributed | 16384
- objects_mx_1220103 | objects_mx | distributed | 16384
 orders_mx_1220068 | orders_mx | distributed | 8192
 orders_mx_1220068 | orders_mx | distributed | 8192
 orders_mx_1220068 | orders_mx | distributed | 8192
@@ -984,7 +966,7 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR
 supplier_mx_1220087 | supplier_mx | reference | 0
 supplier_mx_1220087 | supplier_mx | reference | 0
 supplier_mx_1220087 | supplier_mx | reference | 0
-(469 rows)
+(451 rows)
 
 -- Show that altering type name is not supported from worker node
 ALTER TYPE citus_mx_test_schema.order_side_mx RENAME TO temp_order_side_mx;
diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out
index fee821a7d..2b4473987 100644
--- a/src/test/regress/expected/multi_router_planner.out
+++ b/src/test/regress/expected/multi_router_planner.out
@@ -774,15 +774,64 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
 	FROM articles_hash a, articles_single_shard_hash b
 	WHERE a.author_id = 10 and a.author_id = b.author_id
 	ORDER BY 1,2 LIMIT 3;
-DEBUG: Creating router plan
-DEBUG: query has a single distribution column value: 10
- first_author | second_word_count
----------------------------------------------------------------------
- 10 | 19519
- 10 | 19519
- 10 | 19519
-(3 rows)
-
+DEBUG: found no worker with all shard placements
+DEBUG: push down of limit count: 3
+DEBUG: join prunable for task partitionId 0 and 1
+DEBUG: join prunable for task partitionId 0 and 2
+DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
+DEBUG: join prunable for task partitionId 1 and 0
+DEBUG: join prunable for task partitionId 1 and 2
+DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
+DEBUG: join prunable for task partitionId 2 and 0
+DEBUG: join prunable for task partitionId 2 and 1
+DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
+DEBUG: join prunable for task partitionId 3 and 0
+DEBUG: join prunable for task partitionId 3 and 1
+DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 2
+DEBUG: pruning merge fetch taskId 2
+DETAIL: Creating dependency on merge taskId 2
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 4
+DEBUG: pruning merge fetch taskId 5
+DETAIL: Creating dependency on merge taskId 4
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 6
+DEBUG: pruning merge fetch taskId 8
+DETAIL: Creating dependency on merge taskId 6
+DEBUG: pruning merge fetch taskId 10
+DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 11
+DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 10
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 12
+ERROR: the query contains a join that requires repartitioning
+HINT: Set citus.enable_repartition_joins to on to enable repartitioning
 SET citus.enable_non_colocated_router_query_pushdown TO OFF;
 -- but this is not the case otherwise
 SELECT a.author_id as first_author, b.word_count as second_word_count
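
Note on the create_shards.c hunk above: a single-shard table has no hash range to spread, so without an offset every such table would start its round-robin placement at the same worker. Seeding the round-robin with the colocation ID spreads unrelated single-shard tables across workers, and the new SHARD_STORAGE_TABLE check limits that behaviour to regular table shards. The sketch below only illustrates that placement idea under a simplified model; PickWorkerForShard and the plain modulo placement are hypothetical stand-ins, not the actual Citus round-robin code.

/*
 * Illustrative sketch only (not the Citus implementation): shows how an
 * offset derived from the colocation ID changes which worker a
 * single-shard table lands on.
 */
#include <stdio.h>

static int
PickWorkerForShard(long shardIndex, long shardOffset, int workerCount)
{
	/* round-robin pick, shifted by the per-table offset */
	return (int) ((shardIndex + shardOffset) % workerCount);
}

int
main(void)
{
	int workerCount = 3;

	for (int colocationId = 1; colocationId <= 4; colocationId++)
	{
		/* a single-shard table has exactly one shard: shardIndex 0 */
		int withoutOffset = PickWorkerForShard(0, 0, workerCount);
		int withOffset = PickWorkerForShard(0, colocationId, workerCount);

		printf("colocation %d: worker %d without offset, worker %d with offset\n",
			   colocationId, withoutOffset, withOffset);
	}

	return 0;
}

With no offset every table in this toy model reports worker 0; with the colocation-derived offset, consecutive colocation IDs land on workers 1, 2, 0, 1, which is the spreading behaviour the patch is after for single-shard distributed tables.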