From 5512bb359a1663f5f39526670cb7708bdc2bb20b Mon Sep 17 00:00:00 2001 From: Eren Date: Thu, 2 Jun 2016 12:07:59 +0300 Subject: [PATCH] Set Explicit ShardId/JobId In Regression Tests Fixes #271 This change sets ShardIds and JobIds for each test case. Before this change, when a new test that somehow increments Job or Shard IDs is added, then the tests after the new test should be updated. ShardID and JobID sequences are set at the beginning of each file with the following commands: ``` ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000; ``` ShardIds and JobIds are multiples of 10000. Exceptions are: - multi_large_shardid: shardid and jobid sequences are set to much larger values - multi_fdw_large_shardid: same as above - multi_join_pruning: Causes a race condition with multi_hash_pruning since they are run in parallel. --- src/test/regress/expected/multi_array_agg.out | 24 +-- .../expected/multi_average_expression.out | 2 + .../regress/expected/multi_basic_queries.out | 2 + .../multi_binary_master_copy_format.out | 2 + .../expected/multi_complex_expressions.out | 2 + .../expected/multi_connection_cache.out | 2 + .../expected/multi_count_type_conversion.out | 2 + .../regress/expected/multi_create_fdw.out | 2 + .../expected/multi_create_insert_proxy.out | 2 + .../regress/expected/multi_create_shards.out | 2 + .../regress/expected/multi_create_table.out | 2 + .../regress/expected/multi_data_types.out | 2 + .../expected/multi_distribution_metadata.out | 37 ++--- .../regress/expected/multi_drop_extension.out | 2 + .../expected/multi_dropped_column_aliases.out | 2 + src/test/regress/expected/multi_explain.out | 140 +++++++++--------- src/test/regress/expected/multi_explain_0.out | 140 +++++++++--------- src/test/regress/expected/multi_extension.out | 2 + .../expected/multi_fdw_create_table.out | 2 + .../expected/multi_fdw_master_protocol.out | 4 +- .../expected/multi_generate_ddl_commands.out | 2 + 
.../regress/expected/multi_hash_pruning.out | 116 ++++++++------- .../expected/multi_index_statements.out | 6 +- .../expected/multi_join_order_additional.out | 6 +- .../expected/multi_join_order_tpch_large.out | 2 + .../regress/expected/multi_join_pruning.out | 27 ++-- .../multi_large_table_join_planning.out | 72 ++++----- .../multi_large_table_join_planning_0.out | 72 ++++----- .../expected/multi_large_table_pruning.out | 24 +-- .../multi_large_table_task_assignment.out | 40 ++--- .../regress/expected/multi_limit_clause.out | 2 + .../multi_limit_clause_approximate.out | 2 + .../expected/multi_master_protocol.out | 4 +- .../regress/expected/multi_modifications.out | 8 +- .../multi_null_minmax_value_pruning.out | 58 ++++---- .../expected/multi_partition_pruning.out | 42 +++--- .../regress/expected/multi_prepare_plsql.out | 2 + .../regress/expected/multi_prepare_sql.out | 2 + .../expected/multi_prune_shard_list.out | 40 ++--- .../multi_query_directory_cleanup.out | 2 + .../regress/expected/multi_repair_shards.out | 2 + .../multi_repartitioned_subquery_udf.out | 2 + .../regress/expected/multi_router_planner.out | 103 ++++++------- .../regress/expected/multi_shard_modify.out | 1 + .../regress/expected/multi_simple_queries.out | 22 +-- .../multi_single_relation_subquery.out | 2 + src/test/regress/expected/multi_table_ddl.out | 2 + .../expected/multi_task_assignment_policy.out | 2 + .../regress/expected/multi_tpch_query1.out | 2 + .../regress/expected/multi_tpch_query10.out | 2 + .../regress/expected/multi_tpch_query12.out | 2 + .../regress/expected/multi_tpch_query14.out | 2 + .../regress/expected/multi_tpch_query19.out | 2 + .../regress/expected/multi_tpch_query3.out | 2 + .../regress/expected/multi_tpch_query6.out | 2 + .../regress/expected/multi_tpch_query7.out | 2 + .../expected/multi_tpch_query7_nested.out | 2 + src/test/regress/expected/multi_upsert.out | 2 + src/test/regress/expected/multi_upsert_0.out | 2 + src/test/regress/expected/multi_utilities.out | 2 + 
.../expected/multi_utility_statements.out | 2 + .../expected/multi_utility_warnings.out | 2 + .../multi_verify_no_join_with_alias.out | 8 +- .../expected/multi_verify_no_subquery.out | 2 + .../expected/multi_working_columns.out | 2 + .../expected/task_tracker_assign_task.out | 2 + .../expected/task_tracker_cleanup_job.out | 2 + .../expected/task_tracker_create_table.out | 2 + .../expected/task_tracker_partition_task.out | 2 + .../expected/worker_binary_data_partition.out | 2 + .../worker_check_invalid_arguments.out | 2 + .../regress/expected/worker_create_table.out | 2 + .../expected/worker_hash_partition.out | 2 + .../worker_hash_partition_complex.out | 2 + .../expected/worker_merge_hash_files.out | 2 + .../expected/worker_merge_range_files.out | 2 + .../expected/worker_null_data_partition.out | 2 + .../expected/worker_range_partition.out | 2 + .../worker_range_partition_complex.out | 2 + .../regress/input/multi_agg_distinct.source | 6 +- .../input/multi_agg_type_conversion.source | 6 +- .../input/multi_alter_table_statements.source | 9 +- .../input/multi_append_table_to_shard.source | 6 + .../input/multi_complex_count_distinct.source | 5 + src/test/regress/input/multi_copy.source | 3 + .../regress/input/multi_create_schema.source | 5 + .../input/multi_fdw_large_shardid.source | 3 + .../regress/input/multi_fdw_stage_data.source | 5 + .../regress/input/multi_large_shardid.source | 3 + .../input/multi_master_delete_protocol.source | 6 +- .../regress/input/multi_outer_join.source | 5 + .../regress/input/multi_stage_data.source | 6 +- .../input/multi_stage_large_records.source | 6 +- .../input/multi_stage_more_data.source | 5 + src/test/regress/input/multi_subquery.source | 10 +- src/test/regress/input/worker_copy.source | 5 + .../regress/output/multi_agg_distinct.source | 2 + .../output/multi_agg_type_conversion.source | 2 + .../multi_alter_table_statements.source | 13 +- .../output/multi_append_table_to_shard.source | 4 +- .../multi_complex_count_distinct.source | 2 
+ src/test/regress/output/multi_copy.source | 1 + .../regress/output/multi_create_schema.source | 2 + .../output/multi_fdw_large_shardid.source | 1 + .../output/multi_fdw_stage_data.source | 2 + .../regress/output/multi_large_shardid.source | 1 + .../multi_master_delete_protocol.source | 2 + .../regress/output/multi_outer_join.source | 132 +++++++++-------- .../regress/output/multi_stage_data.source | 2 + .../output/multi_stage_large_records.source | 2 + .../output/multi_stage_more_data.source | 2 + src/test/regress/output/multi_subquery.source | 17 ++- src/test/regress/output/worker_copy.source | 2 + .../sql/multi_agg_approximate_distinct.sql | 5 + src/test/regress/sql/multi_array_agg.sql | 5 + .../regress/sql/multi_average_expression.sql | 6 +- src/test/regress/sql/multi_basic_queries.sql | 5 + .../sql/multi_binary_master_copy_format.sql | 5 + .../regress/sql/multi_complex_expressions.sql | 5 + .../regress/sql/multi_connection_cache.sql | 5 + .../sql/multi_count_type_conversion.sql | 5 + src/test/regress/sql/multi_create_fdw.sql | 5 + .../regress/sql/multi_create_insert_proxy.sql | 5 + src/test/regress/sql/multi_create_shards.sql | 5 + src/test/regress/sql/multi_create_table.sql | 5 + src/test/regress/sql/multi_data_types.sql | 5 + .../sql/multi_distribution_metadata.sql | 23 +-- src/test/regress/sql/multi_drop_extension.sql | 4 + .../sql/multi_dropped_column_aliases.sql | 5 + src/test/regress/sql/multi_explain.sql | 5 + src/test/regress/sql/multi_extension.sql | 5 + .../regress/sql/multi_fdw_create_table.sql | 5 + .../regress/sql/multi_fdw_master_protocol.sql | 5 + .../sql/multi_generate_ddl_commands.sql | 5 + src/test/regress/sql/multi_hash_pruning.sql | 5 + .../regress/sql/multi_index_statements.sql | 5 + .../sql/multi_join_order_additional.sql | 5 + .../sql/multi_join_order_tpch_large.sql | 5 + src/test/regress/sql/multi_join_pruning.sql | 4 + .../sql/multi_large_table_join_planning.sql | 6 +- .../regress/sql/multi_large_table_pruning.sql | 6 +- 
.../sql/multi_large_table_task_assignment.sql | 6 +- src/test/regress/sql/multi_limit_clause.sql | 5 + .../sql/multi_limit_clause_approximate.sql | 5 + .../regress/sql/multi_master_protocol.sql | 6 +- src/test/regress/sql/multi_modifications.sql | 5 + .../sql/multi_null_minmax_value_pruning.sql | 19 ++- .../regress/sql/multi_partition_pruning.sql | 5 + src/test/regress/sql/multi_prepare_plsql.sql | 5 + src/test/regress/sql/multi_prepare_sql.sql | 5 + .../regress/sql/multi_prune_shard_list.sql | 5 + .../sql/multi_query_directory_cleanup.sql | 5 + src/test/regress/sql/multi_repair_shards.sql | 5 + .../sql/multi_repartitioned_subquery_udf.sql | 5 + src/test/regress/sql/multi_router_planner.sql | 7 +- src/test/regress/sql/multi_shard_modify.sql | 3 + src/test/regress/sql/multi_simple_queries.sql | 5 + .../sql/multi_single_relation_subquery.sql | 6 +- src/test/regress/sql/multi_table_ddl.sql | 5 + .../sql/multi_task_assignment_policy.sql | 5 + src/test/regress/sql/multi_tpch_query1.sql | 5 + src/test/regress/sql/multi_tpch_query10.sql | 5 + src/test/regress/sql/multi_tpch_query12.sql | 5 + src/test/regress/sql/multi_tpch_query14.sql | 5 + src/test/regress/sql/multi_tpch_query19.sql | 5 + src/test/regress/sql/multi_tpch_query3.sql | 5 + src/test/regress/sql/multi_tpch_query6.sql | 5 + src/test/regress/sql/multi_tpch_query7.sql | 5 + .../regress/sql/multi_tpch_query7_nested.sql | 5 + src/test/regress/sql/multi_upsert.sql | 5 + src/test/regress/sql/multi_utilities.sql | 5 + .../regress/sql/multi_utility_statements.sql | 5 + .../regress/sql/multi_utility_warnings.sql | 5 + .../sql/multi_verify_no_join_with_alias.sql | 9 +- .../regress/sql/multi_verify_no_subquery.sql | 5 + .../regress/sql/multi_working_columns.sql | 5 + .../regress/sql/task_tracker_assign_task.sql | 5 + .../regress/sql/task_tracker_cleanup_job.sql | 5 + .../regress/sql/task_tracker_create_table.sql | 5 + .../sql/task_tracker_partition_task.sql | 5 + .../sql/worker_binary_data_partition.sql | 5 + 
.../sql/worker_check_invalid_arguments.sql | 5 + src/test/regress/sql/worker_create_table.sql | 5 + .../regress/sql/worker_hash_partition.sql | 5 + .../sql/worker_hash_partition_complex.sql | 5 + .../regress/sql/worker_merge_hash_files.sql | 5 + .../regress/sql/worker_merge_range_files.sql | 5 + .../sql/worker_null_data_partition.sql | 5 + .../regress/sql/worker_range_partition.sql | 5 + .../sql/worker_range_partition_complex.sql | 5 + 190 files changed, 1221 insertions(+), 595 deletions(-) diff --git a/src/test/regress/expected/multi_array_agg.out b/src/test/regress/expected/multi_array_agg.out index b3fe1b307..b473b7967 100644 --- a/src/test/regress/expected/multi_array_agg.out +++ b/src/test/regress/expected/multi_array_agg.out @@ -1,6 +1,8 @@ -- -- MULTI_ARRAY_AGG -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 520000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 520000; -- Check multi_cat_agg() aggregate which is used to implement array_agg() SELECT array_cat_agg(i) FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i); array_cat_agg @@ -96,10 +98,10 @@ SELECT l_quantity, count(*), avg(l_extendedprice), array_agg(l_orderkey) FROM li GROUP BY l_quantity ORDER BY l_quantity; l_quantity | count | avg | array_agg ------------+-------+-----------------------+-------------------------------------------------------------------------------------------------- - 1.00 | 17 | 1477.1258823529411765 | {8997,9026,9158,9184,9220,9222,9348,9383,9476,5543,5633,5634,5698,5766,5856,5857,5986} - 2.00 | 19 | 3078.4242105263157895 | {9030,9058,9123,9124,9188,9344,9441,9476,5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923} - 3.00 | 14 | 4714.0392857142857143 | {9124,9157,9184,9223,9254,9349,9414,9475,9477,5509,5543,5605,5606,5827} - 4.00 | 19 | 5929.7136842105263158 | {9091,9120,9281,9347,9382,9440,9473,5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985} + 1.00 | 17 | 1477.1258823529411765 | 
{5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476} + 2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476} + 3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477} + 4.00 | 19 | 5929.7136842105263158 | {5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985,9091,9120,9281,9347,9382,9440,9473} (4 rows) SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month @@ -107,10 +109,10 @@ SELECT l_quantity, array_agg(extract (month FROM o_orderdate)) AS my_month AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | my_month ------------+------------------------------------------------ - 1.00 | {7,7,4,7,4,2,6,3,5,9,5,7,5,9,11,11,4} - 2.00 | {7,6,6,10,1,12,6,5,11,10,8,5,5,12,3,11,7,11,5} - 3.00 | {10,6,7,8,5,8,9,11,3,4,9,8,11,7} - 4.00 | {11,6,2,8,2,6,10,1,5,6,11,12,10,9,6,1,2,5,1} + 1.00 | {9,5,7,5,9,11,11,4,7,7,4,7,4,2,6,3,5} + 2.00 | {11,10,8,5,5,12,3,11,7,11,5,7,6,6,10,1,12,6,5} + 3.00 | {4,9,8,11,7,10,6,7,8,5,8,9,11,3} + 4.00 | {1,5,6,11,12,10,9,6,1,2,5,1,11,6,2,8,2,6,10} (4 rows) SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity < 5 @@ -118,10 +120,10 @@ SELECT l_quantity, array_agg(l_orderkey * 2 + 1) FROM lineitem WHERE l_quantity AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | array_agg ------------+--------------------------------------------- - 1.00 | {18317,18445,11269,11397,11713,11715,11973} - 2.00 | {18061,18247,18953,11847} + 1.00 | {11269,11397,11713,11715,11973,18317,18445} + 2.00 | {11847,18061,18247,18953} 3.00 | {18249,18315,18699,18951,18955} - 4.00 | {18241,18765,11653,11659} + 4.00 | {11653,11659,18241,18765} (4 rows) -- Check that we can execute array_agg() with an expression containing NULL values diff --git 
a/src/test/regress/expected/multi_average_expression.out b/src/test/regress/expected/multi_average_expression.out index 74511d553..a0df52dfc 100644 --- a/src/test/regress/expected/multi_average_expression.out +++ b/src/test/regress/expected/multi_average_expression.out @@ -4,6 +4,8 @@ -- This test checks that the group-by columns don't need to be above an average -- expression, and can be anywhere in the projection order. This is in response -- to a bug we had due to the average expression introducing new columns. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 450000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 450000; SELECT sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, diff --git a/src/test/regress/expected/multi_basic_queries.out b/src/test/regress/expected/multi_basic_queries.out index f226ac6e3..5a3eaaf04 100644 --- a/src/test/regress/expected/multi_basic_queries.out +++ b/src/test/regress/expected/multi_basic_queries.out @@ -1,6 +1,8 @@ -- -- MULTI_BASIC_QUERIES -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 440000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 440000; -- Execute simple sum, average, and count queries on data recently uploaded to -- our partitioned table. 
SELECT count(*) FROM lineitem; diff --git a/src/test/regress/expected/multi_binary_master_copy_format.out b/src/test/regress/expected/multi_binary_master_copy_format.out index 505d18a34..5e213e175 100644 --- a/src/test/regress/expected/multi_binary_master_copy_format.out +++ b/src/test/regress/expected/multi_binary_master_copy_format.out @@ -1,6 +1,8 @@ -- -- MULTI_BINARY_MASTER_COPY -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 430000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 430000; -- Try binary master copy for different executors SET citus.binary_master_copy_format TO 'on'; SET citus.task_executor_type TO 'task-tracker'; diff --git a/src/test/regress/expected/multi_complex_expressions.out b/src/test/regress/expected/multi_complex_expressions.out index f1f902586..3217e1681 100644 --- a/src/test/regress/expected/multi_complex_expressions.out +++ b/src/test/regress/expected/multi_complex_expressions.out @@ -1,6 +1,8 @@ -- -- MULTI_COMPLEX_EXPRESSIONS -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 420000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 420000; -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; ?column? 
diff --git a/src/test/regress/expected/multi_connection_cache.out b/src/test/regress/expected/multi_connection_cache.out index ac808a0af..84e927bbe 100644 --- a/src/test/regress/expected/multi_connection_cache.out +++ b/src/test/regress/expected/multi_connection_cache.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 410000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 410000; -- =================================================================== -- create test functions -- =================================================================== diff --git a/src/test/regress/expected/multi_count_type_conversion.out b/src/test/regress/expected/multi_count_type_conversion.out index a258ebef2..ef5cefef6 100644 --- a/src/test/regress/expected/multi_count_type_conversion.out +++ b/src/test/regress/expected/multi_count_type_conversion.out @@ -1,6 +1,8 @@ -- -- MULTI_COUNT_TYPE_CONVERSION -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 400000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 400000; -- Verify that we can sort count(*) results correctly. We perform this check as -- our count() operations execute in two steps: worker nodes report their -- count() results, and the master node sums these counts up. 
During this sum(), diff --git a/src/test/regress/expected/multi_create_fdw.out b/src/test/regress/expected/multi_create_fdw.out index 006ad6d1a..c19446ab3 100644 --- a/src/test/regress/expected/multi_create_fdw.out +++ b/src/test/regress/expected/multi_create_fdw.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 390000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 390000; -- =================================================================== -- get ready for the foreign data wrapper tests -- =================================================================== diff --git a/src/test/regress/expected/multi_create_insert_proxy.out b/src/test/regress/expected/multi_create_insert_proxy.out index d90262c4a..93faa5e71 100644 --- a/src/test/regress/expected/multi_create_insert_proxy.out +++ b/src/test/regress/expected/multi_create_insert_proxy.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 380000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 380000; -- =================================================================== -- test INSERT proxy creation functionality -- =================================================================== diff --git a/src/test/regress/expected/multi_create_shards.out b/src/test/regress/expected/multi_create_shards.out index 39cb45eef..cf3dfc3cd 100644 --- a/src/test/regress/expected/multi_create_shards.out +++ b/src/test/regress/expected/multi_create_shards.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 370000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 370000; -- =================================================================== -- create test functions and types needed for tests -- =================================================================== diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index dd3f1c167..b768aebce 100644 --- 
a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -1,6 +1,8 @@ -- -- MULTI_CREATE_TABLE -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 360000; -- Create new table definitions for use in testing in distributed planning and -- execution functionality. Also create indexes to boost performance. CREATE TABLE lineitem ( diff --git a/src/test/regress/expected/multi_data_types.out b/src/test/regress/expected/multi_data_types.out index 18b7a54dc..2c3de9e53 100644 --- a/src/test/regress/expected/multi_data_types.out +++ b/src/test/regress/expected/multi_data_types.out @@ -2,6 +2,8 @@ -- test composite type, varchar and enum types -- create, distribute, INSERT, SELECT and UPDATE -- =================================================================== +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 530000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 530000; -- create a custom type... 
CREATE TYPE test_composite_type AS ( i integer, diff --git a/src/test/regress/expected/multi_distribution_metadata.out b/src/test/regress/expected/multi_distribution_metadata.out index 88d5a3ba1..5cc8c5473 100644 --- a/src/test/regress/expected/multi_distribution_metadata.out +++ b/src/test/regress/expected/multi_distribution_metadata.out @@ -1,6 +1,8 @@ -- =================================================================== -- create test functions -- =================================================================== +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 540000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 540000; CREATE FUNCTION load_shard_id_array(regclass) RETURNS bigint[] AS 'citus' @@ -75,16 +77,16 @@ SELECT master_create_worker_shards('events_hash', 4, 2); (1 row) -- set shardstate of one replication from each shard to 0 (invalid value) -UPDATE pg_dist_shard_placement SET shardstate = 0 WHERE nodeport = 57638 AND shardid BETWEEN 103025 AND 103028; +UPDATE pg_dist_shard_placement SET shardstate = 0 WHERE nodeport = 57638 AND shardid BETWEEN 540000 AND 540003; -- should see above shard identifiers SELECT load_shard_id_array('events_hash'); load_shard_id_array ------------------------------- - {103025,103026,103027,103028} + {540000,540001,540002,540003} (1 row) -- should see array with first shard range -SELECT load_shard_interval_array(103025, 0); +SELECT load_shard_interval_array(540000, 0); load_shard_interval_array --------------------------- {-2147483648,-1073741825} @@ -106,42 +108,41 @@ SELECT master_create_distributed_table('events_range', 'name', 'range'); SELECT master_create_empty_shard('events_range'); master_create_empty_shard --------------------------- - 103029 + 540004 (1 row) UPDATE pg_dist_shard SET shardminvalue = 'Aardvark', shardmaxvalue = 'Zebra' -WHERE shardid = 103029; -SELECT load_shard_interval_array(103029, ''::text); +WHERE shardid = 540004; +SELECT load_shard_interval_array(540004, ''::text); 
load_shard_interval_array --------------------------- {Aardvark,Zebra} (1 row) -- should see error for non-existent shard -SELECT load_shard_interval_array(103030, 0); -ERROR: could not find valid entry for shard 103030 +SELECT load_shard_interval_array(540005, 0); +ERROR: could not find valid entry for shard 540005 -- should see two placements -SELECT load_shard_placement_array(103026, false); +SELECT load_shard_placement_array(540001, false); load_shard_placement_array ----------------------------------- {localhost:57637,localhost:57638} (1 row) -- only one of which is finalized -SELECT load_shard_placement_array(103026, true); +SELECT load_shard_placement_array(540001, true); load_shard_placement_array ---------------------------- {localhost:57637} (1 row) -- should see error for non-existent shard -SELECT load_shard_placement_array(103031, false); -WARNING: could not find any shard placements for shardId 103031 - load_shard_placement_array ----------------------------- - {} +SELECT load_shard_placement_array(540001, false); + load_shard_placement_array +----------------------------------- + {localhost:57637,localhost:57638} (1 row) -- should see column id of 'name' @@ -192,7 +193,7 @@ SELECT column_name_to_column_id('events_hash', 'non_existent'); ERROR: column "non_existent" of relation "events_hash" does not exist -- drop shard rows (must drop placements first) DELETE FROM pg_dist_shard_placement - WHERE shardid BETWEEN 103025 AND 103029; + WHERE shardid BETWEEN 540000 AND 540004; DELETE FROM pg_dist_shard WHERE logicalrelid = 'events_hash'::regclass; DELETE FROM pg_dist_shard @@ -275,9 +276,9 @@ WHERE shardid = :new_shard_id AND nodename = 'localhost' and nodeport = 5432; -- deleting or updating a non-existent row should fail SELECT delete_shard_placement_row(:new_shard_id, 'wrong_localhost', 5432); -ERROR: could not find valid entry for shard placement 103030 on node "wrong_localhost:5432" +ERROR: could not find valid entry for shard placement 540005 on 
node "wrong_localhost:5432" SELECT update_shard_placement_row_state(:new_shard_id, 'localhost', 5432, 3); -ERROR: could not find valid entry for shard placement 103030 on node "localhost:5432" +ERROR: could not find valid entry for shard placement 540005 on node "localhost:5432" -- now we'll even test our lock methods... -- use transaction to bound how long we hold the lock BEGIN; diff --git a/src/test/regress/expected/multi_drop_extension.out b/src/test/regress/expected/multi_drop_extension.out index d5c1e2627..82bd8a47d 100644 --- a/src/test/regress/expected/multi_drop_extension.out +++ b/src/test/regress/expected/multi_drop_extension.out @@ -2,6 +2,8 @@ -- MULTI_DROP_EXTENSION -- -- Tests around dropping and recreating the extension +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 550000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 550000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); master_create_distributed_table diff --git a/src/test/regress/expected/multi_dropped_column_aliases.out b/src/test/regress/expected/multi_dropped_column_aliases.out index 4329adc63..e24ba9a5b 100644 --- a/src/test/regress/expected/multi_dropped_column_aliases.out +++ b/src/test/regress/expected/multi_dropped_column_aliases.out @@ -1,5 +1,7 @@ -- Tests that check that our query functionality behaves as expected when the -- table schema is modified via ALTER statements. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 620000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 620000; SELECT count(*) FROM customer; count ------- diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 8ea35368a..870b77ae8 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -1,6 +1,8 @@ -- -- MULTI_EXPLAIN -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000; \a\t SET citus.task_executor_type TO 'real-time'; SET citus.explain_distributed_queries TO on; @@ -30,7 +32,7 @@ $BODY$ LANGUAGE plpgsql; EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Distributed Query into pg_merge_job_0040 +Distributed Query into pg_merge_job_570000 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 @@ -38,13 +40,13 @@ Distributed Query into pg_merge_job_0040 Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity - -> Seq Scan on lineitem_102010 lineitem + -> Seq Scan on lineitem_290000 lineitem Master Query -> Sort - Sort Key: (sum(((sum(intermediate_column_40_1))::bigint)))::bigint, intermediate_column_40_0 + Sort Key: (sum(((sum(intermediate_column_570000_1))::bigint)))::bigint, intermediate_column_570000_0 -> HashAggregate - Group Key: intermediate_column_40_0 - -> Seq Scan on pg_merge_job_0040 + Group Key: intermediate_column_570000_0 + -> Seq Scan on pg_merge_job_570000 -- Test JSON format EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT l_quantity, count(*) count_quantity FROM lineitem @@ -69,7 +71,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) { "Node Type": "Seq Scan", "Parent Relationship": "Outer", - "Relation Name": "lineitem_102010", + "Relation Name": "lineitem_290000", "Alias": "lineitem" } ] @@ -85,19 +87,19 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) 
{ "Plan": { "Node Type": "Sort", - "Sort Key": ["(sum(((sum(intermediate_column_41_1))::bigint)))::bigint", "intermediate_column_41_0"], + "Sort Key": ["(sum(((sum(intermediate_column_570001_1))::bigint)))::bigint", "intermediate_column_570001_0"], "Plans": [ { "Node Type": "Aggregate", "Strategy": "Hashed", "Parent Relationship": "Outer", - "Group Key": ["intermediate_column_41_0"], + "Group Key": ["intermediate_column_570001_0"], "Plans": [ { "Node Type": "Seq Scan", "Parent Relationship": "Outer", - "Relation Name": "pg_merge_job_0041", - "Alias": "pg_merge_job_0041" + "Relation Name": "pg_merge_job_570001", + "Alias": "pg_merge_job_570001" } ] } @@ -138,7 +140,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Seq Scan Outer - lineitem_102010 + lineitem_290000 lineitem @@ -154,8 +156,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Sort - (sum(((sum(intermediate_column_43_1))::bigint)))::bigint - intermediate_column_43_0 + (sum(((sum(intermediate_column_570003_1))::bigint)))::bigint + intermediate_column_570003_0 @@ -163,14 +165,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Hashed Outer - intermediate_column_43_0 + intermediate_column_570003_0 Seq Scan Outer - pg_merge_job_0043 - pg_merge_job_0043 + pg_merge_job_570003 + pg_merge_job_570003 @@ -204,31 +206,31 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" - Relation Name: "lineitem_102010" + Relation Name: "lineitem_290000" Alias: "lineitem" Master Query: - Plan: Node Type: "Sort" Sort Key: - - "(sum(((sum(intermediate_column_45_1))::bigint)))::bigint" - - "intermediate_column_45_0" + - "(sum(((sum(intermediate_column_570005_1))::bigint)))::bigint" + - "intermediate_column_570005_0" Plans: - Node Type: "Aggregate" Strategy: "Hashed" Parent Relationship: "Outer" Group Key: - - "intermediate_column_45_0" + - "intermediate_column_570005_0" Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" - Relation Name: "pg_merge_job_0045" - Alias: "pg_merge_job_0045" + Relation Name: 
"pg_merge_job_570005" + Alias: "pg_merge_job_570005" -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Distributed Query into pg_merge_job_0046 +Distributed Query into pg_merge_job_570006 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 @@ -236,17 +238,17 @@ Distributed Query into pg_merge_job_0046 Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity - -> Seq Scan on lineitem_102010 lineitem + -> Seq Scan on lineitem_290000 lineitem Master Query -> Sort - Sort Key: (sum(((sum(intermediate_column_46_1))::bigint)))::bigint, intermediate_column_46_0 + Sort Key: (sum(((sum(intermediate_column_570006_1))::bigint)))::bigint, intermediate_column_570006_0 -> HashAggregate - Group Key: intermediate_column_46_0 - -> Seq Scan on pg_merge_job_0046 + Group Key: intermediate_column_570006_0 + -> Seq Scan on pg_merge_job_570006 -- Test verbose EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; -Distributed Query into pg_merge_job_0047 +Distributed Query into pg_merge_job_570007 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 @@ -254,19 +256,19 @@ Distributed Query into pg_merge_job_0047 Node: host=localhost port=57637 dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) - -> Seq Scan on public.lineitem_102010 lineitem + -> Seq Scan on public.lineitem_290000 lineitem Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment Master Query -> Aggregate - Output: (sum(intermediate_column_47_0) / (sum(intermediate_column_47_1) / sum(intermediate_column_47_2))) - -> Seq Scan on pg_temp_2.pg_merge_job_0047 - Output: intermediate_column_47_0, intermediate_column_47_1, intermediate_column_47_2 
+ Output: (sum(intermediate_column_570007_0) / (sum(intermediate_column_570007_1) / sum(intermediate_column_570007_2))) + -> Seq Scan on pg_temp_2.pg_merge_job_570007 + Output: intermediate_column_570007_0, intermediate_column_570007_1, intermediate_column_570007_2 -- Test join EXPLAIN (COSTS FALSE) SELECT * FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5 ORDER BY l_quantity DESC LIMIT 10; -Distributed Query into pg_merge_job_0048 +Distributed Query into pg_merge_job_570008 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 @@ -277,15 +279,15 @@ Distributed Query into pg_merge_job_0048 Sort Key: lineitem.l_quantity DESC -> Hash Join Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) - -> Seq Scan on lineitem_102010 lineitem + -> Seq Scan on lineitem_290000 lineitem Filter: (l_quantity < '5'::numeric) -> Hash - -> Seq Scan on orders_102015 orders + -> Seq Scan on orders_290006 orders Master Query -> Limit -> Sort - Sort Key: intermediate_column_48_4 DESC - -> Seq Scan on pg_merge_job_0048 + Sort Key: intermediate_column_570008_4 DESC + -> Seq Scan on pg_merge_job_570008 -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem VALUES(1,0); @@ -294,8 +296,8 @@ Distributed Query Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression - -> Insert on lineitem_102009 + Node: host=localhost port=57637 dbname=regression + -> Insert on lineitem_290000 -> Result -- Test update EXPLAIN (COSTS FALSE) @@ -307,12 +309,12 @@ Distributed Query Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression - -> Update on lineitem_102009 - -> Bitmap Heap Scan on lineitem_102009 + Node: host=localhost port=57637 dbname=regression + -> Update on lineitem_290000 + -> Bitmap Heap Scan on lineitem_290000 Recheck Cond: (l_orderkey = 1) Filter: (l_partkey = 0) - -> Bitmap Index Scan on lineitem_pkey_102009 + -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 1) -- Test delete 
EXPLAIN (COSTS FALSE) @@ -323,25 +325,25 @@ Distributed Query Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression - -> Delete on lineitem_102009 - -> Bitmap Heap Scan on lineitem_102009 + Node: host=localhost port=57637 dbname=regression + -> Delete on lineitem_290000 + -> Bitmap Heap Scan on lineitem_290000 Recheck Cond: (l_orderkey = 1) Filter: (l_partkey = 0) - -> Bitmap Index Scan on lineitem_pkey_102009 + -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 1) -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; -Distributed Query into pg_merge_job_0049 +Distributed Query into pg_merge_job_570009 Executor: Router Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression - -> Bitmap Heap Scan on lineitem_102009 lineitem + -> Bitmap Heap Scan on lineitem_290000 lineitem Recheck Cond: (l_orderkey = 5) - -> Bitmap Index Scan on lineitem_pkey_102009 + -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); @@ -353,41 +355,41 @@ t EXPLAIN (COSTS FALSE) CREATE TABLE explain_result AS SELECT * FROM lineitem; -Distributed Query into pg_merge_job_0052 +Distributed Query into pg_merge_job_570012 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 -> Task Node: host=localhost port=57637 dbname=regression - -> Seq Scan on lineitem_102010 lineitem + -> Seq Scan on lineitem_290000 lineitem Master Query - -> Seq Scan on pg_merge_job_0052 + -> Seq Scan on pg_merge_job_570012 -- Test all tasks output SET citus.explain_all_tasks TO on; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -Distributed Query into pg_merge_job_0053 +Distributed Query into pg_merge_job_570013 Executor: Real-Time Task Count: 3 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate 
- -> Seq Scan on lineitem_102012 lineitem + -> Seq Scan on lineitem_290004 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate - -> Seq Scan on lineitem_102013 lineitem + -> Seq Scan on lineitem_290003 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate - -> Seq Scan on lineitem_102014 lineitem + -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) Master Query -> Aggregate - -> Seq Scan on pg_merge_job_0053 + -> Seq Scan on pg_merge_job_570013 SELECT true AS valid FROM explain_xml($$ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); t @@ -399,18 +401,18 @@ SET citus.task_executor_type TO 'task-tracker'; SET citus.explain_all_tasks TO off; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -Distributed Query into pg_merge_job_0056 +Distributed Query into pg_merge_job_570016 Executor: Task-Tracker Task Count: 3 Tasks Shown: One of 3 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate - -> Seq Scan on lineitem_102012 lineitem + -> Seq Scan on lineitem_290004 lineitem Filter: (l_orderkey > 9030) Master Query -> Aggregate - -> Seq Scan on pg_merge_job_0056 + -> Seq Scan on pg_merge_job_570016 -- Test re-partition join SET citus.large_table_shard_count TO 1; EXPLAIN (COSTS FALSE) @@ -419,7 +421,7 @@ EXPLAIN (COSTS FALSE) WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; -Distributed Query into pg_merge_job_0059 +Distributed Query into pg_merge_job_570019 Executor: Task-Tracker Task Count: 1 Tasks Shown: None, not supported for re-partition queries @@ -431,7 +433,7 @@ Distributed Query into pg_merge_job_0059 Merge Task Count: 1 Master Query -> Aggregate - -> Seq Scan on pg_merge_job_0059 + -> Seq Scan on pg_merge_job_570019 EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT count(*) FROM lineitem, orders, customer, supplier @@ -466,8 +468,8 @@ EXPLAIN 
(COSTS FALSE, FORMAT JSON) { "Node Type": "Seq Scan", "Parent Relationship": "Outer", - "Relation Name": "pg_merge_job_0062", - "Alias": "pg_merge_job_0062" + "Relation Name": "pg_merge_job_570022", + "Alias": "pg_merge_job_570022" } ] } @@ -516,8 +518,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Seq Scan Outer - pg_merge_job_0068 - pg_merge_job_0068 + pg_merge_job_570028 + pg_merge_job_570028 @@ -555,5 +557,5 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" - Relation Name: "pg_merge_job_0074" - Alias: "pg_merge_job_0074" + Relation Name: "pg_merge_job_570034" + Alias: "pg_merge_job_570034" diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out index 250678e0f..ff6c24944 100644 --- a/src/test/regress/expected/multi_explain_0.out +++ b/src/test/regress/expected/multi_explain_0.out @@ -1,6 +1,8 @@ -- -- MULTI_EXPLAIN -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000; \a\t SET citus.task_executor_type TO 'real-time'; SET citus.explain_distributed_queries TO on; @@ -30,7 +32,7 @@ $BODY$ LANGUAGE plpgsql; EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Distributed Query into pg_merge_job_0040 +Distributed Query into pg_merge_job_570000 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 @@ -38,13 +40,13 @@ Distributed Query into pg_merge_job_0040 Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity - -> Seq Scan on lineitem_102010 lineitem + -> Seq Scan on lineitem_290000 lineitem Master Query -> Sort - Sort Key: (sum(((sum(intermediate_column_40_1))::bigint)))::bigint, intermediate_column_40_0 + Sort Key: (sum(((sum(intermediate_column_570000_1))::bigint)))::bigint, intermediate_column_570000_0 -> HashAggregate - Group Key: intermediate_column_40_0 - -> Seq 
Scan on pg_merge_job_0040 + Group Key: intermediate_column_570000_0 + -> Seq Scan on pg_merge_job_570000 -- Test JSON format EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT l_quantity, count(*) count_quantity FROM lineitem @@ -69,7 +71,7 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) { "Node Type": "Seq Scan", "Parent Relationship": "Outer", - "Relation Name": "lineitem_102010", + "Relation Name": "lineitem_290000", "Alias": "lineitem" } ] @@ -85,19 +87,19 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) { "Plan": { "Node Type": "Sort", - "Sort Key": ["(sum(((sum(intermediate_column_41_1))::bigint)))::bigint", "intermediate_column_41_0"], + "Sort Key": ["(sum(((sum(intermediate_column_570001_1))::bigint)))::bigint", "intermediate_column_570001_0"], "Plans": [ { "Node Type": "Aggregate", "Strategy": "Hashed", "Parent Relationship": "Outer", - "Group Key": ["intermediate_column_41_0"], + "Group Key": ["intermediate_column_570001_0"], "Plans": [ { "Node Type": "Seq Scan", "Parent Relationship": "Outer", - "Relation Name": "pg_merge_job_0041", - "Alias": "pg_merge_job_0041" + "Relation Name": "pg_merge_job_570001", + "Alias": "pg_merge_job_570001" } ] } @@ -138,7 +140,7 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Seq Scan Outer - lineitem_102010 + lineitem_290000 lineitem @@ -154,8 +156,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Sort - (sum(((sum(intermediate_column_43_1))::bigint)))::bigint - intermediate_column_43_0 + (sum(((sum(intermediate_column_570003_1))::bigint)))::bigint + intermediate_column_570003_0 @@ -163,14 +165,14 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Hashed Outer - intermediate_column_43_0 + intermediate_column_570003_0 Seq Scan Outer - pg_merge_job_0043 - pg_merge_job_0043 + pg_merge_job_570003 + pg_merge_job_570003 @@ -204,31 +206,31 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" - Relation Name: "lineitem_102010" + Relation Name: "lineitem_290000" Alias: "lineitem" Master Query: - Plan: Node Type: "Sort" Sort Key: - - 
"(sum(((sum(intermediate_column_45_1))::bigint)))::bigint" - - "intermediate_column_45_0" + - "(sum(((sum(intermediate_column_570005_1))::bigint)))::bigint" + - "intermediate_column_570005_0" Plans: - Node Type: "Aggregate" Strategy: "Hashed" Parent Relationship: "Outer" Group Key: - - "intermediate_column_45_0" + - "intermediate_column_570005_0" Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" - Relation Name: "pg_merge_job_0045" - Alias: "pg_merge_job_0045" + Relation Name: "pg_merge_job_570005" + Alias: "pg_merge_job_570005" -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -Distributed Query into pg_merge_job_0046 +Distributed Query into pg_merge_job_570006 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 @@ -236,17 +238,17 @@ Distributed Query into pg_merge_job_0046 Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity - -> Seq Scan on lineitem_102010 lineitem + -> Seq Scan on lineitem_290000 lineitem Master Query -> Sort - Sort Key: (sum(((sum(intermediate_column_46_1))::bigint)))::bigint, intermediate_column_46_0 + Sort Key: (sum(((sum(intermediate_column_570006_1))::bigint)))::bigint, intermediate_column_570006_0 -> HashAggregate - Group Key: intermediate_column_46_0 - -> Seq Scan on pg_merge_job_0046 + Group Key: intermediate_column_570006_0 + -> Seq Scan on pg_merge_job_570006 -- Test verbose EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; -Distributed Query into pg_merge_job_0047 +Distributed Query into pg_merge_job_570007 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 @@ -254,19 +256,19 @@ Distributed Query into pg_merge_job_0047 Node: host=localhost port=57637 dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) - -> Seq Scan on public.lineitem_102010 lineitem + -> Seq Scan on 
public.lineitem_290000 lineitem Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment Master Query -> Aggregate - Output: (sum(intermediate_column_47_0) / (sum(intermediate_column_47_1) / sum(intermediate_column_47_2))) - -> Seq Scan on pg_temp_2.pg_merge_job_0047 - Output: intermediate_column_47_0, intermediate_column_47_1, intermediate_column_47_2 + Output: (sum(intermediate_column_570007_0) / (sum(intermediate_column_570007_1) / sum(intermediate_column_570007_2))) + -> Seq Scan on pg_temp_2.pg_merge_job_570007 + Output: intermediate_column_570007_0, intermediate_column_570007_1, intermediate_column_570007_2 -- Test join EXPLAIN (COSTS FALSE) SELECT * FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5 ORDER BY l_quantity DESC LIMIT 10; -Distributed Query into pg_merge_job_0048 +Distributed Query into pg_merge_job_570008 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 @@ -277,15 +279,15 @@ Distributed Query into pg_merge_job_0048 Sort Key: lineitem.l_quantity -> Hash Join Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) - -> Seq Scan on lineitem_102010 lineitem + -> Seq Scan on lineitem_290000 lineitem Filter: (l_quantity < 5::numeric) -> Hash - -> Seq Scan on orders_102015 orders + -> Seq Scan on orders_290006 orders Master Query -> Limit -> Sort - Sort Key: intermediate_column_48_4 - -> Seq Scan on pg_merge_job_0048 + Sort Key: intermediate_column_570008_4 + -> Seq Scan on pg_merge_job_570008 -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem VALUES(1,0); @@ -294,8 +296,8 @@ Distributed Query Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression - -> Insert on lineitem_102009 + Node: host=localhost port=57637 dbname=regression + -> Insert on lineitem_290000 -> Result -- Test update EXPLAIN (COSTS FALSE) @@ -307,12 +309,12 @@ 
Distributed Query Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression - -> Update on lineitem_102009 - -> Bitmap Heap Scan on lineitem_102009 + Node: host=localhost port=57637 dbname=regression + -> Update on lineitem_290000 + -> Bitmap Heap Scan on lineitem_290000 Recheck Cond: (l_orderkey = 1) Filter: (l_partkey = 0) - -> Bitmap Index Scan on lineitem_pkey_102009 + -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 1) -- Test delete EXPLAIN (COSTS FALSE) @@ -323,25 +325,25 @@ Distributed Query Task Count: 1 Tasks Shown: All -> Task - Node: host=localhost port=57638 dbname=regression - -> Delete on lineitem_102009 - -> Bitmap Heap Scan on lineitem_102009 + Node: host=localhost port=57637 dbname=regression + -> Delete on lineitem_290000 + -> Bitmap Heap Scan on lineitem_290000 Recheck Cond: (l_orderkey = 1) Filter: (l_partkey = 0) - -> Bitmap Index Scan on lineitem_pkey_102009 + -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 1) -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; -Distributed Query into pg_merge_job_0049 +Distributed Query into pg_merge_job_570009 Executor: Router Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression - -> Bitmap Heap Scan on lineitem_102009 lineitem + -> Bitmap Heap Scan on lineitem_290000 lineitem Recheck Cond: (l_orderkey = 5) - -> Bitmap Index Scan on lineitem_pkey_102009 + -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); @@ -353,41 +355,41 @@ t EXPLAIN (COSTS FALSE) CREATE TABLE explain_result AS SELECT * FROM lineitem; -Distributed Query into pg_merge_job_0052 +Distributed Query into pg_merge_job_570012 Executor: Real-Time Task Count: 6 Tasks Shown: One of 6 -> Task Node: host=localhost port=57637 dbname=regression - -> Seq Scan on 
lineitem_102010 lineitem + -> Seq Scan on lineitem_290000 lineitem Master Query - -> Seq Scan on pg_merge_job_0052 + -> Seq Scan on pg_merge_job_570012 -- Test all tasks output SET citus.explain_all_tasks TO on; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -Distributed Query into pg_merge_job_0053 +Distributed Query into pg_merge_job_570013 Executor: Real-Time Task Count: 3 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate - -> Seq Scan on lineitem_102012 lineitem + -> Seq Scan on lineitem_290004 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate - -> Seq Scan on lineitem_102013 lineitem + -> Seq Scan on lineitem_290003 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate - -> Seq Scan on lineitem_102014 lineitem + -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) Master Query -> Aggregate - -> Seq Scan on pg_merge_job_0053 + -> Seq Scan on pg_merge_job_570013 SELECT true AS valid FROM explain_xml($$ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); t @@ -399,18 +401,18 @@ SET citus.task_executor_type TO 'task-tracker'; SET citus.explain_all_tasks TO off; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -Distributed Query into pg_merge_job_0056 +Distributed Query into pg_merge_job_570016 Executor: Task-Tracker Task Count: 3 Tasks Shown: One of 3 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate - -> Seq Scan on lineitem_102012 lineitem + -> Seq Scan on lineitem_290004 lineitem Filter: (l_orderkey > 9030) Master Query -> Aggregate - -> Seq Scan on pg_merge_job_0056 + -> Seq Scan on pg_merge_job_570016 -- Test re-partition join SET citus.large_table_shard_count TO 1; EXPLAIN (COSTS FALSE) @@ -419,7 +421,7 @@ EXPLAIN (COSTS FALSE) WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND 
l_suppkey = s_suppkey; -Distributed Query into pg_merge_job_0059 +Distributed Query into pg_merge_job_570019 Executor: Task-Tracker Task Count: 1 Tasks Shown: None, not supported for re-partition queries @@ -431,7 +433,7 @@ Distributed Query into pg_merge_job_0059 Merge Task Count: 1 Master Query -> Aggregate - -> Seq Scan on pg_merge_job_0059 + -> Seq Scan on pg_merge_job_570019 EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT count(*) FROM lineitem, orders, customer, supplier @@ -466,8 +468,8 @@ EXPLAIN (COSTS FALSE, FORMAT JSON) { "Node Type": "Seq Scan", "Parent Relationship": "Outer", - "Relation Name": "pg_merge_job_0062", - "Alias": "pg_merge_job_0062" + "Relation Name": "pg_merge_job_570022", + "Alias": "pg_merge_job_570022" } ] } @@ -516,8 +518,8 @@ EXPLAIN (COSTS FALSE, FORMAT XML) Seq Scan Outer - pg_merge_job_0068 - pg_merge_job_0068 + pg_merge_job_570028 + pg_merge_job_570028 @@ -555,5 +557,5 @@ EXPLAIN (COSTS FALSE, FORMAT YAML) Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" - Relation Name: "pg_merge_job_0074" - Alias: "pg_merge_job_0074" + Relation Name: "pg_merge_job_570034" + Alias: "pg_merge_job_570034" diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 4983208f6..3228d2e30 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -5,6 +5,8 @@ -- -- It'd be nice to script generation of this file, but alas, that's -- not done yet. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 580000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 580000; -- DROP EXTENSION pre-created by the regression suite DROP EXTENSION citus; \c diff --git a/src/test/regress/expected/multi_fdw_create_table.out b/src/test/regress/expected/multi_fdw_create_table.out index 6557e318a..8facf2d81 100644 --- a/src/test/regress/expected/multi_fdw_create_table.out +++ b/src/test/regress/expected/multi_fdw_create_table.out @@ -1,6 +1,8 @@ -- -- MULTI_FDW_CREATE_TABLE -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 590000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 590000; -- Create new table definitions for use in testing in distributed foreign data -- wrapper functionality. SELECT fdwname FROM pg_foreign_data_wrapper; diff --git a/src/test/regress/expected/multi_fdw_master_protocol.out b/src/test/regress/expected/multi_fdw_master_protocol.out index bffb6d356..8fb73ac13 100644 --- a/src/test/regress/expected/multi_fdw_master_protocol.out +++ b/src/test/regress/expected/multi_fdw_master_protocol.out @@ -2,6 +2,8 @@ -- MULTI_FDW_MASTER_PROTOCOL -- -- Tests that check the metadata returned by the master node. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 600000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 600000; SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy @@ -20,7 +22,7 @@ SELECT * FROM master_get_table_ddl_events('lineitem'); SELECT * FROM master_get_new_shardid(); master_get_new_shardid ------------------------ - 102008 + 600000 (1 row) SELECT node_name FROM master_get_local_first_candidate_nodes(); diff --git a/src/test/regress/expected/multi_generate_ddl_commands.out b/src/test/regress/expected/multi_generate_ddl_commands.out index ff856b423..bb929e533 100644 --- a/src/test/regress/expected/multi_generate_ddl_commands.out +++ b/src/test/regress/expected/multi_generate_ddl_commands.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 610000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 610000; -- =================================================================== -- create test functions -- =================================================================== diff --git a/src/test/regress/expected/multi_hash_pruning.out b/src/test/regress/expected/multi_hash_pruning.out index 84a977fce..c7dc9e30b 100644 --- a/src/test/regress/expected/multi_hash_pruning.out +++ b/src/test/regress/expected/multi_hash_pruning.out @@ -2,6 +2,8 @@ -- MULTI_HASH_PRUNING -- -- Tests for shard and join pruning logic on hash partitioned tables. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000; -- Create a table partitioned on integer column and update partition type to -- hash. Then stage data to this table and update shard min max values with -- hashed ones. 
Hash value of 1, 2, 3 and 4 are consecutively -1905060026, @@ -47,9 +49,9 @@ SELECT count(*) FROM orders_hash_partitioned; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 DEBUG: Plan is router executable count ------- @@ -58,9 +60,9 @@ DEBUG: Plan is router executable SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 102033 -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 +DEBUG: predicate pruning for shardId 630000 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 DEBUG: Plan is router executable count ------- @@ -69,9 +71,9 @@ DEBUG: Plan is router executable SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 102033 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630000 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 DEBUG: Plan is router executable count ------- @@ -80,9 +82,9 @@ DEBUG: Plan is router executable SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 102033 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630000 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 DEBUG: Plan is router executable count ------- @@ -92,9 +94,9 @@ DEBUG: Plan is router executable SELECT count(*) FROM 
orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa'; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 DEBUG: Plan is router executable count ------- @@ -103,9 +105,9 @@ DEBUG: Plan is router executable SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 DEBUG: Plan is router executable count ------- @@ -120,36 +122,36 @@ SELECT count(*) FROM orders_hash_partitioned; (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; -DEBUG: predicate pruning for shardId 102033 -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 +DEBUG: predicate pruning for shardId 630000 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; -DEBUG: predicate pruning for shardId 102033 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630000 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning 
for shardId 630003 count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; -DEBUG: predicate pruning for shardId 102033 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630000 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 count ------- 0 @@ -157,18 +159,18 @@ DEBUG: predicate pruning for shardId 102036 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa'; -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 count ------- 0 @@ -176,9 +178,9 @@ DEBUG: predicate pruning for shardId 102036 SET citus.task_executor_type TO :actual_task_executor; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL; -DEBUG: predicate pruning for shardId 102033 -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630000 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630003 count ------- 0 @@ -198,8 +200,8 @@ SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey = 2; -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 +DEBUG: predicate pruning for shardId 630001 +DEBUG: 
predicate pruning for shardId 630002 count ------- 0 @@ -214,8 +216,8 @@ SELECT count(*) FROM orders_hash_partitioned SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa'); -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 count ------- 0 @@ -223,8 +225,8 @@ DEBUG: predicate pruning for shardId 102036 SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey is NULL; -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630003 count ------- 0 @@ -232,9 +234,9 @@ DEBUG: predicate pruning for shardId 102036 SELECT count(*) FROM (SELECT o_orderkey FROM orders_hash_partitioned WHERE o_orderkey = 1) AS orderkeys; -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 count ------- 0 @@ -285,9 +287,9 @@ SELECT count(*) FROM orders_hash_partitioned SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random() AND o_orderkey = 1; -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 count ------- 0 @@ -319,12 +321,12 @@ SELECT count(*) WHERE orders1.o_orderkey = orders2.o_orderkey AND orders1.o_orderkey = 1 AND orders2.o_orderkey is NULL; -DEBUG: predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102035 -DEBUG: predicate pruning for shardId 102036 -DEBUG: predicate pruning for shardId 102033 -DEBUG: 
predicate pruning for shardId 102034 -DEBUG: predicate pruning for shardId 102036 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630002 +DEBUG: predicate pruning for shardId 630003 +DEBUG: predicate pruning for shardId 630000 +DEBUG: predicate pruning for shardId 630001 +DEBUG: predicate pruning for shardId 630003 DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823] count ------- diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index 2fd7a89e6..b8258596f 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -3,6 +3,8 @@ -- -- Check that we can run CREATE INDEX and DROP INDEX statements on distributed -- tables. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 640000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 640000; -- -- CREATE TEST TABLES -- @@ -137,11 +139,11 @@ ERROR: creating unique indexes on append-partitioned tables is currently unsupp CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey); ERROR: relation "lineitem_orderkey_index" already exists CREATE INDEX try_index ON lineitem USING gist (l_orderkey); -WARNING: Bad result from localhost:57637 +WARNING: Bad result from localhost:57638 DETAIL: Remote message: data type bigint has no default operator class for access method "gist" ERROR: could not execute DDL command on worker node shards CREATE INDEX try_index ON lineitem (non_existent_column); -WARNING: Bad result from localhost:57637 +WARNING: Bad result from localhost:57638 DETAIL: Remote message: column "non_existent_column" does not exist ERROR: could not execute DDL command on worker node shards CREATE INDEX ON lineitem (l_orderkey); diff --git a/src/test/regress/expected/multi_join_order_additional.out b/src/test/regress/expected/multi_join_order_additional.out index f4380134d..20698676a 100644 --- 
a/src/test/regress/expected/multi_join_order_additional.out +++ b/src/test/regress/expected/multi_join_order_additional.out @@ -1,6 +1,8 @@ -- -- MULTI_JOIN_ORDER_ADDITIONAL -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 650000; -- Set configuration to print table join order and pruned shards SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; @@ -40,8 +42,8 @@ SELECT master_create_worker_shards('lineitem_hash', 2, 1); (1 row) CREATE INDEX lineitem_hash_time_index ON lineitem_hash (l_shipdate); -DEBUG: applied command on shard 102037 on node localhost:57637 -DEBUG: applied command on shard 102038 on node localhost:57638 +DEBUG: applied command on shard 650000 on node localhost:57637 +DEBUG: applied command on shard 650001 on node localhost:57638 DEBUG: building index "lineitem_hash_time_index" on table "lineitem_hash" CREATE TABLE orders_hash ( o_orderkey bigint not null, diff --git a/src/test/regress/expected/multi_join_order_tpch_large.out b/src/test/regress/expected/multi_join_order_tpch_large.out index 04759db3b..6532d9ea1 100644 --- a/src/test/regress/expected/multi_join_order_tpch_large.out +++ b/src/test/regress/expected/multi_join_order_tpch_large.out @@ -1,6 +1,8 @@ -- -- MULTI_JOIN_ORDER_TPCH_LARGE -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 660000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 660000; -- Enable configuration to print table join order SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; diff --git a/src/test/regress/expected/multi_join_pruning.out b/src/test/regress/expected/multi_join_pruning.out index 2b2968321..fd77d607b 100644 --- a/src/test/regress/expected/multi_join_pruning.out +++ b/src/test/regress/expected/multi_join_pruning.out @@ -1,6 +1,7 @@ -- -- MULTI_JOIN_PRUNING -- +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 680000; -- Check that join-pruning works for 
joins between two large relations. For now -- we only check for join-pruning between locally partitioned relations. In the -- future we want to check for pruning between re-partitioned relations as well. @@ -23,9 +24,9 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986] SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 9030; -DEBUG: predicate pruning for shardId 102009 -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 +DEBUG: predicate pruning for shardId 290000 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 DEBUG: join prunable for intervals [8997,11554] and [1,5986] DEBUG: join prunable for intervals [11554,13920] and [1,5986] DEBUG: join prunable for intervals [13921,14947] and [1,5986] @@ -38,12 +39,12 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986] -- works as expected in this case. SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 20000; -DEBUG: predicate pruning for shardId 102009 -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102012 -DEBUG: predicate pruning for shardId 102013 -DEBUG: predicate pruning for shardId 102014 +DEBUG: predicate pruning for shardId 290000 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290003 +DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290005 sum | avg -----+----- | @@ -54,10 +55,10 @@ DEBUG: predicate pruning for shardId 102014 -- out all the shards, and leave us with an empty task list. 
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000; -DEBUG: predicate pruning for shardId 102009 -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102016 +DEBUG: predicate pruning for shardId 290000 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290007 DEBUG: join prunable for intervals [8997,11554] and [1,5986] DEBUG: join prunable for intervals [11554,13920] and [1,5986] DEBUG: join prunable for intervals [13921,14947] and [1,5986] diff --git a/src/test/regress/expected/multi_large_table_join_planning.out b/src/test/regress/expected/multi_large_table_join_planning.out index ee2491426..70dfe85a0 100644 --- a/src/test/regress/expected/multi_large_table_join_planning.out +++ b/src/test/regress/expected/multi_large_table_join_planning.out @@ -5,6 +5,8 @@ -- transaction block here so that we don't emit debug messages with changing -- transaction ids in them. Also, we set the executor type to task tracker -- executor here, as we cannot run repartition jobs with real time executor. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000; BEGIN; SET client_min_messages TO DEBUG4; DEBUG: CommitTransactionCommand @@ -52,29 +54,29 @@ DEBUG: join prunable for intervals [8997,11554] and [1,5986] DEBUG: join prunable for intervals [11554,13920] and [1,5986] DEBUG: join prunable for intervals [13921,14947] and [1,5986] DEBUG: generated sql query for job 1250 and task 3 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102009 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" DEBUG: generated sql query for job 1250 and task 6 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102010 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" DEBUG: generated sql query for job 1250 and task 9 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102011 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" +DETAIL: query string: 
"SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" DEBUG: generated sql query for job 1250 and task 12 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102012 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" DEBUG: generated sql query for job 1250 and task 15 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102013 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" DEBUG: generated sql query for job 1250 and task 18 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102014 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 -DEBUG: assigned 
task 9 to node localhost:57638 -DEBUG: assigned task 18 to node localhost:57637 -DEBUG: assigned task 15 to node localhost:57638 +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > '10'::numeric)" +DEBUG: assigned task 3 to node localhost:57637 +DEBUG: assigned task 6 to node localhost:57638 +DEBUG: assigned task 9 to node localhost:57637 +DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 15 to node localhost:57637 +DEBUG: assigned task 18 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: generated sql query for job 1251 and task 3 -DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_102019 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" +DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_290010 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: generated sql query for job 1251 and task 6 -DETAIL: query 
string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_102044 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" +DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_280002 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 19 DEBUG: pruning merge fetch taskId 4 @@ -88,11 +90,11 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: generated sql query for job 1252 and task 3 -DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_102017 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY 
"pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1, "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 LIMIT '30'::bigint" +DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290008 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1, "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 LIMIT '30'::bigint" DEBUG: generated sql query for job 1252 and task 6 -DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_102043 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY 
"pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1, "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 LIMIT '30'::bigint" +DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1, "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 LIMIT '30'::bigint" DEBUG: generated sql query for job 1252 and task 9 -DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_102042 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY 
"pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1, "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 LIMIT '30'::bigint" +DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1, "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 LIMIT '30'::bigint" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 @@ -100,8 +102,8 @@ DETAIL: Creating dependency on merge taskId 10 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 13 DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 +DEBUG: assigned task 6 to node localhost:57638 +DEBUG: assigned task 3 to node localhost:57637 DEBUG: completed cleanup query for job 1252 on node "localhost:57638" DEBUG: completed cleanup query for job 1252 on node "localhost:57637" 
DEBUG: completed cleanup query for job 1251 on node "localhost:57638" @@ -156,29 +158,29 @@ ORDER BY l_partkey, o_orderkey; DEBUG: StartTransactionCommand DEBUG: generated sql query for job 1253 and task 2 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102009 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 4 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102010 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 6 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102011 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 8 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102012 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 10 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102013 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 12 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102014 lineitem WHERE (l_quantity < 5.0)" -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: assigned task 8 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 -DEBUG: assigned task 10 to node localhost:57638 +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM 
lineitem_290005 lineitem WHERE (l_quantity < 5.0)" +DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:57638 +DEBUG: assigned task 6 to node localhost:57637 +DEBUG: assigned task 8 to node localhost:57638 +DEBUG: assigned task 10 to node localhost:57637 +DEBUG: assigned task 12 to node localhost:57638 DEBUG: generated sql query for job 1254 and task 2 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_102015 orders WHERE (o_totalprice <> 4.0)" +DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290006 orders WHERE (o_totalprice <> 4.0)" DEBUG: generated sql query for job 1254 and task 4 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_102016 orders WHERE (o_totalprice <> 4.0)" -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 +DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290007 orders WHERE (o_totalprice <> 4.0)" +DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:57638 DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 diff --git a/src/test/regress/expected/multi_large_table_join_planning_0.out b/src/test/regress/expected/multi_large_table_join_planning_0.out index 7ea71fb38..65c0b8ca0 100644 --- a/src/test/regress/expected/multi_large_table_join_planning_0.out +++ b/src/test/regress/expected/multi_large_table_join_planning_0.out @@ -5,6 +5,8 @@ -- transaction block here so that we don't emit debug messages with changing -- transaction ids in them. Also, we set the executor type to task tracker -- executor here, as we cannot run repartition jobs with real time executor. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000; BEGIN; SET client_min_messages TO DEBUG4; DEBUG: CommitTransactionCommand @@ -52,29 +54,29 @@ DEBUG: join prunable for intervals [8997,11554] and [1,5986] DEBUG: join prunable for intervals [11554,13920] and [1,5986] DEBUG: join prunable for intervals [13921,14947] and [1,5986] DEBUG: generated sql query for job 1250 and task 3 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102009 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" DEBUG: generated sql query for job 1250 and task 6 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102010 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" DEBUG: generated sql query for job 1250 and task 9 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102011 lineitem JOIN orders_102015 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" +DETAIL: query string: "SELECT 
lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290006 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" DEBUG: generated sql query for job 1250 and task 12 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102012 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" DEBUG: generated sql query for job 1250 and task 15 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102013 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" DEBUG: generated sql query for job 1250 and task 18 -DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_102014 lineitem JOIN orders_102016 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 -DEBUG: assigned task 9 to node 
localhost:57638 -DEBUG: assigned task 18 to node localhost:57637 -DEBUG: assigned task 15 to node localhost:57638 +DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290007 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE (orders.o_totalprice > 10::numeric)" +DEBUG: assigned task 3 to node localhost:57637 +DEBUG: assigned task 6 to node localhost:57638 +DEBUG: assigned task 9 to node localhost:57637 +DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 15 to node localhost:57637 +DEBUG: assigned task 18 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: generated sql query for job 1251 and task 3 -DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_102019 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" +DETAIL: query string: "SELECT "pg_merge_job_1250.task_000019".intermediate_column_1250_0, "pg_merge_job_1250.task_000019".intermediate_column_1250_1, "pg_merge_job_1250.task_000019".intermediate_column_1250_2, "pg_merge_job_1250.task_000019".intermediate_column_1250_3, "pg_merge_job_1250.task_000019".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000019 "pg_merge_job_1250.task_000019" JOIN part_290010 part ON (("pg_merge_job_1250.task_000019".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: generated sql query for job 1251 and task 6 -DETAIL: query string: "SELECT 
"pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_102044 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" +DETAIL: query string: "SELECT "pg_merge_job_1250.task_000026".intermediate_column_1250_0, "pg_merge_job_1250.task_000026".intermediate_column_1250_1, "pg_merge_job_1250.task_000026".intermediate_column_1250_2, "pg_merge_job_1250.task_000026".intermediate_column_1250_3, "pg_merge_job_1250.task_000026".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000026 "pg_merge_job_1250.task_000026" JOIN part_280002 part ON (("pg_merge_job_1250.task_000026".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 19 DEBUG: pruning merge fetch taskId 4 @@ -88,11 +90,11 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: generated sql query for job 1252 and task 3 -DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_102017 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY 
"pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1, "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 LIMIT 30::bigint" +DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290008 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1, "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 LIMIT 30::bigint" DEBUG: generated sql query for job 1252 and task 6 -DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_102043 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY 
"pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1, "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 LIMIT 30::bigint" +DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1, "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 LIMIT 30::bigint" DEBUG: generated sql query for job 1252 and task 9 -DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_102042 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY 
"pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1, "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 LIMIT 30::bigint" +DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 ORDER BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1, "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 LIMIT 30::bigint" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 @@ -100,8 +102,8 @@ DETAIL: Creating dependency on merge taskId 10 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 13 DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 +DEBUG: assigned task 6 to node localhost:57638 +DEBUG: assigned task 3 to node localhost:57637 DEBUG: completed cleanup query for job 1252 on node "localhost:57638" DEBUG: completed cleanup query for job 1252 on node "localhost:57637" DEBUG: 
completed cleanup query for job 1251 on node "localhost:57638" @@ -156,29 +158,29 @@ ORDER BY l_partkey, o_orderkey; DEBUG: StartTransactionCommand DEBUG: generated sql query for job 1253 and task 2 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102009 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 4 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102010 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 6 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102011 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 8 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102012 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 10 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102013 lineitem WHERE (l_quantity < 5.0)" +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for job 1253 and task 12 -DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_102014 lineitem WHERE (l_quantity < 5.0)" -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: assigned task 8 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 -DEBUG: assigned task 10 to node localhost:57638 +DETAIL: query string: "SELECT l_partkey, l_suppkey FROM 
lineitem_290005 lineitem WHERE (l_quantity < 5.0)" +DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:57638 +DEBUG: assigned task 6 to node localhost:57637 +DEBUG: assigned task 8 to node localhost:57638 +DEBUG: assigned task 10 to node localhost:57637 +DEBUG: assigned task 12 to node localhost:57638 DEBUG: generated sql query for job 1254 and task 2 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_102015 orders WHERE (o_totalprice <> 4.0)" +DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290006 orders WHERE (o_totalprice <> 4.0)" DEBUG: generated sql query for job 1254 and task 4 -DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_102016 orders WHERE (o_totalprice <> 4.0)" -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 +DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290007 orders WHERE (o_totalprice <> 4.0)" +DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:57638 DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 diff --git a/src/test/regress/expected/multi_large_table_pruning.out b/src/test/regress/expected/multi_large_table_pruning.out index d20ae46bc..cb377cf5f 100644 --- a/src/test/regress/expected/multi_large_table_pruning.out +++ b/src/test/regress/expected/multi_large_table_pruning.out @@ -4,6 +4,8 @@ -- Tests covering partition and join-pruning for large table joins. Note that we -- set executor type to task tracker executor here, as we cannot run repartition -- jobs with real time executor. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 700000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 700000; SET citus.large_table_shard_count TO 2; SET client_min_messages TO DEBUG2; SET citus.task_executor_type TO 'task-tracker'; @@ -40,8 +42,8 @@ FROM WHERE o_custkey = c_custkey AND o_orderkey < 0; -DEBUG: predicate pruning for shardId 102015 -DEBUG: predicate pruning for shardId 102016 +DEBUG: predicate pruning for shardId 290006 +DEBUG: predicate pruning for shardId 290007 count ------- @@ -56,9 +58,9 @@ FROM WHERE o_custkey = c_custkey AND c_custkey < 0; -DEBUG: predicate pruning for shardId 102017 -DEBUG: predicate pruning for shardId 102043 -DEBUG: predicate pruning for shardId 102042 +DEBUG: predicate pruning for shardId 290008 +DEBUG: predicate pruning for shardId 280001 +DEBUG: predicate pruning for shardId 280000 count ------- @@ -115,12 +117,12 @@ FROM WHERE l_partkey = c_nationkey AND l_orderkey < 0; -DEBUG: predicate pruning for shardId 102009 -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102012 -DEBUG: predicate pruning for shardId 102013 -DEBUG: predicate pruning for shardId 102014 +DEBUG: predicate pruning for shardId 290000 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290003 +DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290005 count ------- diff --git a/src/test/regress/expected/multi_large_table_task_assignment.out b/src/test/regress/expected/multi_large_table_task_assignment.out index 74747311a..c5a95c149 100644 --- a/src/test/regress/expected/multi_large_table_task_assignment.out +++ b/src/test/regress/expected/multi_large_table_task_assignment.out @@ -5,6 +5,8 @@ -- and dual hash repartition joins. The tests also cover task assignment propagation -- from a sql task to its depended tasks. 
Note that we set the executor type to task -- tracker executor here, as we cannot run repartition jobs with real time executor. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 710000; BEGIN; SET client_min_messages TO DEBUG3; DEBUG: CommitTransactionCommand @@ -25,8 +27,8 @@ FROM WHERE o_custkey = c_custkey; DEBUG: StartTransactionCommand -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] @@ -40,8 +42,8 @@ DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 11 DEBUG: assigned task 9 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 6 to node localhost:57637 +DEBUG: assigned task 6 to node localhost:57638 +DEBUG: assigned task 3 to node localhost:57637 DEBUG: CommitTransactionCommand count ------- @@ -65,11 +67,11 @@ WHERE o_orderkey = l_orderkey; DEBUG: StartTransactionCommand DEBUG: assigned task 15 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 +DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 18 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 9 to node localhost:57637 DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 3 to node localhost:57637 +DEBUG: assigned task 6 to node localhost:57638 DEBUG: join prunable for intervals [1,2496] and [2497,4964] DEBUG: join prunable for intervals [1,2496] and [4965,5986] DEBUG: join prunable for intervals [1,2496] and [8997,11554] @@ -114,10 +116,10 @@ DEBUG: pruning merge fetch taskId 19 DETAIL: Creating dependency on 
merge taskId 47 DEBUG: pruning merge fetch taskId 22 DETAIL: Creating dependency on merge taskId 54 -DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 3 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 -DEBUG: assigned task 9 to node localhost:57638 +DEBUG: assigned task 3 to node localhost:57637 +DEBUG: assigned task 6 to node localhost:57638 +DEBUG: assigned task 9 to node localhost:57637 +DEBUG: assigned task 12 to node localhost:57638 DEBUG: assigned task 18 to node localhost:57637 DEBUG: assigned task 24 to node localhost:57638 DEBUG: propagating assignment from merge task 40 to constrained sql task 15 @@ -154,15 +156,15 @@ FROM WHERE l_partkey = c_nationkey; DEBUG: StartTransactionCommand -DEBUG: assigned task 4 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: assigned task 8 to node localhost:57637 -DEBUG: assigned task 6 to node localhost:57638 -DEBUG: assigned task 12 to node localhost:57637 -DEBUG: assigned task 10 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 6 to node localhost:57637 -DEBUG: assigned task 2 to node localhost:57638 -DEBUG: assigned task 4 to node localhost:57637 +DEBUG: assigned task 8 to node localhost:57638 +DEBUG: assigned task 10 to node localhost:57637 +DEBUG: assigned task 12 to node localhost:57638 +DEBUG: assigned task 6 to node localhost:57637 +DEBUG: assigned task 4 to node localhost:57638 +DEBUG: assigned task 2 to node localhost:57637 DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 diff --git a/src/test/regress/expected/multi_limit_clause.out b/src/test/regress/expected/multi_limit_clause.out index 18e1adf20..4ead2585f 100644 --- a/src/test/regress/expected/multi_limit_clause.out +++ b/src/test/regress/expected/multi_limit_clause.out @@ -1,6 +1,8 @@ -- -- 
MULTI_LIMIT_CLAUSE -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 730000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 730000; -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; -- Check that we can correctly handle the Limit clause in distributed queries. diff --git a/src/test/regress/expected/multi_limit_clause_approximate.out b/src/test/regress/expected/multi_limit_clause_approximate.out index d1bad570e..7008a52a8 100644 --- a/src/test/regress/expected/multi_limit_clause_approximate.out +++ b/src/test/regress/expected/multi_limit_clause_approximate.out @@ -1,6 +1,8 @@ -- -- MULTI_LIMIT_CLAUSE_APPROXIMATE -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 720000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 720000; -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; -- We first look at results with limit optimization disabled. This first query diff --git a/src/test/regress/expected/multi_master_protocol.out b/src/test/regress/expected/multi_master_protocol.out index 3d6e627a5..b000b15af 100644 --- a/src/test/regress/expected/multi_master_protocol.out +++ b/src/test/regress/expected/multi_master_protocol.out @@ -2,6 +2,8 @@ -- MULTI_MASTER_PROTOCOL -- -- Tests that check the metadata returned by the master node. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 740000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 740000; SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy @@ -20,7 +22,7 @@ SELECT * FROM master_get_table_ddl_events('lineitem'); SELECT * FROM master_get_new_shardid(); master_get_new_shardid ------------------------ - 102008 + 740000 (1 row) SELECT * FROM master_get_local_first_candidate_nodes(); diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index 84b519435..4a707f733 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 750000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 750000; -- =================================================================== -- test end-to-end modification functionality -- =================================================================== @@ -93,7 +95,7 @@ INSERT INTO append_partitioned VALUES (414123, 'AAPL', 9580, '2004-10-19 10:23:5 SET client_min_messages TO 'DEBUG2'; SET citus.task_executor_type TO 'real-time'; SELECT * FROM range_partitioned WHERE id = 32743; -DEBUG: predicate pruning for shardId 103084 +DEBUG: predicate pruning for shardId 750004 DEBUG: Plan is router executable id | symbol | bidder_id | placed_at | kind | limit_price -------+--------+-----------+--------------------------+------+------------- @@ -101,7 +103,7 @@ DEBUG: Plan is router executable (1 row) SELECT * FROM append_partitioned WHERE id = 414123; -DEBUG: predicate pruning for shardId 103086 +DEBUG: predicate pruning for shardId 750006 DEBUG: Plan is router executable id | symbol | bidder_id | placed_at | kind | limit_price 
--------+--------+-----------+--------------------------+------+------------- @@ -275,7 +277,7 @@ WHERE nodename = 'localhost' AND -- Fourth: Perform the same INSERT (primary key violation) INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); WARNING: Bad result from localhost:57638 -DETAIL: Remote message: duplicate key value violates unique constraint "limit_orders_pkey_103081" +DETAIL: Remote message: duplicate key value violates unique constraint "limit_orders_pkey_750001" -- Last: Verify the insert worked but the placement with the PK violation is now unhealthy SELECT count(*) FROM limit_orders WHERE id = 275; count diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out index 80795483c..fc3321652 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out @@ -3,16 +3,18 @@ -- -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 760000; SET client_min_messages TO DEBUG2; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 102009; +SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; shardminvalue | shardmaxvalue ---------------+--------------- 1 | 2496 (1 row) -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 102010; +SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; shardminvalue | shardmaxvalue ---------------+--------------- 2497 | 4964 @@ -21,24 +23,24 @@ SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 102010; -- Check that partition and join pruning works when min/max values exist -- Adding l_orderkey = 1 to make the query not router executable SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102013 -DEBUG: predicate pruning for shardId 102014 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290005 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ - 9030 | 1 | 09-02-1998 - 9030 | 2 | 08-19-1998 - 9030 | 3 | 08-27-1998 - 9030 | 4 | 07-20-1998 - 9030 | 5 | 09-29-1998 - 9030 | 6 | 09-03-1998 1 | 1 | 03-13-1996 1 | 2 | 04-12-1996 1 | 3 | 01-29-1996 1 | 4 | 04-21-1996 1 | 5 | 03-30-1996 1 | 6 | 01-30-1996 + 9030 | 1 | 09-02-1998 + 9030 | 2 | 08-19-1998 + 9030 | 3 | 08-27-1998 + 9030 | 4 | 07-20-1998 + 9030 | 5 | 09-29-1998 + 9030 | 6 | 09-03-1998 (12 rows) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders @@ 
-56,12 +58,12 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986] -- Now set the minimum value for a shard to null. Then check that we don't apply -- partition or join pruning for the shard with null min value. -UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 102009; +UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102013 -DEBUG: predicate pruning for shardId 102014 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290005 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 9030 | 1 | 09-02-1998 @@ -86,11 +88,11 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986] -- Next, set the maximum value for another shard to null. Then check that we -- don't apply partition or join pruning for this other shard either. -UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 102010; +UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102013 -DEBUG: predicate pruning for shardId 102014 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290005 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 9030 | 1 | 09-02-1998 @@ -114,12 +116,12 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986] -- Last, set the minimum value to 0 and check that we don't treat it as null. We -- should apply partition and join pruning for this shard now. 
-UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 102009; +UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; -DEBUG: predicate pruning for shardId 102009 -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102013 -DEBUG: predicate pruning for shardId 102014 +DEBUG: predicate pruning for shardId 290000 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290005 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 9030 | 1 | 09-02-1998 @@ -143,6 +145,6 @@ DEBUG: join prunable for intervals [13921,14947] and [1,5986] (1 row) -- Set minimum and maximum values for two shards back to their original values -UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 102009; -UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 102010; +UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; +UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001; SET client_min_messages TO NOTICE; diff --git a/src/test/regress/expected/multi_partition_pruning.out b/src/test/regress/expected/multi_partition_pruning.out index 0a3cade99..3d305b0e7 100644 --- a/src/test/regress/expected/multi_partition_pruning.out +++ b/src/test/regress/expected/multi_partition_pruning.out @@ -3,28 +3,30 @@ -- -- Tests to verify that we correctly prune unreferenced shards. For this, we -- need to increase the logging verbosity of messages displayed on the client. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 770000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 770000; SET citus.explain_distributed_queries TO off; SET client_min_messages TO DEBUG2; -- Adding additional l_orderkey = 1 to make this query not router executable SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102013 -DEBUG: predicate pruning for shardId 102014 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290005 l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ - 9030 | 1 | 09-02-1998 - 9030 | 2 | 08-19-1998 - 9030 | 3 | 08-27-1998 - 9030 | 4 | 07-20-1998 - 9030 | 5 | 09-29-1998 - 9030 | 6 | 09-03-1998 1 | 1 | 03-13-1996 1 | 2 | 04-12-1996 1 | 3 | 01-29-1996 1 | 4 | 04-21-1996 1 | 5 | 03-30-1996 1 | 6 | 01-30-1996 + 9030 | 1 | 09-02-1998 + 9030 | 2 | 08-19-1998 + 9030 | 3 | 08-27-1998 + 9030 | 4 | 07-20-1998 + 9030 | 5 | 09-29-1998 + 9030 | 6 | 09-03-1998 (12 rows) -- We use the l_linenumber field for the following aggregations. We need to use @@ -33,9 +35,9 @@ DEBUG: predicate pruning for shardId 102014 -- trigger the the creation of toasted tables and indexes. This in turn prints -- non-deterministic debug messages. To avoid this chain, we use l_linenumber. 
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -DEBUG: predicate pruning for shardId 102009 -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 +DEBUG: predicate pruning for shardId 290000 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 sum | avg -------+-------------------- 17999 | 3.0189533713518953 @@ -43,7 +45,7 @@ DEBUG: predicate pruning for shardId 102011 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE (l_orderkey < 4000 OR l_orderkey > 9030); -DEBUG: predicate pruning for shardId 102011 +DEBUG: predicate pruning for shardId 290002 sum | avg -------+-------------------- 30184 | 3.0159872102318145 @@ -51,12 +53,12 @@ DEBUG: predicate pruning for shardId 102011 -- The following query should prune out all shards and return empty results SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 20000; -DEBUG: predicate pruning for shardId 102009 -DEBUG: predicate pruning for shardId 102010 -DEBUG: predicate pruning for shardId 102011 -DEBUG: predicate pruning for shardId 102012 -DEBUG: predicate pruning for shardId 102013 -DEBUG: predicate pruning for shardId 102014 +DEBUG: predicate pruning for shardId 290000 +DEBUG: predicate pruning for shardId 290001 +DEBUG: predicate pruning for shardId 290002 +DEBUG: predicate pruning for shardId 290003 +DEBUG: predicate pruning for shardId 290004 +DEBUG: predicate pruning for shardId 290005 sum | avg -----+----- | diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out index e76482076..1932ce862 100644 --- a/src/test/regress/expected/multi_prepare_plsql.out +++ b/src/test/regress/expected/multi_prepare_plsql.out @@ -4,6 +4,8 @@ -- Many of the queries are taken from other regression test files -- and converted into both plain SQL and PL/pgsql functions, which -- use prepared statements internally. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 780000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 780000; CREATE FUNCTION sql_test_no_1() RETURNS bigint AS ' SELECT count(*) diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index b0d097a3f..3a54596a6 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -4,6 +4,8 @@ -- Tests covering PREPARE statements. Many of the queries are -- taken from other regression test files and converted into -- prepared statements. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 790000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 790000; PREPARE prepared_test_1 AS SELECT count(*) diff --git a/src/test/regress/expected/multi_prune_shard_list.out b/src/test/regress/expected/multi_prune_shard_list.out index 934639051..ea8594c24 100644 --- a/src/test/regress/expected/multi_prune_shard_list.out +++ b/src/test/regress/expected/multi_prune_shard_list.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 800000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 800000; -- =================================================================== -- create test functions -- =================================================================== @@ -47,28 +49,28 @@ SELECT master_create_worker_shards('pruning', 4, 1); SELECT prune_using_no_values('pruning'); prune_using_no_values ------------------------------- - {103070,103071,103072,103073} + {800000,800001,800002,800003} (1 row) -- with a single value, expect a single shard SELECT prune_using_single_value('pruning', 'tomato'); prune_using_single_value -------------------------- - {103072} + {800002} (1 row) -- the above is true even if that value is null SELECT prune_using_single_value('pruning', NULL); prune_using_single_value -------------------------- - {103072} + {800002} (1 row) -- build an OR clause and expect more than 
one sahrd SELECT prune_using_either_value('pruning', 'tomato', 'petunia'); prune_using_either_value -------------------------- - {103071,103072} + {800001,800002} (1 row) -- an AND clause with incompatible values returns no shards @@ -82,7 +84,7 @@ SELECT prune_using_both_values('pruning', 'tomato', 'petunia'); SELECT prune_using_both_values('pruning', 'tomato', 'rose'); prune_using_both_values ------------------------- - {103072} + {800002} (1 row) -- unit test of the equality expression generation code @@ -96,7 +98,7 @@ SELECT debug_equality_expression('pruning'); SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- - {103070,103071,103072,103073} + {800000,800001,800002,800003} (1 row) -- update only min value for one shard @@ -104,7 +106,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- - {103070,103072,103073,103071} + {800000,800001,800002,800003} (1 row) -- now lets have one more shard without min/max values @@ -112,7 +114,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- - {103070,103073,103071,103072} + {800000,800001,800002,800003} (1 row) -- now lets have one more shard without min/max values @@ -120,7 +122,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- - {103073,103070,103071,103072} + {800000,800001,800002,800003} (1 row) -- all shard placements are uninitialized @@ -128,7 +130,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals 
------------------------------- - {103070,103071,103072,103073} + {800000,800001,800002,800003} (1 row) -- create range distributed table observe shard pruning @@ -143,25 +145,25 @@ SELECT master_create_distributed_table('pruning_range', 'species', 'range'); SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard --------------------------- - 103074 + 800004 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard --------------------------- - 103075 + 800005 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard --------------------------- - 103076 + 800006 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard --------------------------- - 103077 + 800007 (1 row) -- now the comparison is done via the partition column type, which is text @@ -173,7 +175,7 @@ UPDATE pg_dist_shard SET shardminvalue = 'g', shardmaxvalue = 'h' WHERE shardid SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- - {103074,103075,103076,103077} + {800004,800005,800006,800007} (1 row) -- update only min value for one shard @@ -181,7 +183,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- - {103074,103076,103077,103075} + {800004,800005,800006,800007} (1 row) -- now lets have one more shard without min/max values @@ -189,7 +191,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- - {103074,103077,103075,103076} + {800004,800005,800006,800007} (1 row) -- now lets have one more shard without min/max values @@ -197,7 +199,7 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi SELECT 
print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- - {103077,103074,103075,103076} + {800004,800005,800006,800007} (1 row) -- all shard placements are uninitialized @@ -205,6 +207,6 @@ UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardi SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- - {103074,103075,103076,103077} + {800004,800005,800006,800007} (1 row) diff --git a/src/test/regress/expected/multi_query_directory_cleanup.out b/src/test/regress/expected/multi_query_directory_cleanup.out index a72a3a06d..65caed2e9 100644 --- a/src/test/regress/expected/multi_query_directory_cleanup.out +++ b/src/test/regress/expected/multi_query_directory_cleanup.out @@ -5,6 +5,8 @@ -- on the master node for final processing. When the query completes or fails, -- the resource owner should automatically clean up these intermediate query -- result files. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 810000; BEGIN; -- pg_ls_dir() displays jobids. 
We explicitly set the jobId sequence -- here so that the regression output becomes independent of the diff --git a/src/test/regress/expected/multi_repair_shards.out b/src/test/regress/expected/multi_repair_shards.out index bf76df9c5..29a5f96ec 100644 --- a/src/test/regress/expected/multi_repair_shards.out +++ b/src/test/regress/expected/multi_repair_shards.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 820000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 820000; -- =================================================================== -- test shard repair functionality -- =================================================================== diff --git a/src/test/regress/expected/multi_repartitioned_subquery_udf.out b/src/test/regress/expected/multi_repartitioned_subquery_udf.out index 288ee5427..39484ca4e 100644 --- a/src/test/regress/expected/multi_repartitioned_subquery_udf.out +++ b/src/test/regress/expected/multi_repartitioned_subquery_udf.out @@ -1,6 +1,8 @@ -- -- MULTI_REPARTITIONED_SUBQUERY_UDF -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 830000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 830000; -- Create UDF in master and workers \c - - - :master_port DROP FUNCTION IF EXISTS median(double precision[]); diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index 5c8578484..1971b7778 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -1,7 +1,8 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000; -- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103300; CREATE TABLE articles_hash ( id 
bigint NOT NULL, author_id bigint NOT NULL, @@ -105,8 +106,8 @@ DEBUG: Creating router plan DEBUG: Plan is router executable -- first, test zero-shard SELECT, which should return an empty row SELECT COUNT(*) FROM articles_hash WHERE author_id = 1 AND author_id = 2; -DEBUG: predicate pruning for shardId 103300 -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840000 +DEBUG: predicate pruning for shardId 840001 count ------- @@ -116,7 +117,7 @@ DEBUG: predicate pruning for shardId 103301 -- test simple select for a single row SELECT * FROM articles_hash WHERE author_id = 10 AND id = 50; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-----------+------------ @@ -126,7 +127,7 @@ DEBUG: Plan is router executable -- get all titles by a single author SELECT title FROM articles_hash WHERE author_id = 10; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable title ------------ @@ -142,7 +143,7 @@ SELECT title, word_count FROM articles_hash WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable title | word_count ------------+------------ @@ -159,7 +160,7 @@ SELECT title, id FROM articles_hash ORDER BY id LIMIT 2; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable title | id ---------+---- @@ -172,7 +173,7 @@ DEBUG: Plan is router executable SELECT title, author_id FROM articles_hash WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 title | 
author_id -------------+----------- aseptic | 7 @@ -190,7 +191,7 @@ DEBUG: predicate pruning for shardId 103301 -- same query is router executable with no order by SELECT title, author_id FROM articles_hash WHERE author_id = 7 OR author_id = 8; -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable title | author_id -------------+----------- @@ -222,7 +223,7 @@ SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash HAVING sum(word_count) > 1000 ORDER BY sum(word_count) DESC; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable author_id | corpus_size -----------+------------- @@ -320,7 +321,7 @@ SELECT * FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -336,7 +337,7 @@ DEBUG: Plan is router executable SELECT * FROM articles_hash WHERE author_id = 1 OR author_id = 17; -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -366,7 +367,7 @@ SELECT id as article_id, word_count * id as random_value FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable article_id | random_value ------------+-------------- @@ -385,7 +386,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: push down of limit count: 3 -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: join 
prunable for intervals [-2147483648,-1] and [0,2147483647] DEBUG: Plan is router executable first_author | second_word_count @@ -401,7 +402,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: push down of limit count: 3 -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 first_author | second_word_count --------------+------------------- 10 | 19519 @@ -415,7 +416,7 @@ SELECT * WHERE author_id = 1 LIMIT 3; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ @@ -431,7 +432,7 @@ SELECT * LIMIT 2 OFFSET 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ @@ -447,7 +448,7 @@ SELECT * LIMIT 2 OFFSET 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -462,7 +463,7 @@ SELECT id WHERE author_id = 1 GROUP BY id; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id ---- @@ -478,7 +479,7 @@ SELECT distinct id FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id ---- @@ -494,7 +495,7 @@ SELECT avg(word_count) FROM articles_hash WHERE author_id = 2; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103300 +DEBUG: predicate pruning for shardId 840000 DEBUG: Plan is 
router executable avg -------------------- @@ -507,7 +508,7 @@ SELECT max(word_count) as max, min(word_count) as min, FROM articles_hash WHERE author_id = 2; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103300 +DEBUG: predicate pruning for shardId 840000 DEBUG: Plan is router executable max | min | sum | cnt -------+------+-------+----- @@ -520,7 +521,7 @@ SELECT max(word_count) WHERE author_id = 1 GROUP BY author_id; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable max ------- @@ -566,7 +567,7 @@ SET client_min_messages to 'DEBUG2'; SELECT * FROM articles_hash WHERE author_id = 1 and author_id >= 1; -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -596,7 +597,7 @@ SELECT * FROM articles_hash WHERE author_id = 1 and (id = 1 or id = 41); DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ @@ -609,7 +610,7 @@ SELECT * FROM articles_hash WHERE author_id = 1 and (id = random()::int * 0); DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ @@ -647,7 +648,7 @@ SELECT * FROM articles_hash WHERE author_id = abs(-1); DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -689,7 +690,7 @@ SELECT * FROM articles_hash WHERE author_id = 1 and (id = abs(id - 2)); DEBUG: Creating 
router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ @@ -714,7 +715,7 @@ SELECT * FROM articles_hash WHERE (author_id = 1) = true; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -730,7 +731,7 @@ SELECT * FROM articles_hash WHERE (author_id = 1) and id between 0 and 20; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ @@ -743,7 +744,7 @@ SELECT * FROM articles_hash WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s'; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -756,7 +757,7 @@ SELECT * FROM articles_hash WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1); DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -769,7 +770,7 @@ SELECT * FROM articles_hash WHERE (title like '%s' or title like 'a%') and (author_id = 1); DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -785,7 +786,7 @@ SELECT * FROM articles_hash WHERE (title like '%s' or title like 'a%') and (author_id = 1) and 
(word_count < 3000 or word_count > 8000); DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ @@ -799,7 +800,7 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash WHERE author_id = 5; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable prev | title | word_count ----------+----------+------------ @@ -815,7 +816,7 @@ SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count WHERE author_id = 5 ORDER BY word_count DESC; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable prev | title | word_count ----------+----------+------------ @@ -830,7 +831,7 @@ SELECT id, MIN(id) over (order by word_count) FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | min ----+----- @@ -845,7 +846,7 @@ SELECT id, word_count, AVG(word_count) over (order by word_count) FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | word_count | avg ----+------------+----------------------- @@ -860,7 +861,7 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable word_count | rank ------------+------ @@ -899,7 +900,7 @@ SELECT WHERE author_id = 5; DEBUG: Creating router plan -DEBUG: 
predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable c --- @@ -941,7 +942,7 @@ SELECT * WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -961,7 +962,7 @@ DECLARE test_cursor CURSOR FOR WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable FETCH test_cursor; id | author_id | title | word_count @@ -983,7 +984,7 @@ COPY ( WHERE author_id = 1 ORDER BY id) TO STDOUT; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable 1 1 arsenous 9572 11 1 alamo 1347 @@ -998,14 +999,14 @@ CREATE TEMP TABLE temp_articles_hash as WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable -- router plannable queries may include filter for aggragates SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable count | count -------+------- @@ -1028,7 +1029,7 @@ PREPARE author_1_articles as WHERE author_id = 1; EXECUTE author_1_articles; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -1046,7 +1047,7 @@ PREPARE author_articles(int) as WHERE author_id = $1; EXECUTE author_articles(1); DEBUG: Creating router 
plan -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -1073,7 +1074,7 @@ DEBUG: Creating router plan CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement @@ -1103,7 +1104,7 @@ CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash ah WHERE author_id = 1" @@ -1118,7 +1119,7 @@ SET citus.task_executor_type to 'task-tracker'; SELECT id FROM articles_hash WHERE author_id = 1; -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 id ---- 1 @@ -1136,7 +1137,7 @@ DEBUG: Plan is router executable SELECT id FROM articles_hash WHERE author_id = 1; -DEBUG: predicate pruning for shardId 103301 +DEBUG: predicate pruning for shardId 840001 id ---- 1 diff --git a/src/test/regress/expected/multi_shard_modify.out b/src/test/regress/expected/multi_shard_modify.out index 03a5ec3a8..59a815659 100644 --- a/src/test/regress/expected/multi_shard_modify.out +++ b/src/test/regress/expected/multi_shard_modify.out @@ -2,6 +2,7 @@ -- MULTI_SHARD_MODIFY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000; -- Create a new hash partitioned multi_shard_modify_test table and stage data into it. 
CREATE TABLE multi_shard_modify_test ( t_key integer not null, diff --git a/src/test/regress/expected/multi_simple_queries.out b/src/test/regress/expected/multi_simple_queries.out index 9a857511a..6684d578b 100644 --- a/src/test/regress/expected/multi_simple_queries.out +++ b/src/test/regress/expected/multi_simple_queries.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 850000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 850000; -- =================================================================== -- test end-to-end query functionality -- =================================================================== @@ -323,7 +325,7 @@ SELECT * FROM articles WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103108 +DEBUG: predicate pruning for shardId 850001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -338,7 +340,7 @@ DEBUG: Plan is router executable SELECT * FROM articles WHERE author_id = 1 OR author_id = 17; -DEBUG: predicate pruning for shardId 103108 +DEBUG: predicate pruning for shardId 850001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ @@ -367,7 +369,7 @@ SELECT id as article_id, word_count * id as random_value FROM articles WHERE author_id = 1; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103108 +DEBUG: predicate pruning for shardId 850001 DEBUG: Plan is router executable article_id | random_value ------------+-------------- @@ -385,7 +387,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: push down of limit count: 3 -DEBUG: predicate pruning for shardId 103108 +DEBUG: predicate pruning for shardId 850001 DEBUG: join prunable for intervals [-2147483648,-1] and [0,2147483647] DEBUG: Plan is router executable first_author | second_word_count @@ 
-402,7 +404,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: push down of limit count: 3 -DEBUG: predicate pruning for shardId 103108 +DEBUG: predicate pruning for shardId 850001 first_author | second_word_count --------------+------------------- 10 | 19519 @@ -416,7 +418,7 @@ SELECT * WHERE author_id = 1 LIMIT 2; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103108 +DEBUG: predicate pruning for shardId 850001 DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ @@ -432,7 +434,7 @@ SELECT id WHERE author_id = 1 GROUP BY id; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103108 +DEBUG: predicate pruning for shardId 850001 DEBUG: Plan is router executable id ---- @@ -452,7 +454,7 @@ SELECT avg(word_count) FROM articles WHERE author_id = 2; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103107 +DEBUG: predicate pruning for shardId 850000 DEBUG: Plan is router executable avg -------------------- @@ -466,7 +468,7 @@ SELECT max(word_count) as max, min(word_count) as min, FROM articles WHERE author_id = 2; DEBUG: Creating router plan -DEBUG: predicate pruning for shardId 103107 +DEBUG: predicate pruning for shardId 850000 DEBUG: Plan is router executable max | min | sum | cnt -------+------+-------+----- @@ -477,7 +479,7 @@ DEBUG: Plan is router executable SELECT * FROM articles a, articles b WHERE a.id = b.id AND a.author_id = 1; -DEBUG: predicate pruning for shardId 103108 +DEBUG: predicate pruning for shardId 850001 DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 diff --git a/src/test/regress/expected/multi_single_relation_subquery.out b/src/test/regress/expected/multi_single_relation_subquery.out index 47c8c9c82..7e3358819 100644 --- 
a/src/test/regress/expected/multi_single_relation_subquery.out +++ b/src/test/regress/expected/multi_single_relation_subquery.out @@ -2,6 +2,8 @@ -- MULTI_SINGLE_RELATION_SUBQUERY -- -- This test checks that we are able to run selected set of distributed SQL subqueries. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 860000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 860000; SET citus.task_executor_type TO 'task-tracker'; select number_sum, diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out index 20b39dabb..27400cf5e 100644 --- a/src/test/regress/expected/multi_table_ddl.out +++ b/src/test/regress/expected/multi_table_ddl.out @@ -2,6 +2,8 @@ -- MULTI_TABLE_DDL -- -- Tests around changing the schema and dropping of a distributed table +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 870000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 870000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); master_create_distributed_table diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index 9b6cce828..f571238ad 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -1,6 +1,8 @@ -- -- MULTI_TASK_ASSIGNMENT -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 880000; SET citus.explain_distributed_queries TO off; -- Check that our policies for assigning tasks to worker nodes run as expected. 
-- To test this, we first create a shell table, and then manually insert shard diff --git a/src/test/regress/expected/multi_tpch_query1.out b/src/test/regress/expected/multi_tpch_query1.out index 23a507e06..6bc93dc4d 100644 --- a/src/test/regress/expected/multi_tpch_query1.out +++ b/src/test/regress/expected/multi_tpch_query1.out @@ -1,6 +1,8 @@ -- -- MULTI_TPCH_QUERY1 -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 890000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 890000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query10.out b/src/test/regress/expected/multi_tpch_query10.out index 82eefa6df..2923dd432 100644 --- a/src/test/regress/expected/multi_tpch_query10.out +++ b/src/test/regress/expected/multi_tpch_query10.out @@ -4,6 +4,8 @@ -- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 900000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 900000; SELECT c_custkey, c_name, diff --git a/src/test/regress/expected/multi_tpch_query12.out b/src/test/regress/expected/multi_tpch_query12.out index e54d3e0a4..ca9662c3e 100644 --- a/src/test/regress/expected/multi_tpch_query12.out +++ b/src/test/regress/expected/multi_tpch_query12.out @@ -1,6 +1,8 @@ -- -- MULTI_TPCH_QUERY12 -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 910000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 910000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query14.out b/src/test/regress/expected/multi_tpch_query14.out index 2eded6dda..9bf8a5a82 100644 --- a/src/test/regress/expected/multi_tpch_query14.out +++ b/src/test/regress/expected/multi_tpch_query14.out @@ -1,6 +1,8 @@ -- -- MULTI_TPCH_QUERY14 -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 920000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 920000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query19.out b/src/test/regress/expected/multi_tpch_query19.out index d6ee657c1..e0f1f0d86 100644 --- a/src/test/regress/expected/multi_tpch_query19.out +++ b/src/test/regress/expected/multi_tpch_query19.out @@ -1,6 +1,8 @@ -- -- MULTI_TPCH_QUERY19 -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 930000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 930000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. 
Note that we modified diff --git a/src/test/regress/expected/multi_tpch_query3.out b/src/test/regress/expected/multi_tpch_query3.out index da87dc8f5..67797e2e1 100644 --- a/src/test/regress/expected/multi_tpch_query3.out +++ b/src/test/regress/expected/multi_tpch_query3.out @@ -4,6 +4,8 @@ -- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 940000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 940000; SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, diff --git a/src/test/regress/expected/multi_tpch_query6.out b/src/test/regress/expected/multi_tpch_query6.out index 9badebd9e..3d29efa07 100644 --- a/src/test/regress/expected/multi_tpch_query6.out +++ b/src/test/regress/expected/multi_tpch_query6.out @@ -1,6 +1,8 @@ -- -- MULTI_TPCH_QUERY6 -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 950000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 950000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query7.out b/src/test/regress/expected/multi_tpch_query7.out index 55ab9ae63..69c8af9c4 100644 --- a/src/test/regress/expected/multi_tpch_query7.out +++ b/src/test/regress/expected/multi_tpch_query7.out @@ -1,6 +1,8 @@ -- -- MULTI_TPCH_QUERY7 -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 970000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 970000; -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query7_nested.out 
b/src/test/regress/expected/multi_tpch_query7_nested.out index d6d6acf71..1faa07b9c 100644 --- a/src/test/regress/expected/multi_tpch_query7_nested.out +++ b/src/test/regress/expected/multi_tpch_query7_nested.out @@ -1,6 +1,8 @@ -- -- MULTI_TPCH_QUERY7_NESTED -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 960000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 960000; -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects diff --git a/src/test/regress/expected/multi_upsert.out b/src/test/regress/expected/multi_upsert.out index 12864f4e4..ec9597171 100644 --- a/src/test/regress/expected/multi_upsert.out +++ b/src/test/regress/expected/multi_upsert.out @@ -1,6 +1,8 @@ -- this test file aims to test UPSERT feature on Citus -- note that output of this file for postgresql 9.4 will -- be full syntax errors, which is expected. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 980000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 980000; CREATE TABLE upsert_test ( part_key int UNIQUE, diff --git a/src/test/regress/expected/multi_upsert_0.out b/src/test/regress/expected/multi_upsert_0.out index c8c1e705e..09b052c1c 100644 --- a/src/test/regress/expected/multi_upsert_0.out +++ b/src/test/regress/expected/multi_upsert_0.out @@ -1,6 +1,8 @@ -- this test file aims to test UPSERT feature on Citus -- note that output of this file for postgresql 9.4 will -- be full syntax errors, which is expected. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 980000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 980000; CREATE TABLE upsert_test ( part_key int UNIQUE, diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index 6279c4806..ec20ec71b 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 990000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 990000; -- =================================================================== -- test utility statement functionality -- =================================================================== diff --git a/src/test/regress/expected/multi_utility_statements.out b/src/test/regress/expected/multi_utility_statements.out index 1b502f20b..e4891e6c1 100644 --- a/src/test/regress/expected/multi_utility_statements.out +++ b/src/test/regress/expected/multi_utility_statements.out @@ -4,6 +4,8 @@ -- Check that we can run utility statements with embedded SELECT statements on -- distributed tables. Currently we only support CREATE TABLE AS (SELECT..), -- DECLARE CURSOR, and COPY ... TO statements. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1000000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1000000; CREATE TEMP TABLE lineitem_pricing_summary AS ( SELECT diff --git a/src/test/regress/expected/multi_utility_warnings.out b/src/test/regress/expected/multi_utility_warnings.out index b5ce4ec44..6d4f95406 100644 --- a/src/test/regress/expected/multi_utility_warnings.out +++ b/src/test/regress/expected/multi_utility_warnings.out @@ -3,6 +3,8 @@ -- -- Tests to check if we inform the user about potential caveats of creating new -- databases, schemas, and roles. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1010000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1010000; CREATE DATABASE new_database; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to workers diff --git a/src/test/regress/expected/multi_verify_no_join_with_alias.out b/src/test/regress/expected/multi_verify_no_join_with_alias.out index 8d33250cd..0e6e0df20 100644 --- a/src/test/regress/expected/multi_verify_no_join_with_alias.out +++ b/src/test/regress/expected/multi_verify_no_join_with_alias.out @@ -3,6 +3,8 @@ -- -- This test checks that we simply emit an error message instead of trying to -- fetch and join a shard which has an alias set. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1020000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1020000; -- Show that the join works without an alias SELECT COUNT(*) FROM lineitem, part WHERE l_partkey = p_partkey; count @@ -11,10 +13,10 @@ SELECT COUNT(*) FROM lineitem, part WHERE l_partkey = p_partkey; (1 row) -- Assign an alias to the parts shard -UPDATE pg_dist_shard SET shardalias = 'my_alias' WHERE shardid = 102019; +UPDATE pg_dist_shard SET shardalias = 'my_alias' WHERE shardid = 290000; -- Attempt a join which uses this shard SELECT COUNT(*) FROM lineitem, part WHERE l_partkey = p_partkey; -ERROR: cannot fetch shard 102019 +ERROR: cannot fetch shard 290000 DETAIL: Fetching shards with aliases is currently unsupported -- Remove the alias from the parts shard -UPDATE pg_dist_shard SET shardalias = NULL WHERE shardid = 102019; +UPDATE pg_dist_shard SET shardalias = NULL WHERE shardid = 290000; diff --git a/src/test/regress/expected/multi_verify_no_subquery.out b/src/test/regress/expected/multi_verify_no_subquery.out index db1501098..74cef3a26 100644 --- a/src/test/regress/expected/multi_verify_no_subquery.out +++ b/src/test/regress/expected/multi_verify_no_subquery.out @@ -3,6 +3,8 @@ -- -- This 
test checks that we simply emit an error message instead of trying to -- process a distributed unsupported SQL subquery. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1030000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1030000; SELECT * FROM lineitem WHERE l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_quantity > 0); ERROR: cannot perform distributed planning on this query diff --git a/src/test/regress/expected/multi_working_columns.out b/src/test/regress/expected/multi_working_columns.out index 38c05af49..13b5dadc1 100644 --- a/src/test/regress/expected/multi_working_columns.out +++ b/src/test/regress/expected/multi_working_columns.out @@ -5,6 +5,8 @@ -- projection order are called working (resjunk) columns. We check in here that -- these columns are pulled to the master, and are correctly used in sorting and -- grouping. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1040000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1040000; SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; l_quantity ------------ diff --git a/src/test/regress/expected/task_tracker_assign_task.out b/src/test/regress/expected/task_tracker_assign_task.out index d9dff929b..ecbdfc461 100644 --- a/src/test/regress/expected/task_tracker_assign_task.out +++ b/src/test/regress/expected/task_tracker_assign_task.out @@ -1,6 +1,8 @@ -- -- TASK_TRACKER_ASSIGN_TASK -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1050000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1050000; \set JobId 401010 \set SimpleTaskId 101101 \set RecoverableTaskId 801102 diff --git a/src/test/regress/expected/task_tracker_cleanup_job.out b/src/test/regress/expected/task_tracker_cleanup_job.out index e1e63fbde..efe40e58b 100644 --- a/src/test/regress/expected/task_tracker_cleanup_job.out +++ b/src/test/regress/expected/task_tracker_cleanup_job.out @@ -1,6 +1,8 @@ -- -- TASK_TRACKER_CLEANUP_JOB -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq 
RESTART 1060000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1060000; \set JobId 401010 \set CompletedTaskId 801107 \set RunningTaskId 801108 diff --git a/src/test/regress/expected/task_tracker_create_table.out b/src/test/regress/expected/task_tracker_create_table.out index 30d45728c..d5fd46d63 100644 --- a/src/test/regress/expected/task_tracker_create_table.out +++ b/src/test/regress/expected/task_tracker_create_table.out @@ -1,6 +1,8 @@ -- -- TASK_TRACKER_CREATE_TABLE -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1070000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1070000; -- New table definitions to test the task tracker process and protocol CREATE TABLE lineitem_simple_task ( LIKE lineitem ); CREATE TABLE lineitem_compute_task ( LIKE lineitem ); diff --git a/src/test/regress/expected/task_tracker_partition_task.out b/src/test/regress/expected/task_tracker_partition_task.out index 2d36f93b0..bd61ada0f 100644 --- a/src/test/regress/expected/task_tracker_partition_task.out +++ b/src/test/regress/expected/task_tracker_partition_task.out @@ -1,6 +1,8 @@ -- -- TASK_TRACKER_PARTITION_TASK -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1080000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1080000; \set JobId 401010 \set PartitionTaskId 801106 \set PartitionColumn l_orderkey diff --git a/src/test/regress/expected/worker_binary_data_partition.out b/src/test/regress/expected/worker_binary_data_partition.out index afab11021..77d20f5e1 100644 --- a/src/test/regress/expected/worker_binary_data_partition.out +++ b/src/test/regress/expected/worker_binary_data_partition.out @@ -1,6 +1,8 @@ -- -- WORKER_BINARY_DATA_PARTITION -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1090000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1090000; \set JobId 201010 \set TaskId 101105 \set Partition_Column textcolumn diff --git a/src/test/regress/expected/worker_check_invalid_arguments.out 
b/src/test/regress/expected/worker_check_invalid_arguments.out index 1ecefdcb5..8affeac11 100644 --- a/src/test/regress/expected/worker_check_invalid_arguments.out +++ b/src/test/regress/expected/worker_check_invalid_arguments.out @@ -1,6 +1,8 @@ -- -- WORKER_CHECK_INVALID_ARGUMENTS -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1100000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1100000; \set JobId 201010 \set TaskId 101108 \set Table_Name simple_binary_data_table diff --git a/src/test/regress/expected/worker_create_table.out b/src/test/regress/expected/worker_create_table.out index 2d083a5ae..cc773b005 100644 --- a/src/test/regress/expected/worker_create_table.out +++ b/src/test/regress/expected/worker_create_table.out @@ -4,6 +4,8 @@ -- Create new table definitions for lineitem and supplier tables to test worker -- node execution logic. For now,the tests include range and hash partitioning -- of existing tables. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1110000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1110000; CREATE TABLE lineitem ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/expected/worker_hash_partition.out b/src/test/regress/expected/worker_hash_partition.out index f324571b2..3d7d5c94d 100644 --- a/src/test/regress/expected/worker_hash_partition.out +++ b/src/test/regress/expected/worker_hash_partition.out @@ -1,6 +1,8 @@ -- -- WORKER_HASH_PARTITION -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1130000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1130000; \set JobId 201010 \set TaskId 101103 \set Partition_Column l_orderkey diff --git a/src/test/regress/expected/worker_hash_partition_complex.out b/src/test/regress/expected/worker_hash_partition_complex.out index 2dfbe3702..29624a4c0 100644 --- a/src/test/regress/expected/worker_hash_partition_complex.out +++ b/src/test/regress/expected/worker_hash_partition_complex.out @@ -1,6 +1,8 @@ -- -- 
WORKER_HASH_PARTITION_COMPLEX -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1120000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1120000; \set JobId 201010 \set TaskId 101104 \set Partition_Column l_partkey diff --git a/src/test/regress/expected/worker_merge_hash_files.out b/src/test/regress/expected/worker_merge_hash_files.out index 89246b89f..671bbb5a5 100644 --- a/src/test/regress/expected/worker_merge_hash_files.out +++ b/src/test/regress/expected/worker_merge_hash_files.out @@ -1,6 +1,8 @@ -- -- WORKER_MERGE_HASH_FILES -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1140000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1140000; \set JobId 201010 \set TaskId 101103 \set Task_Table_Name public.task_101103 diff --git a/src/test/regress/expected/worker_merge_range_files.out b/src/test/regress/expected/worker_merge_range_files.out index b39f52731..264312af7 100644 --- a/src/test/regress/expected/worker_merge_range_files.out +++ b/src/test/regress/expected/worker_merge_range_files.out @@ -1,6 +1,8 @@ -- -- WORKER_MERGE_RANGE_FILES -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1150000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1150000; \set JobId 201010 \set TaskId 101101 \set Task_Table_Name public.task_101101 diff --git a/src/test/regress/expected/worker_null_data_partition.out b/src/test/regress/expected/worker_null_data_partition.out index 11881fbc9..9180fb1ec 100644 --- a/src/test/regress/expected/worker_null_data_partition.out +++ b/src/test/regress/expected/worker_null_data_partition.out @@ -1,6 +1,8 @@ -- -- WORKER_NULL_DATA_PARTITION -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1180000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1180000; \set JobId 201010 \set Range_TaskId 101106 \set Partition_Column s_nationkey diff --git a/src/test/regress/expected/worker_range_partition.out b/src/test/regress/expected/worker_range_partition.out index f33801018..85df6d48a 100644 --- 
a/src/test/regress/expected/worker_range_partition.out +++ b/src/test/regress/expected/worker_range_partition.out @@ -1,6 +1,8 @@ -- -- WORKER_RANGE_PARTITION -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1160000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1160000; \set JobId 201010 \set TaskId 101101 \set Partition_Column l_orderkey diff --git a/src/test/regress/expected/worker_range_partition_complex.out b/src/test/regress/expected/worker_range_partition_complex.out index 57b2c7795..fc68b2d32 100644 --- a/src/test/regress/expected/worker_range_partition_complex.out +++ b/src/test/regress/expected/worker_range_partition_complex.out @@ -1,6 +1,8 @@ -- -- WORKER_RANGE_PARTITION_COMPLEX -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1170000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1170000; \set JobId 201010 \set TaskId 101102 \set Partition_Column l_partkey diff --git a/src/test/regress/input/multi_agg_distinct.source b/src/test/regress/input/multi_agg_distinct.source index 469909b3d..10695bd9d 100644 --- a/src/test/regress/input/multi_agg_distinct.source +++ b/src/test/regress/input/multi_agg_distinct.source @@ -2,8 +2,12 @@ -- MULTI_AGG_DISTINCT -- --- Create a new range partitioned lineitem table and stage data into it +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000; + + +-- Create a new range partitioned lineitem table and stage data into it CREATE TABLE lineitem_range ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/input/multi_agg_type_conversion.source b/src/test/regress/input/multi_agg_type_conversion.source index 65e4375be..3573a2c15 100644 --- a/src/test/regress/input/multi_agg_type_conversion.source +++ b/src/test/regress/input/multi_agg_type_conversion.source @@ -2,8 +2,12 @@ -- MULTI_AGG_TYPE_CONVERSION -- --- Test aggregate type conversions using sums of integers and division operator +ALTER 
SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 210000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 210000; + + +-- Test aggregate type conversions using sums of integers and division operator SELECT sum(l_suppkey) FROM lineitem; SELECT sum(l_suppkey) / 2 FROM lineitem; SELECT sum(l_suppkey) / 2::numeric FROM lineitem; diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index f7be6dc69..3df73f6db 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -2,11 +2,14 @@ -- MULTI_ALTER_TABLE_STATEMENTS -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 220000; + + -- Check that we can run ALTER TABLE statements on distributed tables. -- We set the shardid sequence here so that the shardids in this test -- aren't affected by changes to the previous tests. 
- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103000; CREATE TABLE lineitem_alter ( l_orderkey bigint not null, l_partkey integer not null, @@ -178,7 +181,7 @@ ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int; -- verify newly added column is not present in a worker shard \c - - - :worker_1_port -SELECT column_only_added_to_master FROM lineitem_alter_103000 LIMIT 0; +SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0; \c - - - :master_port -- ddl propagation flag is reset to default, disable it again diff --git a/src/test/regress/input/multi_append_table_to_shard.source b/src/test/regress/input/multi_append_table_to_shard.source index af0816f7d..4f453f9e0 100644 --- a/src/test/regress/input/multi_append_table_to_shard.source +++ b/src/test/regress/input/multi_append_table_to_shard.source @@ -1,6 +1,12 @@ -- -- MULTI_APPEND_TABLE_TO_SHARD -- + + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 230000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 230000; + + -- Initialize tables to join CREATE TABLE multi_append_table_to_shard_right ( diff --git a/src/test/regress/input/multi_complex_count_distinct.source b/src/test/regress/input/multi_complex_count_distinct.source index d9baa897a..7620e45a2 100644 --- a/src/test/regress/input/multi_complex_count_distinct.source +++ b/src/test/regress/input/multi_complex_count_distinct.source @@ -2,6 +2,11 @@ -- COMPLEX_COUNT_DISTINCT -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 240000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 240000; + + CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source index cf13682b8..daba9d182 100644 --- a/src/test/regress/input/multi_copy.source +++ b/src/test/regress/input/multi_copy.source @@ -2,7 +2,10 @@ -- MULTI_COPY -- + ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 560000; 
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 560000; + -- Create a new hash-partitioned table into which to COPY CREATE TABLE customer_copy_hash ( diff --git a/src/test/regress/input/multi_create_schema.source b/src/test/regress/input/multi_create_schema.source index 333188d86..b2818f5ab 100644 --- a/src/test/regress/input/multi_create_schema.source +++ b/src/test/regress/input/multi_create_schema.source @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 250000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 250000; + + CREATE SCHEMA tpch CREATE TABLE nation ( n_nationkey integer not null, diff --git a/src/test/regress/input/multi_fdw_large_shardid.source b/src/test/regress/input/multi_fdw_large_shardid.source index 75a3b1cc5..cd5104c5e 100644 --- a/src/test/regress/input/multi_fdw_large_shardid.source +++ b/src/test/regress/input/multi_fdw_large_shardid.source @@ -6,7 +6,10 @@ -- differs from previous tests in that it modifies the *internal* shardId -- generator, forcing the distributed database to use 64-bit shard identifiers. + ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; + -- Stage additional data to start using large shard identifiers. diff --git a/src/test/regress/input/multi_fdw_stage_data.source b/src/test/regress/input/multi_fdw_stage_data.source index 8597407ca..0c7212b78 100644 --- a/src/test/regress/input/multi_fdw_stage_data.source +++ b/src/test/regress/input/multi_fdw_stage_data.source @@ -2,6 +2,11 @@ -- MULTI_FDW_STAGE_DATA -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 330000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 330000; + + -- Tests for staging foreign data in a distributed cluster. 
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' diff --git a/src/test/regress/input/multi_large_shardid.source b/src/test/regress/input/multi_large_shardid.source index 99f96a7df..21f69a209 100644 --- a/src/test/regress/input/multi_large_shardid.source +++ b/src/test/regress/input/multi_large_shardid.source @@ -6,7 +6,10 @@ -- differs from previous tests in that it modifies the *internal* shardId -- generator, forcing the distributed database to use 64-bit shard identifiers. + ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; + -- Stage additional data to start using large shard identifiers. diff --git a/src/test/regress/input/multi_master_delete_protocol.source b/src/test/regress/input/multi_master_delete_protocol.source index 646e0adf8..55ad68a3f 100644 --- a/src/test/regress/input/multi_master_delete_protocol.source +++ b/src/test/regress/input/multi_master_delete_protocol.source @@ -2,8 +2,12 @@ -- MULTI_MASTER_DELETE_PROTOCOL -- --- Create a new range partitioned customer_delete_protocol table and stage data into it. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000; + + +-- Create a new range partitioned customer_delete_protocol table and stage data into it. 
CREATE TABLE customer_delete_protocol ( c_custkey integer not null, c_name varchar(25) not null, diff --git a/src/test/regress/input/multi_outer_join.source b/src/test/regress/input/multi_outer_join.source index b0d0fce73..d8a6b11e1 100644 --- a/src/test/regress/input/multi_outer_join.source +++ b/src/test/regress/input/multi_outer_join.source @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 310000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 310000; + + SET citus.large_table_shard_count TO 2; SET citus.log_multi_join_order to true; SET client_min_messages TO LOG; diff --git a/src/test/regress/input/multi_stage_data.source b/src/test/regress/input/multi_stage_data.source index 9effe58d5..213521a98 100644 --- a/src/test/regress/input/multi_stage_data.source +++ b/src/test/regress/input/multi_stage_data.source @@ -1,12 +1,16 @@ -- -- MULTI_STAGE_DATA -- - -- Tests for staging data in a distributed cluster. Please note that the number -- of shards uploaded depends on two config values: citus.shard_replication_factor and -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement -- policy is left to the default value (round-robin) to test the common install case. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000; + + \STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' diff --git a/src/test/regress/input/multi_stage_large_records.source b/src/test/regress/input/multi_stage_large_records.source index 8e015d537..496a6d757 100644 --- a/src/test/regress/input/multi_stage_large_records.source +++ b/src/test/regress/input/multi_stage_large_records.source @@ -1,11 +1,15 @@ -- -- MULTI_STAGE_LARGE_RECORDS -- - -- Tests for staging data with large records (i.e. greater than the read buffer -- size, which is 32kB) in a distributed cluster. 
These tests make sure that we -- are creating shards of correct size even when records are large. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 300000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 300000; + + SET citus.shard_max_size TO "256kB"; CREATE TABLE large_records_table (data_id integer, data text); diff --git a/src/test/regress/input/multi_stage_more_data.source b/src/test/regress/input/multi_stage_more_data.source index 3ec97813f..665c8c20d 100644 --- a/src/test/regress/input/multi_stage_more_data.source +++ b/src/test/regress/input/multi_stage_more_data.source @@ -2,6 +2,11 @@ -- MULTI_STAGE_MORE_DATA -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000; + + -- We stage more data to customer and part tables to test distributed joins. The -- staging causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. diff --git a/src/test/regress/input/multi_subquery.source b/src/test/regress/input/multi_subquery.source index 032b044cf..52bbd7199 100644 --- a/src/test/regress/input/multi_subquery.source +++ b/src/test/regress/input/multi_subquery.source @@ -2,8 +2,12 @@ -- MULTI_SUBQUERY -- --- Create tables for subquery tests +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 270000; + + +-- Create tables for subquery tests CREATE TABLE lineitem_subquery ( l_orderkey bigint not null, l_partkey integer not null, @@ -101,7 +105,7 @@ FROM -- Update metadata in order to make all shards equal. -UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 102024; +UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 270003; -- If group by is not on partition column then we error out. 
@@ -350,5 +354,3 @@ SELECT * FROM AS foo; DROP TABLE subquery_pruning_varchar_test_table; - -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102026; diff --git a/src/test/regress/input/worker_copy.source b/src/test/regress/input/worker_copy.source index 0aba63e76..65ca778db 100644 --- a/src/test/regress/input/worker_copy.source +++ b/src/test/regress/input/worker_copy.source @@ -2,6 +2,11 @@ -- WORKER_COPY -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 260000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 260000; + + COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' WITH DELIMITER '|'; COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' WITH DELIMITER '|'; diff --git a/src/test/regress/output/multi_agg_distinct.source b/src/test/regress/output/multi_agg_distinct.source index d9a4967fa..42b25810f 100644 --- a/src/test/regress/output/multi_agg_distinct.source +++ b/src/test/regress/output/multi_agg_distinct.source @@ -1,6 +1,8 @@ -- -- MULTI_AGG_DISTINCT -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 200000; -- Create a new range partitioned lineitem table and stage data into it CREATE TABLE lineitem_range ( l_orderkey bigint not null, diff --git a/src/test/regress/output/multi_agg_type_conversion.source b/src/test/regress/output/multi_agg_type_conversion.source index cb73a98cb..49f65752f 100644 --- a/src/test/regress/output/multi_agg_type_conversion.source +++ b/src/test/regress/output/multi_agg_type_conversion.source @@ -1,6 +1,8 @@ -- -- MULTI_AGG_TYPE_CONVERSION -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 210000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 210000; -- Test aggregate type conversions using sums of integers and division operator SELECT sum(l_suppkey) FROM lineitem; sum diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index 
378248546..467793f28 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -1,10 +1,11 @@ -- -- MULTI_ALTER_TABLE_STATEMENTS -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 220000; -- Check that we can run ALTER TABLE statements on distributed tables. -- We set the shardid sequence here so that the shardids in this test -- aren't affected by changes to the previous tests. -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103000; CREATE TABLE lineitem_alter ( l_orderkey bigint not null, l_partkey integer not null, @@ -166,10 +167,10 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; \STAGE lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' ERROR: null value in column "int_column2" violates not-null constraint DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). -CONTEXT: COPY lineitem_alter_103006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..." +CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..." ERROR: null value in column "int_column2" violates not-null constraint DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 03-13-1996, 02-12-1996, 03-22-1996, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). 
-CONTEXT: COPY lineitem_alter_103006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..." +CONTEXT: COPY lineitem_alter_220006, line 1: "1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|e..." \stage: failed to replicate shard to enough replicas -- Verify that DROP NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; @@ -261,7 +262,7 @@ NOTICE: relation "non_existent_table" does not exist, skipping ALTER TABLE IF EXISTS lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE INTEGER; ALTER TABLE lineitem_alter DROP COLUMN non_existent_column; WARNING: Bad result from localhost:57638 -DETAIL: Remote message: column "non_existent_column" of relation "lineitem_alter_103000" does not exist +DETAIL: Remote message: column "non_existent_column" of relation "lineitem_alter_220000" does not exist ERROR: could not execute DDL command on worker node shards ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS non_existent_column; NOTICE: column "non_existent_column" of relation "lineitem_alter" does not exist, skipping @@ -469,9 +470,9 @@ ALTER TABLE lineitem_renamed RENAME TO lineitem_alter; ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int; -- verify newly added column is not present in a worker shard \c - - - :worker_1_port -SELECT column_only_added_to_master FROM lineitem_alter_103000 LIMIT 0; +SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0; ERROR: column "column_only_added_to_master" does not exist -LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_10300... +LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_22000... 
^ \c - - - :master_port -- ddl propagation flag is reset to default, disable it again diff --git a/src/test/regress/output/multi_append_table_to_shard.source b/src/test/regress/output/multi_append_table_to_shard.source index e6f4fba5b..f3a3b3c91 100644 --- a/src/test/regress/output/multi_append_table_to_shard.source +++ b/src/test/regress/output/multi_append_table_to_shard.source @@ -1,6 +1,8 @@ -- -- MULTI_APPEND_TABLE_TO_SHARD -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 230000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 230000; -- Initialize tables to join CREATE TABLE multi_append_table_to_shard_right ( @@ -131,7 +133,7 @@ SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage' FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right_hash'::regclass::oid = logicalrelid; -ERROR: cannot append to shardId 103011 +ERROR: cannot append to shardId 230000 DETAIL: We currently don't support appending to shards in hash-partitioned tables -- Clean up after test SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_right'); diff --git a/src/test/regress/output/multi_complex_count_distinct.source b/src/test/regress/output/multi_complex_count_distinct.source index 93b487834..b4c6b3a04 100644 --- a/src/test/regress/output/multi_complex_count_distinct.source +++ b/src/test/regress/output/multi_complex_count_distinct.source @@ -1,6 +1,8 @@ -- -- COMPLEX_COUNT_DISTINCT -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 240000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 240000; CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index 8a5d49a31..ff3990b46 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -2,6 +2,7 @@ -- MULTI_COPY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 560000; +ALTER 
SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 560000; -- Create a new hash-partitioned table into which to COPY CREATE TABLE customer_copy_hash ( c_custkey integer, diff --git a/src/test/regress/output/multi_create_schema.source b/src/test/regress/output/multi_create_schema.source index 690a309d4..0ef71898a 100644 --- a/src/test/regress/output/multi_create_schema.source +++ b/src/test/regress/output/multi_create_schema.source @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 250000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 250000; CREATE SCHEMA tpch CREATE TABLE nation ( n_nationkey integer not null, diff --git a/src/test/regress/output/multi_fdw_large_shardid.source b/src/test/regress/output/multi_fdw_large_shardid.source index 485280e68..fac7dd83f 100644 --- a/src/test/regress/output/multi_fdw_large_shardid.source +++ b/src/test/regress/output/multi_fdw_large_shardid.source @@ -5,6 +5,7 @@ -- differs from previous tests in that it modifies the *internal* shardId -- generator, forcing the distributed database to use 64-bit shard identifiers. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; -- Stage additional data to start using large shard identifiers. \STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' NOTICE: extension "file_fdw" already exists, skipping diff --git a/src/test/regress/output/multi_fdw_stage_data.source b/src/test/regress/output/multi_fdw_stage_data.source index 3cb781624..0605a9404 100644 --- a/src/test/regress/output/multi_fdw_stage_data.source +++ b/src/test/regress/output/multi_fdw_stage_data.source @@ -1,6 +1,8 @@ -- -- MULTI_FDW_STAGE_DATA -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 330000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 330000; -- Tests for staging foreign data in a distributed cluster. 
\STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' NOTICE: extension "file_fdw" already exists, skipping diff --git a/src/test/regress/output/multi_large_shardid.source b/src/test/regress/output/multi_large_shardid.source index 65d8f6bda..684732ee6 100644 --- a/src/test/regress/output/multi_large_shardid.source +++ b/src/test/regress/output/multi_large_shardid.source @@ -5,6 +5,7 @@ -- differs from previous tests in that it modifies the *internal* shardId -- generator, forcing the distributed database to use 64-bit shard identifiers. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 9000000; -- Stage additional data to start using large shard identifiers. \STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' diff --git a/src/test/regress/output/multi_master_delete_protocol.source b/src/test/regress/output/multi_master_delete_protocol.source index a1de0751c..15c9ee819 100644 --- a/src/test/regress/output/multi_master_delete_protocol.source +++ b/src/test/regress/output/multi_master_delete_protocol.source @@ -1,6 +1,8 @@ -- -- MULTI_MASTER_DELETE_PROTOCOL -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 320000; -- Create a new range partitioned customer_delete_protocol table and stage data into it. 
CREATE TABLE customer_delete_protocol ( c_custkey integer not null, diff --git a/src/test/regress/output/multi_outer_join.source b/src/test/regress/output/multi_outer_join.source index ac7b7eb3c..0abc638bc 100644 --- a/src/test/regress/output/multi_outer_join.source +++ b/src/test/regress/output/multi_outer_join.source @@ -1,3 +1,5 @@ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 310000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 310000; SET citus.large_table_shard_count TO 2; SET citus.log_multi_join_order to true; SET client_min_messages TO LOG; @@ -301,6 +303,16 @@ FROM LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ] l_custkey | r_custkey | t_custkey -----------+-----------+----------- + 21 | 21 | 21 + 22 | 22 | 22 + 23 | 23 | 23 + 24 | 24 | 24 + 25 | 25 | 25 + 26 | 26 | 26 + 27 | 27 | 27 + 28 | 28 | 28 + 29 | 29 | 29 + 30 | 30 | 30 1 | | 2 | | 3 | | @@ -316,16 +328,6 @@ LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 - 21 | 21 | 21 - 22 | 22 | 22 - 23 | 23 | 23 - 24 | 24 | 24 - 25 | 25 | 25 - 26 | 26 | 26 - 27 | 27 | 27 - 28 | 28 | 28 - 29 | 29 | 29 - 30 | 30 | 30 (25 rows) -- Right join with single shard right most table should error out @@ -347,16 +349,6 @@ FROM LOG: join order: [ "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_left" ] t_custkey | r_custkey | l_custkey -----------+-----------+----------- - 11 | 11 | 11 - 12 | 12 | 12 - 13 | 13 | 13 - 14 | 14 | 14 - 15 | 15 | 15 - 16 | 16 | - 17 | 17 | - 18 | 18 | - 19 | 19 | - 20 | 20 | 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 @@ -367,6 +359,16 @@ LOG: join order: [ "multi_outer_join_right" ][ broadcast join "multi_outer_join 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 + 11 | 11 | 11 + 12 | 12 | 12 + 13 | 13 | 13 + 14 | 14 | 14 + 15 | 15 | 15 + 16 | 16 | + 17 | 17 | + 18 | 18 | + 
19 | 19 | + 20 | 20 | (20 rows) -- Make it anti-join, should display values with l_custkey is null @@ -406,6 +408,16 @@ FROM LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] l_custkey | r_custkey -----------+----------- + 21 | 21 + 22 | 22 + 23 | 23 + 24 | 24 + 25 | 25 + 26 | 26 + 27 | 27 + 28 | 28 + 29 | 29 + 30 | 30 1 | 2 | 3 | @@ -426,16 +438,6 @@ LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer | 18 | 19 | 16 - 21 | 21 - 22 | 22 - 23 | 23 - 24 | 24 - 25 | 25 - 26 | 26 - 27 | 27 - 28 | 28 - 29 | 29 - 30 | 30 (30 rows) -- full outer join + anti (right) should work with 1-1 matched shards @@ -525,11 +527,6 @@ FROM LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ] l_custkey | r_custkey | t_custkey -----------+-----------+----------- - 11 | 11 | 11 - 12 | 12 | 12 - 13 | 13 | 13 - 14 | 14 | 14 - 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 @@ -540,6 +537,11 @@ LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 + 11 | 11 | 11 + 12 | 12 | 12 + 13 | 13 | 13 + 14 | 14 | 14 + 15 | 15 | 15 (15 rows) -- inner (broadcast) join + 2 shards left (local) join should work @@ -552,6 +554,16 @@ FROM LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_right" ] l_custkey | t_custkey | r_custkey -----------+-----------+----------- + 21 | 21 | 21 + 22 | 22 | 22 + 23 | 23 | 23 + 24 | 24 | 24 + 25 | 25 | 25 + 26 | 26 | 26 + 27 | 27 | 27 + 28 | 28 | 28 + 29 | 29 | 29 + 30 | 30 | 30 1 | 1 | 2 | 2 | 3 | 3 | @@ -567,16 +579,6 @@ LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_ 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 - 21 | 21 | 21 - 22 | 22 | 22 - 23 | 23 | 23 - 24 | 24 | 24 - 25 | 25 | 25 - 26 | 26 | 26 - 27 | 27 | 27 - 28 | 28 | 28 - 29 | 29 | 29 - 30 
| 30 | 30 (25 rows) -- inner (local) join + 2 shards left (dual partition) join should error out @@ -598,6 +600,16 @@ FROM LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_right" ] l_custkey | t_custkey | r_custkey -----------+-----------+----------- + 21 | 21 | 21 + 22 | 22 | 22 + 23 | 23 | 23 + 24 | 24 | 24 + 25 | 25 | 25 + 26 | 26 | 26 + 27 | 27 | 27 + 28 | 28 | 28 + 29 | 29 | 29 + 30 | 30 | 30 1 | 1 | 2 | 2 | 3 | 3 | @@ -613,16 +625,6 @@ LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_ 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 - 21 | 21 | 21 - 22 | 22 | 22 - 23 | 23 | 23 - 24 | 24 | 24 - 25 | 25 | 25 - 26 | 26 | 26 - 27 | 27 | 27 - 28 | 28 | 28 - 29 | 29 | 29 - 30 | 30 | 30 (25 rows) -- inner (broadcast) join + 2 shards left (local) + anti join should work @@ -660,16 +662,6 @@ FROM LOG: join order: [ "multi_outer_join_right" ][ local partition join "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ] t_custkey ----------- - 11 - 12 - 13 - 14 - 15 - 16 - 17 - 18 - 19 - 20 21 22 23 @@ -680,6 +672,16 @@ LOG: join order: [ "multi_outer_join_right" ][ local partition join "multi_oute 28 29 30 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 (20 rows) -- Add a shard to the left table that overlaps with multiple shards in the right diff --git a/src/test/regress/output/multi_stage_data.source b/src/test/regress/output/multi_stage_data.source index 0eed8965f..b322feb76 100644 --- a/src/test/regress/output/multi_stage_data.source +++ b/src/test/regress/output/multi_stage_data.source @@ -5,6 +5,8 @@ -- of shards uploaded depends on two config values: citus.shard_replication_factor and -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement -- policy is left to the default value (round-robin) to test the common install case. 
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000; \STAGE lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \STAGE lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \STAGE orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' diff --git a/src/test/regress/output/multi_stage_large_records.source b/src/test/regress/output/multi_stage_large_records.source index 24eaa14e6..ef707850c 100644 --- a/src/test/regress/output/multi_stage_large_records.source +++ b/src/test/regress/output/multi_stage_large_records.source @@ -4,6 +4,8 @@ -- Tests for staging data with large records (i.e. greater than the read buffer -- size, which is 32kB) in a distributed cluster. These tests make sure that we -- are creating shards of correct size even when records are large. +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 300000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 300000; SET citus.shard_max_size TO "256kB"; CREATE TABLE large_records_table (data_id integer, data text); SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); diff --git a/src/test/regress/output/multi_stage_more_data.source b/src/test/regress/output/multi_stage_more_data.source index 2e0f38f1d..3c2c5eba5 100644 --- a/src/test/regress/output/multi_stage_more_data.source +++ b/src/test/regress/output/multi_stage_more_data.source @@ -1,6 +1,8 @@ -- -- MULTI_STAGE_MORE_DATA -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 280000; -- We stage more data to customer and part tables to test distributed joins. The -- staging causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. 
diff --git a/src/test/regress/output/multi_subquery.source b/src/test/regress/output/multi_subquery.source index e6724b673..15ee33f2e 100644 --- a/src/test/regress/output/multi_subquery.source +++ b/src/test/regress/output/multi_subquery.source @@ -1,6 +1,8 @@ -- -- MULTI_SUBQUERY -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 270000; -- Create tables for subquery tests CREATE TABLE lineitem_subquery ( l_orderkey bigint not null, @@ -103,7 +105,7 @@ FROM ERROR: cannot push down this subquery DETAIL: Shards of relations in subquery need to have 1-to-1 shard partitioning -- Update metadata in order to make all shards equal. -UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 102024; +UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 270003; -- If group by is not on partition column then we error out. SELECT avg(order_count) @@ -349,9 +351,9 @@ SET client_min_messages TO DEBUG2; SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE a = 'onder' GROUP BY a) AS foo; -DEBUG: predicate pruning for shardId 102026 -DEBUG: predicate pruning for shardId 102027 -DEBUG: predicate pruning for shardId 102029 +DEBUG: predicate pruning for shardId 270005 +DEBUG: predicate pruning for shardId 270006 +DEBUG: predicate pruning for shardId 270008 count ------- (0 rows) @@ -359,9 +361,9 @@ DEBUG: predicate pruning for shardId 102029 SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE 'eren' = a GROUP BY a) AS foo; -DEBUG: predicate pruning for shardId 102026 -DEBUG: predicate pruning for shardId 102028 -DEBUG: predicate pruning for shardId 102029 +DEBUG: predicate pruning for shardId 270005 +DEBUG: predicate pruning for shardId 270007 +DEBUG: predicate pruning for shardId 270008 count ------- (0 rows) @@ -400,4 +402,3 @@ AS foo; (0 rows) DROP TABLE subquery_pruning_varchar_test_table; -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 
102026; diff --git a/src/test/regress/output/worker_copy.source b/src/test/regress/output/worker_copy.source index 954d47931..31c3c19c4 100644 --- a/src/test/regress/output/worker_copy.source +++ b/src/test/regress/output/worker_copy.source @@ -1,6 +1,8 @@ -- -- WORKER_COPY -- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 260000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 260000; COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' WITH DELIMITER '|'; COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' WITH DELIMITER '|'; COPY supplier FROM '@abs_srcdir@/data/supplier.data' WITH DELIMITER '|'; diff --git a/src/test/regress/sql/multi_agg_approximate_distinct.sql b/src/test/regress/sql/multi_agg_approximate_distinct.sql index 5ed4d527c..a79be18a1 100644 --- a/src/test/regress/sql/multi_agg_approximate_distinct.sql +++ b/src/test/regress/sql/multi_agg_approximate_distinct.sql @@ -2,6 +2,11 @@ -- MULTI_AGG_APPROXIMATE_DISTINCT -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 340000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 340000; + + -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; diff --git a/src/test/regress/sql/multi_array_agg.sql b/src/test/regress/sql/multi_array_agg.sql index d4031827a..541d5ebd9 100644 --- a/src/test/regress/sql/multi_array_agg.sql +++ b/src/test/regress/sql/multi_array_agg.sql @@ -2,6 +2,11 @@ -- MULTI_ARRAY_AGG -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 520000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 520000; + + -- Check multi_cat_agg() aggregate which is used to implement array_agg() SELECT array_cat_agg(i) FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i); diff --git a/src/test/regress/sql/multi_average_expression.sql b/src/test/regress/sql/multi_average_expression.sql index 856dfc74c..68d07625e 100644 --- a/src/test/regress/sql/multi_average_expression.sql +++ 
b/src/test/regress/sql/multi_average_expression.sql @@ -1,11 +1,15 @@ -- -- MULTI_AVERAGE_EXPRESSION_ORDER -- - -- This test checks that the group-by columns don't need to be above an average -- expression, and can be anywhere in the projection order. This is in response -- to a bug we had due to the average expression introducing new columns. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 450000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 450000; + + SELECT sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, diff --git a/src/test/regress/sql/multi_basic_queries.sql b/src/test/regress/sql/multi_basic_queries.sql index 10e195725..f00d0a148 100644 --- a/src/test/regress/sql/multi_basic_queries.sql +++ b/src/test/regress/sql/multi_basic_queries.sql @@ -2,6 +2,11 @@ -- MULTI_BASIC_QUERIES -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 440000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 440000; + + -- Execute simple sum, average, and count queries on data recently uploaded to -- our partitioned table. 
diff --git a/src/test/regress/sql/multi_binary_master_copy_format.sql b/src/test/regress/sql/multi_binary_master_copy_format.sql index 4c67eb2d2..94ca3ccfb 100644 --- a/src/test/regress/sql/multi_binary_master_copy_format.sql +++ b/src/test/regress/sql/multi_binary_master_copy_format.sql @@ -2,6 +2,11 @@ -- MULTI_BINARY_MASTER_COPY -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 430000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 430000; + + -- Try binary master copy for different executors SET citus.binary_master_copy_format TO 'on'; diff --git a/src/test/regress/sql/multi_complex_expressions.sql b/src/test/regress/sql/multi_complex_expressions.sql index 1eaafc86c..89205be85 100644 --- a/src/test/regress/sql/multi_complex_expressions.sql +++ b/src/test/regress/sql/multi_complex_expressions.sql @@ -2,6 +2,11 @@ -- MULTI_COMPLEX_EXPRESSIONS -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 420000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 420000; + + -- Check that we can correctly handle complex expressions and aggregates. 
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; diff --git a/src/test/regress/sql/multi_connection_cache.sql b/src/test/regress/sql/multi_connection_cache.sql index ea1bdff7d..442545c73 100644 --- a/src/test/regress/sql/multi_connection_cache.sql +++ b/src/test/regress/sql/multi_connection_cache.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 410000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 410000; + + -- =================================================================== -- create test functions -- =================================================================== diff --git a/src/test/regress/sql/multi_count_type_conversion.sql b/src/test/regress/sql/multi_count_type_conversion.sql index 2f84da988..ee45ff11d 100644 --- a/src/test/regress/sql/multi_count_type_conversion.sql +++ b/src/test/regress/sql/multi_count_type_conversion.sql @@ -2,6 +2,11 @@ -- MULTI_COUNT_TYPE_CONVERSION -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 400000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 400000; + + -- Verify that we can sort count(*) results correctly. We perform this check as -- our count() operations execute in two steps: worker nodes report their -- count() results, and the master node sums these counts up. 
During this sum(), diff --git a/src/test/regress/sql/multi_create_fdw.sql b/src/test/regress/sql/multi_create_fdw.sql index dd828a66b..e8bd25386 100644 --- a/src/test/regress/sql/multi_create_fdw.sql +++ b/src/test/regress/sql/multi_create_fdw.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 390000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 390000; + + -- =================================================================== -- get ready for the foreign data wrapper tests -- =================================================================== diff --git a/src/test/regress/sql/multi_create_insert_proxy.sql b/src/test/regress/sql/multi_create_insert_proxy.sql index bfd3adcbd..ee8de14d8 100644 --- a/src/test/regress/sql/multi_create_insert_proxy.sql +++ b/src/test/regress/sql/multi_create_insert_proxy.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 380000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 380000; + + -- =================================================================== -- test INSERT proxy creation functionality -- =================================================================== diff --git a/src/test/regress/sql/multi_create_shards.sql b/src/test/regress/sql/multi_create_shards.sql index 1180338ac..c5e38fa63 100644 --- a/src/test/regress/sql/multi_create_shards.sql +++ b/src/test/regress/sql/multi_create_shards.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 370000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 370000; + + -- =================================================================== -- create test functions and types needed for tests -- =================================================================== diff --git a/src/test/regress/sql/multi_create_table.sql b/src/test/regress/sql/multi_create_table.sql index 3509436f6..4d9db7e37 100644 --- a/src/test/regress/sql/multi_create_table.sql +++ b/src/test/regress/sql/multi_create_table.sql 
@@ -2,6 +2,11 @@ -- MULTI_CREATE_TABLE -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 360000; + + -- Create new table definitions for use in testing in distributed planning and -- execution functionality. Also create indexes to boost performance. diff --git a/src/test/regress/sql/multi_data_types.sql b/src/test/regress/sql/multi_data_types.sql index 68b84044f..eaf4c41b4 100644 --- a/src/test/regress/sql/multi_data_types.sql +++ b/src/test/regress/sql/multi_data_types.sql @@ -3,6 +3,11 @@ -- create, distribute, INSERT, SELECT and UPDATE -- =================================================================== + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 530000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 530000; + + -- create a custom type... CREATE TYPE test_composite_type AS ( i integer, diff --git a/src/test/regress/sql/multi_distribution_metadata.sql b/src/test/regress/sql/multi_distribution_metadata.sql index 897c44c71..01c0a2c76 100644 --- a/src/test/regress/sql/multi_distribution_metadata.sql +++ b/src/test/regress/sql/multi_distribution_metadata.sql @@ -2,6 +2,11 @@ -- create test functions -- =================================================================== + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 540000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 540000; + + CREATE FUNCTION load_shard_id_array(regclass) RETURNS bigint[] AS 'citus' @@ -82,13 +87,13 @@ SELECT master_create_distributed_table('events_hash', 'name', 'hash'); SELECT master_create_worker_shards('events_hash', 4, 2); -- set shardstate of one replication from each shard to 0 (invalid value) -UPDATE pg_dist_shard_placement SET shardstate = 0 WHERE nodeport = 57638 AND shardid BETWEEN 103025 AND 103028; +UPDATE pg_dist_shard_placement SET shardstate = 0 WHERE nodeport = 57638 AND shardid BETWEEN 540000 AND 540003; -- should see above shard identifiers SELECT 
load_shard_id_array('events_hash'); -- should see array with first shard range -SELECT load_shard_interval_array(103025, 0); +SELECT load_shard_interval_array(540000, 0); -- should even work for range-partitioned shards -- create range distributed table @@ -104,21 +109,21 @@ SELECT master_create_empty_shard('events_range'); UPDATE pg_dist_shard SET shardminvalue = 'Aardvark', shardmaxvalue = 'Zebra' -WHERE shardid = 103029; +WHERE shardid = 540004; -SELECT load_shard_interval_array(103029, ''::text); +SELECT load_shard_interval_array(540004, ''::text); -- should see error for non-existent shard -SELECT load_shard_interval_array(103030, 0); +SELECT load_shard_interval_array(540005, 0); -- should see two placements -SELECT load_shard_placement_array(103026, false); +SELECT load_shard_placement_array(540001, false); -- only one of which is finalized -SELECT load_shard_placement_array(103026, true); +SELECT load_shard_placement_array(540001, true); -- should see error for non-existent shard -SELECT load_shard_placement_array(103031, false); +SELECT load_shard_placement_array(540005, false); -- should see column id of 'name' SELECT partition_column_id('events_hash'); @@ -139,7 +144,7 @@ SELECT column_name_to_column_id('events_hash', 'non_existent'); -- drop shard rows (must drop placements first) DELETE FROM pg_dist_shard_placement - WHERE shardid BETWEEN 103025 AND 103029; + WHERE shardid BETWEEN 540000 AND 540004; DELETE FROM pg_dist_shard WHERE logicalrelid = 'events_hash'::regclass; DELETE FROM pg_dist_shard diff --git a/src/test/regress/sql/multi_drop_extension.sql b/src/test/regress/sql/multi_drop_extension.sql index 41ee4f896..c5320260d 100644 --- a/src/test/regress/sql/multi_drop_extension.sql +++ b/src/test/regress/sql/multi_drop_extension.sql @@ -4,6 +4,10 @@ -- Tests around dropping and recreating the extension +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 550000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 550000; + + CREATE TABLE
testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); diff --git a/src/test/regress/sql/multi_dropped_column_aliases.sql b/src/test/regress/sql/multi_dropped_column_aliases.sql index 689e64a00..3c791ff28 100644 --- a/src/test/regress/sql/multi_dropped_column_aliases.sql +++ b/src/test/regress/sql/multi_dropped_column_aliases.sql @@ -2,6 +2,11 @@ -- Tests that check that our query functionality behaves as expected when the -- table schema is modified via ALTER statements. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 620000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 620000; + + SELECT count(*) FROM customer; SELECT * FROM customer LIMIT 2; diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql index 3138a03d7..aa16e9d3a 100644 --- a/src/test/regress/sql/multi_explain.sql +++ b/src/test/regress/sql/multi_explain.sql @@ -2,6 +2,11 @@ -- MULTI_EXPLAIN -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000; + + \a\t SET citus.task_executor_type TO 'real-time'; diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 0d33a0b82..24701b91a 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -6,6 +6,11 @@ -- It'd be nice to script generation of this file, but alas, that's -- not done yet. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 580000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 580000; + + -- DROP EXTENSION pre-created by the regression suite DROP EXTENSION citus; \c diff --git a/src/test/regress/sql/multi_fdw_create_table.sql b/src/test/regress/sql/multi_fdw_create_table.sql index e0ccd9a3e..89b9342cc 100644 --- a/src/test/regress/sql/multi_fdw_create_table.sql +++ b/src/test/regress/sql/multi_fdw_create_table.sql @@ -2,6 +2,11 @@ -- MULTI_FDW_CREATE_TABLE -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 590000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 590000; + + -- Create new table definitions for use in testing in distributed foreign data -- wrapper functionality. diff --git a/src/test/regress/sql/multi_fdw_master_protocol.sql b/src/test/regress/sql/multi_fdw_master_protocol.sql index dfd495538..7efb1ab61 100644 --- a/src/test/regress/sql/multi_fdw_master_protocol.sql +++ b/src/test/regress/sql/multi_fdw_master_protocol.sql @@ -4,6 +4,11 @@ -- Tests that check the metadata returned by the master node. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 600000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 600000; + + SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); diff --git a/src/test/regress/sql/multi_generate_ddl_commands.sql b/src/test/regress/sql/multi_generate_ddl_commands.sql index 0bd2c28fa..3e7b644a3 100644 --- a/src/test/regress/sql/multi_generate_ddl_commands.sql +++ b/src/test/regress/sql/multi_generate_ddl_commands.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 610000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 610000; + + -- =================================================================== -- create test functions -- =================================================================== diff --git a/src/test/regress/sql/multi_hash_pruning.sql b/src/test/regress/sql/multi_hash_pruning.sql index 981b6b22c..d6210d8f8 100644 --- a/src/test/regress/sql/multi_hash_pruning.sql +++ b/src/test/regress/sql/multi_hash_pruning.sql @@ -4,6 +4,11 @@ -- Tests for shard and join pruning logic on hash partitioned tables. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000; + + -- Create a table partitioned on integer column and update partition type to -- hash. Then stage data to this table and update shard min max values with -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026, diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql index 66be10f63..a71d5b82d 100644 --- a/src/test/regress/sql/multi_index_statements.sql +++ b/src/test/regress/sql/multi_index_statements.sql @@ -5,6 +5,11 @@ -- Check that we can run CREATE INDEX and DROP INDEX statements on distributed -- tables. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 640000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 640000; + + -- -- CREATE TEST TABLES -- diff --git a/src/test/regress/sql/multi_join_order_additional.sql b/src/test/regress/sql/multi_join_order_additional.sql index 221a62538..fd650e6f9 100644 --- a/src/test/regress/sql/multi_join_order_additional.sql +++ b/src/test/regress/sql/multi_join_order_additional.sql @@ -2,6 +2,11 @@ -- MULTI_JOIN_ORDER_ADDITIONAL -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 650000; + + -- Set configuration to print table join order and pruned shards SET citus.explain_distributed_queries TO off; diff --git a/src/test/regress/sql/multi_join_order_tpch_large.sql b/src/test/regress/sql/multi_join_order_tpch_large.sql index ed8ca08bd..89ab65411 100644 --- a/src/test/regress/sql/multi_join_order_tpch_large.sql +++ b/src/test/regress/sql/multi_join_order_tpch_large.sql @@ -2,6 +2,11 @@ -- MULTI_JOIN_ORDER_TPCH_LARGE -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 660000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 660000; + + -- Enable configuration to print table join order SET citus.explain_distributed_queries TO off; diff --git a/src/test/regress/sql/multi_join_pruning.sql b/src/test/regress/sql/multi_join_pruning.sql index f43632b5f..17c9bc6e1 100644 --- a/src/test/regress/sql/multi_join_pruning.sql +++ b/src/test/regress/sql/multi_join_pruning.sql @@ -2,6 +2,10 @@ -- MULTI_JOIN_PRUNING -- + +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 680000; + + -- Check that join-pruning works for joins between two large relations. For now -- we only check for join-pruning between locally partitioned relations. In the -- future we want to check for pruning between re-partitioned relations as well. 
diff --git a/src/test/regress/sql/multi_large_table_join_planning.sql b/src/test/regress/sql/multi_large_table_join_planning.sql index 8ad2a3fe6..91c2d25e6 100644 --- a/src/test/regress/sql/multi_large_table_join_planning.sql +++ b/src/test/regress/sql/multi_large_table_join_planning.sql @@ -1,12 +1,16 @@ -- -- MULTI_LARGE_TABLE_PLANNING -- - -- Tests that cover large table join planning. Note that we explicitly start a -- transaction block here so that we don't emit debug messages with changing -- transaction ids in them. Also, we set the executor type to task tracker -- executor here, as we cannot run repartition jobs with real time executor. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000; + + BEGIN; SET client_min_messages TO DEBUG4; SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_large_table_pruning.sql b/src/test/regress/sql/multi_large_table_pruning.sql index ec46eff0c..5a1f80c8c 100644 --- a/src/test/regress/sql/multi_large_table_pruning.sql +++ b/src/test/regress/sql/multi_large_table_pruning.sql @@ -1,11 +1,15 @@ -- -- MULTI_LARGE_TABLE_PRUNING -- - -- Tests covering partition and join-pruning for large table joins. Note that we -- set executor type to task tracker executor here, as we cannot run repartition -- jobs with real time executor. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 700000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 700000; + + SET citus.large_table_shard_count TO 2; SET client_min_messages TO DEBUG2; SET citus.task_executor_type TO 'task-tracker'; diff --git a/src/test/regress/sql/multi_large_table_task_assignment.sql b/src/test/regress/sql/multi_large_table_task_assignment.sql index 0793e4492..bc4d0fbb7 100644 --- a/src/test/regress/sql/multi_large_table_task_assignment.sql +++ b/src/test/regress/sql/multi_large_table_task_assignment.sql @@ -1,12 +1,16 @@ -- -- MULTI_LARGE_TABLE_TASK_ASSIGNMENT -- - -- Tests which cover task assignment for MapMerge jobs for single range repartition -- and dual hash repartition joins. The tests also cover task assignment propagation -- from a sql task to its depended tasks. Note that we set the executor type to task -- tracker executor here, as we cannot run repartition jobs with real time executor. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 710000; + + BEGIN; SET client_min_messages TO DEBUG3; SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_limit_clause.sql b/src/test/regress/sql/multi_limit_clause.sql index 9d68ec526..7cf7a1578 100644 --- a/src/test/regress/sql/multi_limit_clause.sql +++ b/src/test/regress/sql/multi_limit_clause.sql @@ -2,6 +2,11 @@ -- MULTI_LIMIT_CLAUSE -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 730000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 730000; + + -- Display debug messages on limit clause push down. 
SET client_min_messages TO DEBUG1; diff --git a/src/test/regress/sql/multi_limit_clause_approximate.sql b/src/test/regress/sql/multi_limit_clause_approximate.sql index 659e91ab6..1e82a1c44 100644 --- a/src/test/regress/sql/multi_limit_clause_approximate.sql +++ b/src/test/regress/sql/multi_limit_clause_approximate.sql @@ -2,6 +2,11 @@ -- MULTI_LIMIT_CLAUSE_APPROXIMATE -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 720000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 720000; + + -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; diff --git a/src/test/regress/sql/multi_master_protocol.sql b/src/test/regress/sql/multi_master_protocol.sql index 054220e76..2440619bb 100644 --- a/src/test/regress/sql/multi_master_protocol.sql +++ b/src/test/regress/sql/multi_master_protocol.sql @@ -1,9 +1,13 @@ -- -- MULTI_MASTER_PROTOCOL -- - -- Tests that check the metadata returned by the master node. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 740000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 740000; + + SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); diff --git a/src/test/regress/sql/multi_modifications.sql b/src/test/regress/sql/multi_modifications.sql index 69444d372..238fa57d2 100644 --- a/src/test/regress/sql/multi_modifications.sql +++ b/src/test/regress/sql/multi_modifications.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 750000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 750000; + + -- =================================================================== -- test end-to-end modification functionality -- =================================================================== diff --git a/src/test/regress/sql/multi_null_minmax_value_pruning.sql b/src/test/regress/sql/multi_null_minmax_value_pruning.sql index b471e926e..db8ce464b 100644 --- 
a/src/test/regress/sql/multi_null_minmax_value_pruning.sql +++ b/src/test/regress/sql/multi_null_minmax_value_pruning.sql @@ -5,14 +5,19 @@ -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 760000; + + SET client_min_messages TO DEBUG2; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 102009; -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 102010; +SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; +SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; -- Check that partition and join pruning works when min/max values exist -- Adding l_orderkey = 1 to make the query not router executable @@ -24,7 +29,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders -- Now set the minimum value for a shard to null. Then check that we don't apply -- partition or join pruning for the shard with null min value. -UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 102009; +UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; @@ -34,7 +39,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders -- Next, set the maximum value for another shard to null. Then check that we -- don't apply partition or join pruning for this other shard either. 
-UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 102010; +UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; @@ -44,7 +49,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders -- Last, set the minimum value to 0 and check that we don't treat it as null. We -- should apply partition and join pruning for this shard now. -UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 102009; +UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; @@ -53,7 +58,7 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders -- Set minimum and maximum values for two shards back to their original values -UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 102009; -UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 102010; +UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; +UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001; SET client_min_messages TO NOTICE; diff --git a/src/test/regress/sql/multi_partition_pruning.sql b/src/test/regress/sql/multi_partition_pruning.sql index 128ec266a..18e58d5f5 100644 --- a/src/test/regress/sql/multi_partition_pruning.sql +++ b/src/test/regress/sql/multi_partition_pruning.sql @@ -5,6 +5,11 @@ -- Tests to verify that we correctly prune unreferenced shards. For this, we -- need to increase the logging verbosity of messages displayed on the client. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 770000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 770000; + + SET citus.explain_distributed_queries TO off; SET client_min_messages TO DEBUG2; diff --git a/src/test/regress/sql/multi_prepare_plsql.sql b/src/test/regress/sql/multi_prepare_plsql.sql index f62e256df..8fd1d0bd4 100644 --- a/src/test/regress/sql/multi_prepare_plsql.sql +++ b/src/test/regress/sql/multi_prepare_plsql.sql @@ -6,6 +6,11 @@ -- and converted into both plain SQL and PL/pgsql functions, which -- use prepared statements internally. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 780000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 780000; + + CREATE FUNCTION sql_test_no_1() RETURNS bigint AS ' SELECT count(*) diff --git a/src/test/regress/sql/multi_prepare_sql.sql b/src/test/regress/sql/multi_prepare_sql.sql index 346b8de6a..18417729c 100644 --- a/src/test/regress/sql/multi_prepare_sql.sql +++ b/src/test/regress/sql/multi_prepare_sql.sql @@ -6,6 +6,11 @@ -- taken from other regression test files and converted into -- prepared statements. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 790000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 790000; + + PREPARE prepared_test_1 AS SELECT count(*) diff --git a/src/test/regress/sql/multi_prune_shard_list.sql b/src/test/regress/sql/multi_prune_shard_list.sql index 25d7be0fc..9dc60213e 100644 --- a/src/test/regress/sql/multi_prune_shard_list.sql +++ b/src/test/regress/sql/multi_prune_shard_list.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 800000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 800000; + + -- =================================================================== -- create test functions -- =================================================================== diff --git a/src/test/regress/sql/multi_query_directory_cleanup.sql b/src/test/regress/sql/multi_query_directory_cleanup.sql index 0f83e4802..4192495e9 100644 --- a/src/test/regress/sql/multi_query_directory_cleanup.sql +++ b/src/test/regress/sql/multi_query_directory_cleanup.sql @@ -7,6 +7,11 @@ -- the resource owner should automatically clean up these intermediate query -- result files. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 810000; + + BEGIN; -- pg_ls_dir() displays jobids. 
We explicitly set the jobId sequence diff --git a/src/test/regress/sql/multi_repair_shards.sql b/src/test/regress/sql/multi_repair_shards.sql index 7912f6a34..e7647a350 100644 --- a/src/test/regress/sql/multi_repair_shards.sql +++ b/src/test/regress/sql/multi_repair_shards.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 820000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 820000; + + -- =================================================================== -- test shard repair functionality -- =================================================================== diff --git a/src/test/regress/sql/multi_repartitioned_subquery_udf.sql b/src/test/regress/sql/multi_repartitioned_subquery_udf.sql index a245cd420..59d8ea030 100644 --- a/src/test/regress/sql/multi_repartitioned_subquery_udf.sql +++ b/src/test/regress/sql/multi_repartitioned_subquery_udf.sql @@ -2,6 +2,11 @@ -- MULTI_REPARTITIONED_SUBQUERY_UDF -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 830000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 830000; + + -- Create UDF in master and workers \c - - - :master_port DROP FUNCTION IF EXISTS median(double precision[]); diff --git a/src/test/regress/sql/multi_router_planner.sql b/src/test/regress/sql/multi_router_planner.sql index 7bbc9a985..a7da7efde 100644 --- a/src/test/regress/sql/multi_router_planner.sql +++ b/src/test/regress/sql/multi_router_planner.sql @@ -1,8 +1,11 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000; + + -- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103300; - CREATE TABLE articles_hash ( id bigint NOT NULL, diff --git a/src/test/regress/sql/multi_shard_modify.sql 
b/src/test/regress/sql/multi_shard_modify.sql index 76c511889..65d28a7d3 100644 --- a/src/test/regress/sql/multi_shard_modify.sql +++ b/src/test/regress/sql/multi_shard_modify.sql @@ -2,7 +2,10 @@ -- MULTI_SHARD_MODIFY -- + ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000; + -- Create a new hash partitioned multi_shard_modify_test table and stage data into it. CREATE TABLE multi_shard_modify_test ( diff --git a/src/test/regress/sql/multi_simple_queries.sql b/src/test/regress/sql/multi_simple_queries.sql index a2ccca852..0f479738a 100644 --- a/src/test/regress/sql/multi_simple_queries.sql +++ b/src/test/regress/sql/multi_simple_queries.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 850000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 850000; + + -- =================================================================== -- test end-to-end query functionality -- =================================================================== diff --git a/src/test/regress/sql/multi_single_relation_subquery.sql b/src/test/regress/sql/multi_single_relation_subquery.sql index a170af39a..688de921e 100644 --- a/src/test/regress/sql/multi_single_relation_subquery.sql +++ b/src/test/regress/sql/multi_single_relation_subquery.sql @@ -1,9 +1,13 @@ -- -- MULTI_SINGLE_RELATION_SUBQUERY -- - -- This test checks that we are able to run selected set of distributed SQL subqueries. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 860000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 860000; + + SET citus.task_executor_type TO 'task-tracker'; select diff --git a/src/test/regress/sql/multi_table_ddl.sql b/src/test/regress/sql/multi_table_ddl.sql index 51247bf89..c79703777 100644 --- a/src/test/regress/sql/multi_table_ddl.sql +++ b/src/test/regress/sql/multi_table_ddl.sql @@ -3,6 +3,11 @@ -- -- Tests around changing the schema and dropping of a distributed table + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 870000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 870000; + + CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); diff --git a/src/test/regress/sql/multi_task_assignment_policy.sql b/src/test/regress/sql/multi_task_assignment_policy.sql index 1fd30b716..72dcdff11 100644 --- a/src/test/regress/sql/multi_task_assignment_policy.sql +++ b/src/test/regress/sql/multi_task_assignment_policy.sql @@ -2,8 +2,13 @@ -- MULTI_TASK_ASSIGNMENT -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 880000; + SET citus.explain_distributed_queries TO off; + -- Check that our policies for assigning tasks to worker nodes run as expected. -- To test this, we first create a shell table, and then manually insert shard -- and shard placement data into system catalogs. 
We next run Explain command, diff --git a/src/test/regress/sql/multi_tpch_query1.sql b/src/test/regress/sql/multi_tpch_query1.sql index d6727d15a..6a2f9f5e4 100644 --- a/src/test/regress/sql/multi_tpch_query1.sql +++ b/src/test/regress/sql/multi_tpch_query1.sql @@ -2,6 +2,11 @@ -- MULTI_TPCH_QUERY1 -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 890000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 890000; + + -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_tpch_query10.sql b/src/test/regress/sql/multi_tpch_query10.sql index 75bb475a5..9f136f267 100644 --- a/src/test/regress/sql/multi_tpch_query10.sql +++ b/src/test/regress/sql/multi_tpch_query10.sql @@ -6,6 +6,11 @@ -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 900000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 900000; + + SELECT c_custkey, c_name, diff --git a/src/test/regress/sql/multi_tpch_query12.sql b/src/test/regress/sql/multi_tpch_query12.sql index 380165bb2..170dbae7d 100644 --- a/src/test/regress/sql/multi_tpch_query12.sql +++ b/src/test/regress/sql/multi_tpch_query12.sql @@ -2,6 +2,11 @@ -- MULTI_TPCH_QUERY12 -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 910000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 910000; + + -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_tpch_query14.sql b/src/test/regress/sql/multi_tpch_query14.sql index 82b696fec..5a730e95b 100644 --- a/src/test/regress/sql/multi_tpch_query14.sql +++ b/src/test/regress/sql/multi_tpch_query14.sql @@ -2,6 +2,11 @@ -- MULTI_TPCH_QUERY14 -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 920000; +ALTER SEQUENCE 
pg_catalog.pg_dist_jobid_seq RESTART 920000; + + -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_tpch_query19.sql b/src/test/regress/sql/multi_tpch_query19.sql index dfff2ea5d..52fc0e5b8 100644 --- a/src/test/regress/sql/multi_tpch_query19.sql +++ b/src/test/regress/sql/multi_tpch_query19.sql @@ -2,6 +2,11 @@ -- MULTI_TPCH_QUERY19 -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 930000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 930000; + + -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_tpch_query3.sql b/src/test/regress/sql/multi_tpch_query3.sql index 62232e5fa..f96768b99 100644 --- a/src/test/regress/sql/multi_tpch_query3.sql +++ b/src/test/regress/sql/multi_tpch_query3.sql @@ -6,6 +6,11 @@ -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 940000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 940000; + + SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, diff --git a/src/test/regress/sql/multi_tpch_query6.sql b/src/test/regress/sql/multi_tpch_query6.sql index 994b9467b..af48ecb51 100644 --- a/src/test/regress/sql/multi_tpch_query6.sql +++ b/src/test/regress/sql/multi_tpch_query6.sql @@ -2,6 +2,11 @@ -- MULTI_TPCH_QUERY6 -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 950000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 950000; + + -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_tpch_query7.sql b/src/test/regress/sql/multi_tpch_query7.sql index b9b3c5cd3..f0287200e 100644 --- a/src/test/regress/sql/multi_tpch_query7.sql +++ b/src/test/regress/sql/multi_tpch_query7.sql @@ -2,6 +2,11 @@ -- MULTI_TPCH_QUERY7 -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 970000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 970000; + + -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_tpch_query7_nested.sql b/src/test/regress/sql/multi_tpch_query7_nested.sql index ad34dd7b5..7a80a93a2 100644 --- a/src/test/regress/sql/multi_tpch_query7_nested.sql +++ b/src/test/regress/sql/multi_tpch_query7_nested.sql @@ -2,6 +2,11 @@ -- MULTI_TPCH_QUERY7_NESTED -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 960000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 960000; + + -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; diff --git a/src/test/regress/sql/multi_upsert.sql b/src/test/regress/sql/multi_upsert.sql index b61833eaf..394ef0bbc 100644 --- a/src/test/regress/sql/multi_upsert.sql +++ b/src/test/regress/sql/multi_upsert.sql @@ -2,6 
+2,11 @@ -- note that output of this file for postgresql 9.4 will -- be full syntax errors, which is expected. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 980000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 980000; + + CREATE TABLE upsert_test ( part_key int UNIQUE, diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql index 7070311f7..15fa16cdc 100644 --- a/src/test/regress/sql/multi_utilities.sql +++ b/src/test/regress/sql/multi_utilities.sql @@ -1,3 +1,8 @@ + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 990000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 990000; + + -- =================================================================== -- test utility statement functionality -- =================================================================== diff --git a/src/test/regress/sql/multi_utility_statements.sql b/src/test/regress/sql/multi_utility_statements.sql index e3ebdcec6..a29842208 100644 --- a/src/test/regress/sql/multi_utility_statements.sql +++ b/src/test/regress/sql/multi_utility_statements.sql @@ -6,6 +6,11 @@ -- distributed tables. Currently we only support CREATE TABLE AS (SELECT..), -- DECLARE CURSOR, and COPY ... TO statements. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1000000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1000000; + + CREATE TEMP TABLE lineitem_pricing_summary AS ( SELECT diff --git a/src/test/regress/sql/multi_utility_warnings.sql b/src/test/regress/sql/multi_utility_warnings.sql index 6f7503fa4..c53b9ac5b 100644 --- a/src/test/regress/sql/multi_utility_warnings.sql +++ b/src/test/regress/sql/multi_utility_warnings.sql @@ -5,6 +5,11 @@ -- Tests to check if we inform the user about potential caveats of creating new -- databases, schemas, and roles. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1010000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1010000; + + CREATE DATABASE new_database; CREATE SCHEMA new_schema; diff --git a/src/test/regress/sql/multi_verify_no_join_with_alias.sql b/src/test/regress/sql/multi_verify_no_join_with_alias.sql index b957d6227..18e446df1 100644 --- a/src/test/regress/sql/multi_verify_no_join_with_alias.sql +++ b/src/test/regress/sql/multi_verify_no_join_with_alias.sql @@ -5,13 +5,18 @@ -- This test checks that we simply emit an error message instead of trying to -- fetch and join a shard which has an alias set. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1020000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1020000; + + -- Show that the join works without an alias SELECT COUNT(*) FROM lineitem, part WHERE l_partkey = p_partkey; -- Assign an alias to the parts shard -UPDATE pg_dist_shard SET shardalias = 'my_alias' WHERE shardid = 102019; +UPDATE pg_dist_shard SET shardalias = 'my_alias' WHERE shardid = 290000; -- Attempt a join which uses this shard @@ -19,4 +24,4 @@ SELECT COUNT(*) FROM lineitem, part WHERE l_partkey = p_partkey; -- Remove the alias from the parts shard -UPDATE pg_dist_shard SET shardalias = NULL WHERE shardid = 102019; +UPDATE pg_dist_shard SET shardalias = NULL WHERE shardid = 290000; diff --git a/src/test/regress/sql/multi_verify_no_subquery.sql b/src/test/regress/sql/multi_verify_no_subquery.sql index d6d1c1b41..b94eddcca 100644 --- a/src/test/regress/sql/multi_verify_no_subquery.sql +++ b/src/test/regress/sql/multi_verify_no_subquery.sql @@ -5,6 +5,11 @@ -- This test checks that we simply emit an error message instead of trying to -- process a distributed unsupported SQL subquery. 
+ +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1030000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1030000; + + SELECT * FROM lineitem WHERE l_orderkey IN (SELECT l_orderkey FROM lineitem WHERE l_quantity > 0); diff --git a/src/test/regress/sql/multi_working_columns.sql b/src/test/regress/sql/multi_working_columns.sql index ae792ae2b..aaee2dcb3 100644 --- a/src/test/regress/sql/multi_working_columns.sql +++ b/src/test/regress/sql/multi_working_columns.sql @@ -7,6 +7,11 @@ -- these columns are pulled to the master, and are correctly used in sorting and -- grouping. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1040000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1040000; + + SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; SELECT l_quantity, count(*) as count FROM lineitem diff --git a/src/test/regress/sql/task_tracker_assign_task.sql b/src/test/regress/sql/task_tracker_assign_task.sql index d9e24ee65..34bfd56f7 100644 --- a/src/test/regress/sql/task_tracker_assign_task.sql +++ b/src/test/regress/sql/task_tracker_assign_task.sql @@ -2,6 +2,11 @@ -- TASK_TRACKER_ASSIGN_TASK -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1050000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1050000; + + \set JobId 401010 \set SimpleTaskId 101101 \set RecoverableTaskId 801102 diff --git a/src/test/regress/sql/task_tracker_cleanup_job.sql b/src/test/regress/sql/task_tracker_cleanup_job.sql index be026d62f..2717f5ddf 100644 --- a/src/test/regress/sql/task_tracker_cleanup_job.sql +++ b/src/test/regress/sql/task_tracker_cleanup_job.sql @@ -2,6 +2,11 @@ -- TASK_TRACKER_CLEANUP_JOB -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1060000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1060000; + + \set JobId 401010 \set CompletedTaskId 801107 \set RunningTaskId 801108 diff --git a/src/test/regress/sql/task_tracker_create_table.sql b/src/test/regress/sql/task_tracker_create_table.sql index 
d27e3354e..322d8fe23 100644 --- a/src/test/regress/sql/task_tracker_create_table.sql +++ b/src/test/regress/sql/task_tracker_create_table.sql @@ -2,6 +2,11 @@ -- TASK_TRACKER_CREATE_TABLE -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1070000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1070000; + + -- New table definitions to test the task tracker process and protocol CREATE TABLE lineitem_simple_task ( LIKE lineitem ); diff --git a/src/test/regress/sql/task_tracker_partition_task.sql b/src/test/regress/sql/task_tracker_partition_task.sql index a77092882..0f465f823 100644 --- a/src/test/regress/sql/task_tracker_partition_task.sql +++ b/src/test/regress/sql/task_tracker_partition_task.sql @@ -2,6 +2,11 @@ -- TASK_TRACKER_PARTITION_TASK -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1080000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1080000; + + \set JobId 401010 \set PartitionTaskId 801106 diff --git a/src/test/regress/sql/worker_binary_data_partition.sql b/src/test/regress/sql/worker_binary_data_partition.sql index 0c2d487dc..1094dff89 100644 --- a/src/test/regress/sql/worker_binary_data_partition.sql +++ b/src/test/regress/sql/worker_binary_data_partition.sql @@ -2,6 +2,11 @@ -- WORKER_BINARY_DATA_PARTITION -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1090000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1090000; + + \set JobId 201010 \set TaskId 101105 \set Partition_Column textcolumn diff --git a/src/test/regress/sql/worker_check_invalid_arguments.sql b/src/test/regress/sql/worker_check_invalid_arguments.sql index 464e92985..6feb68ddc 100644 --- a/src/test/regress/sql/worker_check_invalid_arguments.sql +++ b/src/test/regress/sql/worker_check_invalid_arguments.sql @@ -2,6 +2,11 @@ -- WORKER_CHECK_INVALID_ARGUMENTS -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1100000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1100000; + + \set JobId 201010 \set TaskId 101108 \set 
Table_Name simple_binary_data_table diff --git a/src/test/regress/sql/worker_create_table.sql b/src/test/regress/sql/worker_create_table.sql index 7f9699d65..6ba2fe462 100644 --- a/src/test/regress/sql/worker_create_table.sql +++ b/src/test/regress/sql/worker_create_table.sql @@ -6,6 +6,11 @@ -- node execution logic. For now,the tests include range and hash partitioning -- of existing tables. + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1110000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1110000; + + CREATE TABLE lineitem ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/sql/worker_hash_partition.sql b/src/test/regress/sql/worker_hash_partition.sql index 0bf5e2ef8..ff1b7ec32 100644 --- a/src/test/regress/sql/worker_hash_partition.sql +++ b/src/test/regress/sql/worker_hash_partition.sql @@ -2,6 +2,11 @@ -- WORKER_HASH_PARTITION -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1130000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1130000; + + \set JobId 201010 \set TaskId 101103 \set Partition_Column l_orderkey diff --git a/src/test/regress/sql/worker_hash_partition_complex.sql b/src/test/regress/sql/worker_hash_partition_complex.sql index 0aa89ef79..32d1810fa 100644 --- a/src/test/regress/sql/worker_hash_partition_complex.sql +++ b/src/test/regress/sql/worker_hash_partition_complex.sql @@ -2,6 +2,11 @@ -- WORKER_HASH_PARTITION_COMPLEX -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1120000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1120000; + + \set JobId 201010 \set TaskId 101104 \set Partition_Column l_partkey diff --git a/src/test/regress/sql/worker_merge_hash_files.sql b/src/test/regress/sql/worker_merge_hash_files.sql index 3f8c59f00..bba66cf37 100644 --- a/src/test/regress/sql/worker_merge_hash_files.sql +++ b/src/test/regress/sql/worker_merge_hash_files.sql @@ -2,6 +2,11 @@ -- WORKER_MERGE_HASH_FILES -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 
1140000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1140000; + + \set JobId 201010 \set TaskId 101103 \set Task_Table_Name public.task_101103 diff --git a/src/test/regress/sql/worker_merge_range_files.sql b/src/test/regress/sql/worker_merge_range_files.sql index 26f67e94b..cd7189258 100644 --- a/src/test/regress/sql/worker_merge_range_files.sql +++ b/src/test/regress/sql/worker_merge_range_files.sql @@ -2,6 +2,11 @@ -- WORKER_MERGE_RANGE_FILES -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1150000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1150000; + + \set JobId 201010 \set TaskId 101101 \set Task_Table_Name public.task_101101 diff --git a/src/test/regress/sql/worker_null_data_partition.sql b/src/test/regress/sql/worker_null_data_partition.sql index b696c8d14..ba9aa06ed 100644 --- a/src/test/regress/sql/worker_null_data_partition.sql +++ b/src/test/regress/sql/worker_null_data_partition.sql @@ -2,6 +2,11 @@ -- WORKER_NULL_DATA_PARTITION -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1180000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1180000; + + \set JobId 201010 \set Range_TaskId 101106 \set Partition_Column s_nationkey diff --git a/src/test/regress/sql/worker_range_partition.sql b/src/test/regress/sql/worker_range_partition.sql index f6c0d87b2..5d8d4a4f8 100644 --- a/src/test/regress/sql/worker_range_partition.sql +++ b/src/test/regress/sql/worker_range_partition.sql @@ -2,6 +2,11 @@ -- WORKER_RANGE_PARTITION -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1160000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1160000; + + \set JobId 201010 \set TaskId 101101 \set Partition_Column l_orderkey diff --git a/src/test/regress/sql/worker_range_partition_complex.sql b/src/test/regress/sql/worker_range_partition_complex.sql index 14a536a57..109cc95e1 100644 --- a/src/test/regress/sql/worker_range_partition_complex.sql +++ b/src/test/regress/sql/worker_range_partition_complex.sql @@ -2,6 +2,11 
@@ -- WORKER_RANGE_PARTITION_COMPLEX -- + +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1170000; +ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1170000; + + \set JobId 201010 \set TaskId 101102 \set Partition_Column l_partkey