diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c
index 27b28653a..7bfa0efec 100644
--- a/src/backend/distributed/planner/multi_logical_optimizer.c
+++ b/src/backend/distributed/planner/multi_logical_optimizer.c
@@ -1320,7 +1320,7 @@ MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerC
 	else if (IsA(originalNode, Var))
 	{
 		uint32 masterTableId = 1; /* one table on the master node */
-		Var *newColumn = copyObject(originalNode);
+		Var *newColumn = copyObject((Var *) originalNode);
 		newColumn->varno = masterTableId;
 		newColumn->varattno = walkerContext->columnId;
 		walkerContext->columnId++;
diff --git a/src/backend/distributed/planner/multi_planner.c b/src/backend/distributed/planner/multi_planner.c
index ba9d659f0..7f532b695 100644
--- a/src/backend/distributed/planner/multi_planner.c
+++ b/src/backend/distributed/planner/multi_planner.c
@@ -9,6 +9,7 @@
 
 #include "postgres.h"
 
+#include
 #include
 
 #include "catalog/pg_type.h"
diff --git a/src/backend/distributed/worker/task_tracker.c b/src/backend/distributed/worker/task_tracker.c
index fe2cf3af9..1afabb371 100644
--- a/src/backend/distributed/worker/task_tracker.c
+++ b/src/backend/distributed/worker/task_tracker.c
@@ -62,7 +62,6 @@ static volatile sig_atomic_t got_SIGHUP = false;
 static volatile sig_atomic_t got_SIGTERM = false;
 
 /* initialization forward declarations */
-static void TaskTrackerMain(Datum main_arg);
 static Size TaskTrackerShmemSize(void);
 static void TaskTrackerShmemInit(void);
 
@@ -108,7 +107,8 @@ TaskTrackerRegister(void)
 	worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
 	worker.bgw_start_time = BgWorkerStart_ConsistentState;
 	worker.bgw_restart_time = 1;
-	worker.bgw_main = TaskTrackerMain;
+	snprintf(worker.bgw_library_name, BGW_MAXLEN, "citus");
+	snprintf(worker.bgw_function_name, BGW_MAXLEN, "TaskTrackerMain");
 	worker.bgw_notify_pid = 0;
 	snprintf(worker.bgw_name, BGW_MAXLEN, "task tracker");
 
@@ -117,7 +117,7 @@ TaskTrackerRegister(void)
 
 
 /* Main entry point for task tracker process. */
-static void
+void
 TaskTrackerMain(Datum main_arg)
 {
 	MemoryContext TaskTrackerContext = NULL;
diff --git a/src/backend/distributed/worker/worker_data_fetch_protocol.c b/src/backend/distributed/worker/worker_data_fetch_protocol.c
index f86b20726..3fe98fdf4 100644
--- a/src/backend/distributed/worker/worker_data_fetch_protocol.c
+++ b/src/backend/distributed/worker/worker_data_fetch_protocol.c
@@ -482,6 +482,7 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)
 	/* run the CREATE SEQUENCE command */
 	ProcessUtility(commandNode, commandString, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
+	CommandCounterIncrement();
 
 	createSequenceStatement = (CreateSeqStmt *) commandNode;
diff --git a/src/backend/distributed/worker/worker_partition_protocol.c b/src/backend/distributed/worker/worker_partition_protocol.c
index 5616428b3..d7731f426 100644
--- a/src/backend/distributed/worker/worker_partition_protocol.c
+++ b/src/backend/distributed/worker/worker_partition_protocol.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "access/hash.h"
diff --git a/src/include/distributed/task_tracker.h b/src/include/distributed/task_tracker.h
index 756314b1c..1b76d9547 100644
--- a/src/include/distributed/task_tracker.h
+++ b/src/include/distributed/task_tracker.h
@@ -112,6 +112,8 @@ extern int MaxRunningTasksPerNode;
 /* State shared by the task tracker and task tracker protocol functions */
 extern WorkerTasksSharedStateData *WorkerTasksSharedState;
 
+/* Entry point */
+extern void TaskTrackerMain(Datum main_arg);
 
 /* Function declarations local to the worker module */
 extern WorkerTask * WorkerTasksHashEnter(uint64 jobId, uint32 taskId);
diff --git a/src/test/regress/expected/multi_agg_approximate_distinct.out b/src/test/regress/expected/multi_agg_approximate_distinct.out
index 63884df18..5c426802e 100644
--- a/src/test/regress/expected/multi_agg_approximate_distinct.out
+++ b/src/test/regress/expected/multi_agg_approximate_distinct.out
@@ -1,7 +1,6 @@
 --
 -- MULTI_AGG_APPROXIMATE_DISTINCT
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 340000;
 -- Create HLL extension if present, print false result otherwise
 SELECT CASE WHEN COUNT(*) > 0 THEN
   'CREATE EXTENSION HLL'
diff --git a/src/test/regress/expected/multi_agg_approximate_distinct_0.out b/src/test/regress/expected/multi_agg_approximate_distinct_0.out
index 1b552e248..da6ba329e 100644
--- a/src/test/regress/expected/multi_agg_approximate_distinct_0.out
+++ b/src/test/regress/expected/multi_agg_approximate_distinct_0.out
@@ -1,7 +1,6 @@
 --
 -- MULTI_AGG_APPROXIMATE_DISTINCT
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 340000;
 -- Create HLL extension if present, print false result otherwise
 SELECT CASE WHEN COUNT(*) > 0 THEN
   'CREATE EXTENSION HLL'
diff --git a/src/test/regress/expected/multi_average_expression.out b/src/test/regress/expected/multi_average_expression.out
index c0551ec50..74511d553 100644
--- a/src/test/regress/expected/multi_average_expression.out
+++ b/src/test/regress/expected/multi_average_expression.out
@@ -4,7 +4,6 @@
 -- This test checks that the group-by columns don't need to be above an average
 -- expression, and can be anywhere in the projection order. This is in response
 -- to a bug we had due to the average expression introducing new columns.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 450000; SELECT sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, diff --git a/src/test/regress/expected/multi_basic_queries.out b/src/test/regress/expected/multi_basic_queries.out index 33d2c5fe2..13393acff 100644 --- a/src/test/regress/expected/multi_basic_queries.out +++ b/src/test/regress/expected/multi_basic_queries.out @@ -1,7 +1,6 @@ -- -- MULTI_BASIC_QUERIES -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 440000; -- Execute simple sum, average, and count queries on data recently uploaded to -- our partitioned table. SELECT count(*) FROM lineitem; diff --git a/src/test/regress/expected/multi_insert_select_behavioral_analytics_basics.out b/src/test/regress/expected/multi_behavioral_analytics_basics.out similarity index 100% rename from src/test/regress/expected/multi_insert_select_behavioral_analytics_basics.out rename to src/test/regress/expected/multi_behavioral_analytics_basics.out diff --git a/src/test/regress/expected/multi_insert_select_behavioral_analytics_single_shard_queries.out b/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out similarity index 100% rename from src/test/regress/expected/multi_insert_select_behavioral_analytics_single_shard_queries.out rename to src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out diff --git a/src/test/regress/expected/multi_complex_expressions.out b/src/test/regress/expected/multi_complex_expressions.out index 0914adb6f..4cf5d18ba 100644 --- a/src/test/regress/expected/multi_complex_expressions.out +++ b/src/test/regress/expected/multi_complex_expressions.out @@ -1,7 +1,6 @@ -- -- MULTI_COMPLEX_EXPRESSIONS -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 420000; -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; ?column? @@ -435,22 +434,21 @@ FROM lineitem li JOIN orders o ON li.l_orderkey = o.o_orderkey WHERE li.l_quantity > 25 -ORDER BY - li.l_quantity, li.l_partkey, o.o_custkey +ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 l_partkey | o_custkey | l_quantity -----------+-----------+------------ - 25221 | 656 | 26.00 - 25373 | 1369 | 26.00 - 27331 | 571 | 26.00 - 27699 | 1150 | 26.00 - 28226 | 913 | 26.00 - 28635 | 1207 | 26.00 - 29101 | 1283 | 26.00 - 31143 | 640 | 26.00 - 31239 | 685 | 26.00 - 33646 | 860 | 26.00 + 655 | 58 | 50.00 + 669 | 319 | 34.00 + 699 | 1255 | 50.00 + 716 | 61 | 45.00 + 723 | 14 | 36.00 + 802 | 754 | 50.00 + 831 | 589 | 32.00 + 835 | 67 | 33.00 + 864 | 439 | 32.00 + 875 | 13 | 43.00 (10 rows) RESET client_min_messages; diff --git a/src/test/regress/expected/multi_count_type_conversion.out b/src/test/regress/expected/multi_count_type_conversion.out index 204532b48..a258ebef2 100644 --- a/src/test/regress/expected/multi_count_type_conversion.out +++ b/src/test/regress/expected/multi_count_type_conversion.out @@ -1,7 +1,6 @@ -- -- MULTI_COUNT_TYPE_CONVERSION -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 400000; -- Verify that we can sort count(*) results correctly. We perform this check as -- our count() operations execute in two steps: worker nodes report their -- count() results, and the master node sums these counts up. 
During this sum(), diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 2d8193b6a..245fbac88 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -3,7 +3,7 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; -- print major version to make version-specific tests clear -SELECT substring(version(), '\d+\.\d+') AS major_version; +SELECT substring(version(), '\d+(?:\.\d+)?') AS major_version; major_version --------------- 9.6 diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out index 10798f526..ddba84aa3 100644 --- a/src/test/regress/expected/multi_explain_0.out +++ b/src/test/regress/expected/multi_explain_0.out @@ -3,7 +3,7 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; -- print major version to make version-specific tests clear -SELECT substring(version(), '\d+\.\d+') AS major_version; +SELECT substring(version(), '\d+(?:\.\d+)?') AS major_version; major_version --------------- 9.5 diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index ada4fc4d9..72962f0a7 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -292,14 +292,15 @@ FROM (SELECT raw_events_second.user_id AS id, FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id) AS foo -GROUP BY id; -DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= '-2147483648'::integer) AND (worker_hash(id) <= '-1073741825'::integer)) GROUP BY id -DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= '-1073741824'::integer) AND (worker_hash(id) <= '-1'::integer)) GROUP BY id -DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= 0) AND (worker_hash(id) <= 1073741823)) GROUP BY id -DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= 1073741824) AND (worker_hash(id) <= 2147483647)) GROUP BY id +GROUP BY id +ORDER BY id; +DEBUG: distributed statement: 
INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= '-2147483648'::integer) AND (worker_hash(id) <= '-1073741825'::integer)) GROUP BY id ORDER BY id +DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= '-1073741824'::integer) AND (worker_hash(id) <= '-1'::integer)) GROUP BY id ORDER BY id +DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= 0) AND (worker_hash(id) <= 1073741823)) GROUP BY id ORDER BY id +DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= 1073741824) AND (worker_hash(id) <= 2147483647)) GROUP BY id ORDER BY id DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" -DETAIL: Key (user_id, value_1_agg)=(5, 50) already exists. +DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists. 
CONTEXT: while executing command on localhost:57638 -- subquery one more level depth INSERT INTO agg_events @@ -315,11 +316,12 @@ FROM (SELECT SUM(raw_events_second.value_4) AS v4, FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id - GROUP BY raw_events_second.user_id) AS foo; -DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= '-2147483648'::integer) AND (worker_hash(id) <= '-1073741825'::integer)) -DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= '-1073741824'::integer) AND (worker_hash(id) <= '-1'::integer)) -DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= 0) AND (worker_hash(id) <= 1073741823)) -DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= 1073741824) AND (worker_hash(id) <= 2147483647)) + GROUP BY raw_events_second.user_id) AS foo +ORDER BY id; +DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= '-2147483648'::integer) AND (worker_hash(id) <= '-1073741825'::integer)) ORDER BY id +DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, 
public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= '-1073741824'::integer) AND (worker_hash(id) <= '-1'::integer)) ORDER BY id +DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= 0) AND (worker_hash(id) <= 1073741823)) ORDER BY id +DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= 1073741824) AND (worker_hash(id) <= 2147483647)) ORDER BY id DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists. diff --git a/src/test/regress/expected/multi_limit_clause.out b/src/test/regress/expected/multi_limit_clause.out index 793f928bc..18e1adf20 100644 --- a/src/test/regress/expected/multi_limit_clause.out +++ b/src/test/regress/expected/multi_limit_clause.out @@ -1,7 +1,6 @@ -- -- MULTI_LIMIT_CLAUSE -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 730000; -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; -- Check that we can correctly handle the Limit clause in distributed queries. diff --git a/src/test/regress/expected/multi_limit_clause_approximate.out b/src/test/regress/expected/multi_limit_clause_approximate.out index 8700e63fb..42e08877b 100644 --- a/src/test/regress/expected/multi_limit_clause_approximate.out +++ b/src/test/regress/expected/multi_limit_clause_approximate.out @@ -1,7 +1,6 @@ -- -- MULTI_LIMIT_CLAUSE_APPROXIMATE -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 720000; -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; -- We first look at results with limit optimization disabled. 
This first query diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 8348344b7..358915664 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -661,8 +661,8 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; -- Check that CREATE INDEX statement is propagated \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; +SET client_min_messages TO 'ERROR'; CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1); -WARNING: hash indexes are not WAL-logged and their use is discouraged CREATE UNIQUE INDEX mx_index_4 ON mx_test_schema_2.mx_table_2(col1); \c - - - :worker_1_port \d mx_test_schema_2.mx_table_2 diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out index e103c2183..1f347e16a 100644 --- a/src/test/regress/expected/multi_mx_ddl.out +++ b/src/test/regress/expected/multi_mx_ddl.out @@ -1,5 +1,4 @@ -- Tests related to distributed DDL commands on mx cluster -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1600000; SELECT * FROM mx_ddl_table ORDER BY key; key | value -----+------- @@ -207,10 +206,10 @@ SELECT groupid FROM pg_dist_local_group; 12 (1 row) -SELECT * FROM mx_sequence_value_seq; - sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called ------------------------+------------------+------------------+--------------+------------------+------------------+-------------+---------+-----------+----------- - mx_sequence_value_seq | 3377699720527873 | 3377699720527873 | 1 | 3659174697238529 | 3377699720527873 | 1 | 0 | f | f +SELECT last_value FROM mx_sequence_value_seq; + last_value +------------------ + 3377699720527873 (1 row) \c - - - :worker_2_port @@ -220,10 +219,10 @@ SELECT groupid FROM pg_dist_local_group; 14 (1 row) -SELECT * FROM mx_sequence_value_seq; - sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called ------------------------+------------------+------------------+--------------+------------------+------------------+-------------+---------+-----------+----------- - mx_sequence_value_seq | 3940649673949185 | 3940649673949185 | 1 | 4222124650659841 | 3940649673949185 | 1 | 0 | f | f +SELECT last_value FROM mx_sequence_value_seq; + last_value +------------------ + 3940649673949185 (1 row) \c - - - :master_port diff --git a/src/test/regress/expected/multi_mx_reference_table.out b/src/test/regress/expected/multi_mx_reference_table.out index d49c0cb06..3e788d2e6 100644 --- a/src/test/regress/expected/multi_mx_reference_table.out +++ b/src/test/regress/expected/multi_mx_reference_table.out @@ -312,26 +312,35 @@ WHERE (2 rows) -- set operations are supported -(SELECT * FROM reference_table_test WHERE value_1 = 1) -UNION -(SELECT * FROM reference_table_test WHERE value_1 = 3); +SELECT * FROM ( + SELECT * FROM reference_table_test WHERE value_1 = 1 + UNION + SELECT * FROM reference_table_test WHERE value_1 = 3 +) AS combination +ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (2 rows) -(SELECT * FROM reference_table_test WHERE value_1 = 1) -EXCEPT -(SELECT * FROM reference_table_test WHERE value_1 = 3); +SELECT * FROM ( + SELECT * FROM reference_table_test WHERE 
value_1 = 1 + EXCEPT + SELECT * FROM reference_table_test WHERE value_1 = 3 +) AS combination +ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) -(SELECT * FROM reference_table_test WHERE value_1 = 1) -INTERSECT -(SELECT * FROM reference_table_test WHERE value_1 = 3); +SELECT * FROM ( + SELECT * FROM reference_table_test WHERE value_1 = 1 + INTERSECT + SELECT * FROM reference_table_test WHERE value_1 = 3 +) AS combination +ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- (0 rows) diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index da1fa9646..4bee82e09 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -1,4 +1,3 @@ -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000; -- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== @@ -355,7 +354,8 @@ SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash_mx WHERE author_id = 1 or author_id = 3 - GROUP BY GROUPING SETS ((id),(subtitle)); + GROUP BY GROUPING SETS ((id),(subtitle)) + ORDER BY id, subtitle; DEBUG: Creating router plan DEBUG: Plan is router executable id | subtitle | count @@ -385,7 +385,8 @@ SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2 - GROUP BY GROUPING SETS ((id),(subtitle)); + GROUP BY GROUPING SETS ((id),(subtitle)) + ORDER BY id, subtitle; ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP HINT: Consider using an equality filter on the distributed table's partition column. -- queries which involve functions in FROM clause are supported if it goes to a single worker. 
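Note on the hunks that follow: the expected output of a bare UNION/EXCEPT/INTERSECT has no guaranteed row order, so the tests are rewritten to wrap the set operation in a subquery and sort the result. A minimal sketch of the pattern on a stock PostgreSQL server (articles_demo is an illustrative table, not part of this patch):

CREATE TEMP TABLE articles_demo (id int, author_id int, title text);
INSERT INTO articles_demo VALUES (1, 1, 'arsenous'), (3, 3, 'asternal'), (11, 1, 'alamo');

-- Row order here is whatever the chosen plan happens to produce, so expected
-- output can drift between runs and executors.
SELECT * FROM articles_demo WHERE author_id = 1
UNION
SELECT * FROM articles_demo WHERE author_id = 3;

-- Same rows, deterministic order: this is the shape the regression tests switch to.
SELECT * FROM (
    SELECT * FROM articles_demo WHERE author_id = 1
    UNION
    SELECT * FROM articles_demo WHERE author_id = 3
) AS combination
ORDER BY id;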
@@ -708,43 +709,26 @@ DEBUG: Plan is router executable -- router plannable union queries are supported -(SELECT * FROM articles_hash_mx WHERE author_id = 1) -UNION -(SELECT * FROM articles_hash_mx WHERE author_id = 3); -DEBUG: Creating router plan -DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ - 3 | 3 | asternal | 10480 - 43 | 3 | affixal | 12723 - 23 | 3 | abhorring | 6799 - 13 | 3 | aseyev | 2255 - 11 | 1 | alamo | 1347 - 41 | 1 | aznavour | 11814 - 1 | 1 | arsenous | 9572 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 33 | 3 | autochrome | 8180 -(10 rows) - SELECT * FROM ( - (SELECT * FROM articles_hash_mx WHERE author_id = 1) + SELECT * FROM articles_hash_mx WHERE author_id = 1 UNION - (SELECT * FROM articles_hash_mx WHERE author_id = 3)) uu; + SELECT * FROM articles_hash_mx WHERE author_id = 3 +) AS combination +ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ - 3 | 3 | asternal | 10480 - 43 | 3 | affixal | 12723 - 23 | 3 | abhorring | 6799 - 13 | 3 | aseyev | 2255 - 11 | 1 | alamo | 1347 - 41 | 1 | aznavour | 11814 1 | 1 | arsenous | 9572 + 3 | 3 | asternal | 10480 + 11 | 1 | alamo | 1347 + 13 | 3 | aseyev | 2255 21 | 1 | arcading | 5890 + 23 | 3 | abhorring | 6799 31 | 1 | athwartships | 7271 33 | 3 | autochrome | 8180 + 41 | 1 | aznavour | 11814 + 43 | 3 | affixal | 12723 (10 rows) (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) @@ -767,17 +751,20 @@ DEBUG: Plan is router executable a (1 row) -(SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1) -EXCEPT -(SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3); +SELECT * FROM ( + SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1 + EXCEPT + SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3 +) AS combination +ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router executable left ------ + al + ar at az - ar - al (4 rows) -- union queries are not supported if not router plannable diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index 74b577c97..be9296ed5 100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -1,7 +1,6 @@ -- -- MULTI_MX_SCHEMA_SUPPORT -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1210000; -- connect to a worker node and run some queries \c - - - :worker_1_port -- test very basic queries diff --git a/src/test/regress/expected/multi_mx_tpch_query1.out b/src/test/regress/expected/multi_mx_tpch_query1.out index 9c3a8092f..8f4ea7750 100644 --- a/src/test/regress/expected/multi_mx_tpch_query1.out +++ b/src/test/regress/expected/multi_mx_tpch_query1.out @@ -1,7 +1,6 @@ -- -- MULTI_MX_TPCH_QUERY1 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000; -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large @@ -38,7 +37,6 @@ ORDER BY -- connect one of the workers \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark @@ -73,7 +71,6 @@ ORDER BY -- connect to the other node \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq 
RESTART 1310000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_mx_tpch_query10.out b/src/test/regress/expected/multi_mx_tpch_query10.out index 675902e8f..bd172ea19 100644 --- a/src/test/regress/expected/multi_mx_tpch_query10.out +++ b/src/test/regress/expected/multi_mx_tpch_query10.out @@ -4,7 +4,6 @@ -- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000; -- connect to master \c - - - :master_port SELECT @@ -65,7 +64,6 @@ LIMIT 20; -- connect one of the workers \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000; SELECT c_custkey, c_name, @@ -124,7 +122,6 @@ LIMIT 20; -- connect to the other worker \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000; SELECT c_custkey, c_name, diff --git a/src/test/regress/expected/multi_mx_tpch_query12.out b/src/test/regress/expected/multi_mx_tpch_query12.out index f4bce62ac..9c5cbe810 100644 --- a/src/test/regress/expected/multi_mx_tpch_query12.out +++ b/src/test/regress/expected/multi_mx_tpch_query12.out @@ -1,7 +1,6 @@ -- -- MULTI_MX_TPCH_QUERY12 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000; -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large @@ -43,7 +42,6 @@ ORDER BY -- connect one of the workers \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark @@ -83,7 +81,6 @@ ORDER BY -- connect to the other worker node \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_mx_tpch_query14.out b/src/test/regress/expected/multi_mx_tpch_query14.out index 418157a2b..c63ed7b4a 100644 --- a/src/test/regress/expected/multi_mx_tpch_query14.out +++ b/src/test/regress/expected/multi_mx_tpch_query14.out @@ -1,7 +1,6 @@ -- -- MULTI_MX_TPCH_QUERY14 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000; -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large @@ -27,7 +26,6 @@ WHERE -- connect one of the workers \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark @@ -51,7 +49,6 @@ WHERE -- connect to the other node \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_mx_tpch_query19.out b/src/test/regress/expected/multi_mx_tpch_query19.out index 30e0b78d9..654ae6d42 100644 
--- a/src/test/regress/expected/multi_mx_tpch_query19.out +++ b/src/test/regress/expected/multi_mx_tpch_query19.out @@ -1,7 +1,6 @@ -- -- MULTI_MX_TPCH_QUERY19 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000; -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large @@ -44,7 +43,6 @@ WHERE -- connect one of the workers \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified @@ -85,7 +83,6 @@ WHERE -- connect to the other node \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified diff --git a/src/test/regress/expected/multi_mx_tpch_query3.out b/src/test/regress/expected/multi_mx_tpch_query3.out index 2c714236d..00405aea3 100644 --- a/src/test/regress/expected/multi_mx_tpch_query3.out +++ b/src/test/regress/expected/multi_mx_tpch_query3.out @@ -4,7 +4,6 @@ -- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000; -- connect to the coordinator \c - - - :master_port SELECT @@ -51,7 +50,6 @@ ORDER BY -- connect one of the workers \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000; SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, @@ -96,7 +94,6 @@ ORDER BY -- connect to the other node \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000; SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, diff --git a/src/test/regress/expected/multi_mx_tpch_query6.out b/src/test/regress/expected/multi_mx_tpch_query6.out index 94fbcb004..4746a7b1b 100644 --- a/src/test/regress/expected/multi_mx_tpch_query6.out +++ b/src/test/regress/expected/multi_mx_tpch_query6.out @@ -1,7 +1,6 @@ -- -- MULTI_MX_TPCH_QUERY6 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large @@ -23,7 +22,6 @@ WHERE -- connect to one of the worker nodes \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark @@ -43,7 +41,6 @@ WHERE -- connect to the other worker node \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_mx_tpch_query7.out b/src/test/regress/expected/multi_mx_tpch_query7.out index ad92569b5..64c05a084 100644 --- a/src/test/regress/expected/multi_mx_tpch_query7.out +++ b/src/test/regress/expected/multi_mx_tpch_query7.out @@ -1,7 +1,6 @@ -- -- MULTI_MX_TPCH_QUERY7 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq 
RESTART 1230000; -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem AND orders tables as large @@ -53,7 +52,6 @@ ORDER BY -- connect one of the workers \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000; -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark @@ -103,7 +101,6 @@ ORDER BY -- connect to the other worker node \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000; -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_mx_tpch_query7_nested.out b/src/test/regress/expected/multi_mx_tpch_query7_nested.out index 04e1dd7a2..3640ec043 100644 --- a/src/test/regress/expected/multi_mx_tpch_query7_nested.out +++ b/src/test/regress/expected/multi_mx_tpch_query7_nested.out @@ -1,7 +1,6 @@ -- -- MULTI_MX_TPCH_QUERY7_NESTED -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000; -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem AND orders tables AS large @@ -62,7 +61,6 @@ ORDER BY -- connect to one of the workers \c - - - :worker_1_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000; -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects @@ -121,7 +119,6 @@ ORDER BY -- connect to the coordinator \c - - - :worker_2_port -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000; -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out index 9417f742b..424d522c3 100644 --- a/src/test/regress/expected/multi_prepare_plsql.out +++ b/src/test/regress/expected/multi_prepare_plsql.out @@ -4,7 +4,6 @@ -- Many of the queries are taken from other regression test files -- and converted into both plain SQL and PL/pgsql functions, which -- use prepared statements internally. -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 780000; CREATE FUNCTION plpgsql_test_1() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index 374223972..9083aca01 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -4,7 +4,6 @@ -- Tests covering PREPARE statements. Many of the queries are -- taken from other regression test files and converted into -- prepared statements. 
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 790000; PREPARE prepared_test_1 AS SELECT count(*) diff --git a/src/test/regress/expected/multi_reference_table.out b/src/test/regress/expected/multi_reference_table.out index 59700a0cd..dc1b94891 100644 --- a/src/test/regress/expected/multi_reference_table.out +++ b/src/test/regress/expected/multi_reference_table.out @@ -360,26 +360,35 @@ WHERE (2 rows) -- set operations are supported -(SELECT * FROM reference_table_test WHERE value_1 = 1) -UNION -(SELECT * FROM reference_table_test WHERE value_1 = 3); +SELECT * FROM ( + SELECT * FROM reference_table_test WHERE value_1 = 1 + UNION + SELECT * FROM reference_table_test WHERE value_1 = 3 +) AS combination +ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (2 rows) -(SELECT * FROM reference_table_test WHERE value_1 = 1) -EXCEPT -(SELECT * FROM reference_table_test WHERE value_1 = 3); +SELECT * FROM ( + SELECT * FROM reference_table_test WHERE value_1 = 1 + EXCEPT + SELECT * FROM reference_table_test WHERE value_1 = 3 +) AS combination +ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) -(SELECT * FROM reference_table_test WHERE value_1 = 1) -INTERSECT -(SELECT * FROM reference_table_test WHERE value_1 = 3); +SELECT * FROM ( + SELECT * FROM reference_table_test WHERE value_1 = 1 + INTERSECT + SELECT * FROM reference_table_test WHERE value_1 = 3 +) AS combination +ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- (0 rows) diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index 369ee2f32..c6ac05c4d 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -453,7 +453,8 @@ SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash WHERE author_id = 1 or author_id = 3 - GROUP BY GROUPING SETS ((id),(subtitle)); + GROUP BY GROUPING SETS ((id),(subtitle)) + ORDER BY id, subtitle; DEBUG: Creating router plan DEBUG: Plan is router executable id | subtitle | count @@ -483,7 +484,8 @@ SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash WHERE author_id = 1 or author_id = 2 - GROUP BY GROUPING SETS ((id),(subtitle)); + GROUP BY GROUPING SETS ((id),(subtitle)) + ORDER BY id, subtitle; ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP HINT: Consider using an equality filter on the distributed table's partition column. -- queries which involve functions in FROM clause are supported if it goes to a single worker. 
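The GROUPING SETS hunks above add an ORDER BY for the same reason: a grouping-sets aggregate returns one result row per grouping, and the relative order of the rows grouped by (id) versus those grouped by (subtitle) is not guaranteed. A minimal sketch of the now-ordered query shape on plain PostgreSQL (articles_gs is an illustrative table, not taken from the patch):

CREATE TEMP TABLE articles_gs (id int, author_id int, title text);
INSERT INTO articles_gs VALUES (1, 1, 'arsenous'), (3, 3, 'asternal');

-- One output row per grouping: in the (id) rows subtitle is NULL, in the
-- (subtitle) rows id is NULL; the ORDER BY pins their relative order.
SELECT id, substring(title, 2, 1) AS subtitle, count(*)
FROM articles_gs
WHERE author_id = 1 OR author_id = 3
GROUP BY GROUPING SETS ((id), (subtitle))
ORDER BY id, subtitle;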
@@ -820,45 +822,27 @@ DEBUG: Plan is router executable 11814 (1 row) - -- router plannable union queries are supported -(SELECT * FROM articles_hash WHERE author_id = 1) -UNION -(SELECT * FROM articles_hash WHERE author_id = 3); -DEBUG: Creating router plan -DEBUG: Plan is router executable - id | author_id | title | word_count -----+-----------+--------------+------------ - 3 | 3 | asternal | 10480 - 43 | 3 | affixal | 12723 - 23 | 3 | abhorring | 6799 - 13 | 3 | aseyev | 2255 - 11 | 1 | alamo | 1347 - 41 | 1 | aznavour | 11814 - 1 | 1 | arsenous | 9572 - 21 | 1 | arcading | 5890 - 31 | 1 | athwartships | 7271 - 33 | 3 | autochrome | 8180 -(10 rows) - SELECT * FROM ( - (SELECT * FROM articles_hash WHERE author_id = 1) + SELECT * FROM articles_hash WHERE author_id = 1 UNION - (SELECT * FROM articles_hash WHERE author_id = 3)) uu; + SELECT * FROM articles_hash WHERE author_id = 3 +) AS combination +ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ - 3 | 3 | asternal | 10480 - 43 | 3 | affixal | 12723 - 23 | 3 | abhorring | 6799 - 13 | 3 | aseyev | 2255 - 11 | 1 | alamo | 1347 - 41 | 1 | aznavour | 11814 1 | 1 | arsenous | 9572 + 3 | 3 | asternal | 10480 + 11 | 1 | alamo | 1347 + 13 | 3 | aseyev | 2255 21 | 1 | arcading | 5890 + 23 | 3 | abhorring | 6799 31 | 1 | athwartships | 7271 33 | 3 | autochrome | 8180 + 41 | 1 | aznavour | 11814 + 43 | 3 | affixal | 12723 (10 rows) (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 1) @@ -881,17 +865,20 @@ DEBUG: Plan is router executable a (1 row) -(SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 1) -EXCEPT -(SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 3); +SELECT * FROM ( + SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 1 + EXCEPT + SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 3 +) AS combination +ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router executable left ------ + al + ar at az - ar - al (4 rows) -- union queries are not supported if not router plannable @@ -1379,31 +1366,37 @@ DEBUG: Plan is router executable -- union/difference /intersection with where false -- this query was not originally router plannable, addition of 1=0 -- makes it router plannable -(SELECT * FROM articles_hash WHERE author_id = 1) -UNION -(SELECT * FROM articles_hash WHERE author_id = 2 and 1=0); +SELECT * FROM ( + SELECT * FROM articles_hash WHERE author_id = 1 + UNION + SELECT * FROM articles_hash WHERE author_id = 2 and 1=0 +) AS combination +ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ - 11 | 1 | alamo | 1347 - 41 | 1 | aznavour | 11814 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 + 41 | 1 | aznavour | 11814 (5 rows) -(SELECT * FROM articles_hash WHERE author_id = 1) -EXCEPT -(SELECT * FROM articles_hash WHERE author_id = 2 and 1=0); +SELECT * FROM ( + SELECT * FROM articles_hash WHERE author_id = 1 + EXCEPT + SELECT * FROM articles_hash WHERE author_id = 2 and 1=0 +) AS combination +ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 + 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 - 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 (5 rows) diff --git 
a/src/test/regress/expected/multi_tpch_query1.out b/src/test/regress/expected/multi_tpch_query1.out index 35a541c50..23a507e06 100644 --- a/src/test/regress/expected/multi_tpch_query1.out +++ b/src/test/regress/expected/multi_tpch_query1.out @@ -1,7 +1,6 @@ -- -- MULTI_TPCH_QUERY1 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 890000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query10.out b/src/test/regress/expected/multi_tpch_query10.out index 5213bc5af..82eefa6df 100644 --- a/src/test/regress/expected/multi_tpch_query10.out +++ b/src/test/regress/expected/multi_tpch_query10.out @@ -4,7 +4,6 @@ -- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 900000; SELECT c_custkey, c_name, diff --git a/src/test/regress/expected/multi_tpch_query12.out b/src/test/regress/expected/multi_tpch_query12.out index a6270cfba..e54d3e0a4 100644 --- a/src/test/regress/expected/multi_tpch_query12.out +++ b/src/test/regress/expected/multi_tpch_query12.out @@ -1,7 +1,6 @@ -- -- MULTI_TPCH_QUERY12 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 910000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query14.out b/src/test/regress/expected/multi_tpch_query14.out index 0ee550e84..2eded6dda 100644 --- a/src/test/regress/expected/multi_tpch_query14.out +++ b/src/test/regress/expected/multi_tpch_query14.out @@ -1,7 +1,6 @@ -- -- MULTI_TPCH_QUERY14 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 920000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query19.out b/src/test/regress/expected/multi_tpch_query19.out index 2deb3aca3..d6ee657c1 100644 --- a/src/test/regress/expected/multi_tpch_query19.out +++ b/src/test/regress/expected/multi_tpch_query19.out @@ -1,7 +1,6 @@ -- -- MULTI_TPCH_QUERY19 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 930000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified diff --git a/src/test/regress/expected/multi_tpch_query3.out b/src/test/regress/expected/multi_tpch_query3.out index 853c4e493..da87dc8f5 100644 --- a/src/test/regress/expected/multi_tpch_query3.out +++ b/src/test/regress/expected/multi_tpch_query3.out @@ -4,7 +4,6 @@ -- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 940000; SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, diff --git a/src/test/regress/expected/multi_tpch_query6.out b/src/test/regress/expected/multi_tpch_query6.out index 66398d859..9badebd9e 100644 --- a/src/test/regress/expected/multi_tpch_query6.out +++ b/src/test/regress/expected/multi_tpch_query6.out @@ -1,7 +1,6 @@ -- -- MULTI_TPCH_QUERY6 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 950000; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query7.out b/src/test/regress/expected/multi_tpch_query7.out index d14202e92..55ab9ae63 100644 --- a/src/test/regress/expected/multi_tpch_query7.out +++ b/src/test/regress/expected/multi_tpch_query7.out @@ -1,7 +1,6 @@ -- -- MULTI_TPCH_QUERY7 -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 970000; -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark diff --git a/src/test/regress/expected/multi_tpch_query7_nested.out b/src/test/regress/expected/multi_tpch_query7_nested.out index ed2ff9529..d6d6acf71 100644 --- a/src/test/regress/expected/multi_tpch_query7_nested.out +++ b/src/test/regress/expected/multi_tpch_query7_nested.out @@ -1,7 +1,6 @@ -- -- MULTI_TPCH_QUERY7_NESTED -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 960000; -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects diff --git a/src/test/regress/expected/multi_working_columns.out b/src/test/regress/expected/multi_working_columns.out index f1d735b96..38c05af49 100644 --- a/src/test/regress/expected/multi_working_columns.out +++ b/src/test/regress/expected/multi_working_columns.out @@ -5,7 +5,6 @@ -- projection order are called working (resjunk) columns. We check in here that -- these columns are pulled to the master, and are correctly used in sorting and -- grouping. 
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1040000; SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; l_quantity ------------ diff --git a/src/test/regress/expected/task_tracker_assign_task.out b/src/test/regress/expected/task_tracker_assign_task.out index f3f59b6e9..d9dff929b 100644 --- a/src/test/regress/expected/task_tracker_assign_task.out +++ b/src/test/regress/expected/task_tracker_assign_task.out @@ -1,7 +1,6 @@ -- -- TASK_TRACKER_ASSIGN_TASK -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1050000; \set JobId 401010 \set SimpleTaskId 101101 \set RecoverableTaskId 801102 diff --git a/src/test/regress/expected/task_tracker_partition_task.out b/src/test/regress/expected/task_tracker_partition_task.out index 3a3b821b8..2d36f93b0 100644 --- a/src/test/regress/expected/task_tracker_partition_task.out +++ b/src/test/regress/expected/task_tracker_partition_task.out @@ -1,7 +1,6 @@ -- -- TASK_TRACKER_PARTITION_TASK -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1080000; \set JobId 401010 \set PartitionTaskId 801106 \set PartitionColumn l_orderkey diff --git a/src/test/regress/expected/worker_binary_data_partition.out b/src/test/regress/expected/worker_binary_data_partition.out index 1c8a1f9cd..afab11021 100644 --- a/src/test/regress/expected/worker_binary_data_partition.out +++ b/src/test/regress/expected/worker_binary_data_partition.out @@ -1,7 +1,6 @@ -- -- WORKER_BINARY_DATA_PARTITION -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1090000; \set JobId 201010 \set TaskId 101105 \set Partition_Column textcolumn diff --git a/src/test/regress/expected/worker_hash_partition.out b/src/test/regress/expected/worker_hash_partition.out index afc7bba3c..61725f4f7 100644 --- a/src/test/regress/expected/worker_hash_partition.out +++ b/src/test/regress/expected/worker_hash_partition.out @@ -1,7 +1,6 @@ -- -- WORKER_HASH_PARTITION -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1130000; \set JobId 201010 \set TaskId 101103 \set Partition_Column l_orderkey diff --git a/src/test/regress/expected/worker_hash_partition_complex.out b/src/test/regress/expected/worker_hash_partition_complex.out index 1b1727cf8..2dfbe3702 100644 --- a/src/test/regress/expected/worker_hash_partition_complex.out +++ b/src/test/regress/expected/worker_hash_partition_complex.out @@ -1,7 +1,6 @@ -- -- WORKER_HASH_PARTITION_COMPLEX -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1120000; \set JobId 201010 \set TaskId 101104 \set Partition_Column l_partkey diff --git a/src/test/regress/expected/worker_merge_hash_files.out b/src/test/regress/expected/worker_merge_hash_files.out index 82cb81cf1..89246b89f 100644 --- a/src/test/regress/expected/worker_merge_hash_files.out +++ b/src/test/regress/expected/worker_merge_hash_files.out @@ -1,7 +1,6 @@ -- -- WORKER_MERGE_HASH_FILES -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1140000; \set JobId 201010 \set TaskId 101103 \set Task_Table_Name public.task_101103 diff --git a/src/test/regress/expected/worker_merge_range_files.out b/src/test/regress/expected/worker_merge_range_files.out index 80f7797cc..b39f52731 100644 --- a/src/test/regress/expected/worker_merge_range_files.out +++ b/src/test/regress/expected/worker_merge_range_files.out @@ -1,7 +1,6 @@ -- -- WORKER_MERGE_RANGE_FILES -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1150000; \set JobId 201010 \set TaskId 101101 \set Task_Table_Name public.task_101101 diff --git a/src/test/regress/expected/worker_null_data_partition.out 
b/src/test/regress/expected/worker_null_data_partition.out index fc14e6d12..11881fbc9 100644 --- a/src/test/regress/expected/worker_null_data_partition.out +++ b/src/test/regress/expected/worker_null_data_partition.out @@ -1,7 +1,6 @@ -- -- WORKER_NULL_DATA_PARTITION -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1180000; \set JobId 201010 \set Range_TaskId 101106 \set Partition_Column s_nationkey diff --git a/src/test/regress/expected/worker_range_partition.out b/src/test/regress/expected/worker_range_partition.out index 0590bdba7..14e6203ed 100644 --- a/src/test/regress/expected/worker_range_partition.out +++ b/src/test/regress/expected/worker_range_partition.out @@ -1,7 +1,6 @@ -- -- WORKER_RANGE_PARTITION -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1160000; \set JobId 201010 \set TaskId 101101 \set Partition_Column l_orderkey diff --git a/src/test/regress/expected/worker_range_partition_complex.out b/src/test/regress/expected/worker_range_partition_complex.out index 15ac92db9..57b2c7795 100644 --- a/src/test/regress/expected/worker_range_partition_complex.out +++ b/src/test/regress/expected/worker_range_partition_complex.out @@ -1,7 +1,6 @@ -- -- WORKER_RANGE_PARTITION_COMPLEX -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1170000; \set JobId 201010 \set TaskId 101102 \set Partition_Column l_partkey diff --git a/src/test/regress/input/multi_agg_distinct.source b/src/test/regress/input/multi_agg_distinct.source index 6f601b9ca..0c36c0284 100644 --- a/src/test/regress/input/multi_agg_distinct.source +++ b/src/test/regress/input/multi_agg_distinct.source @@ -3,9 +3,6 @@ -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000; - - -- Create a new range partitioned lineitem table and load data into it CREATE TABLE lineitem_range ( l_orderkey bigint not null, diff --git a/src/test/regress/input/multi_agg_type_conversion.source b/src/test/regress/input/multi_agg_type_conversion.source index 393245e25..80b08cb34 100644 --- a/src/test/regress/input/multi_agg_type_conversion.source +++ b/src/test/regress/input/multi_agg_type_conversion.source @@ -3,9 +3,6 @@ -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 210000; - - -- Test aggregate type conversions using sums of integers and division operator SELECT sum(l_suppkey) FROM lineitem; SELECT sum(l_suppkey) / 2 FROM lineitem; diff --git a/src/test/regress/input/multi_insert_select_behavioral_analytics_create_table.source b/src/test/regress/input/multi_behavioral_analytics_create_table.source similarity index 97% rename from src/test/regress/input/multi_insert_select_behavioral_analytics_create_table.source rename to src/test/regress/input/multi_behavioral_analytics_create_table.source index e68fe2edf..e4f31af0b 100644 --- a/src/test/regress/input/multi_insert_select_behavioral_analytics_create_table.source +++ b/src/test/regress/input/multi_behavioral_analytics_create_table.source @@ -1,5 +1,5 @@ -- --- multi insert select behavioral analytics +-- multi behavioral analytics -- this file is intended to create the table requires for the tests -- diff --git a/src/test/regress/input/multi_mx_copy_data.source b/src/test/regress/input/multi_mx_copy_data.source index a231d4ad8..525216771 100644 --- a/src/test/regress/input/multi_mx_copy_data.source +++ b/src/test/regress/input/multi_mx_copy_data.source @@ -2,8 +2,6 @@ -- MULTI_MX_COPY_DATA -- -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; - \COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; SET search_path TO 
diff --git a/src/test/regress/input/multi_subquery.source b/src/test/regress/input/multi_subquery.source
index ed2dae668..736774b18 100644
--- a/src/test/regress/input/multi_subquery.source
+++ b/src/test/regress/input/multi_subquery.source
@@ -7,7 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000;
 -- print major version to make version-specific tests clear
 SHOW server_version \gset
-SELECT substring(:'server_version', '\d+\.\d+') AS major_version;
+SELECT substring(:'server_version', '\d+(?:\.\d+)?') AS major_version;
 -- Create tables for subquery tests
@@ -1060,7 +1060,9 @@ FROM
   subquery_1.user_id,
   hasdone) AS subquery_top
 GROUP BY
-  hasdone;
+  hasdone
+ORDER BY
+  event_average;
 -- Union, left join and having subquery pushdown
 SELECT
diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule
index 9812e323b..bc7d0d977 100644
--- a/src/test/regress/multi_schedule
+++ b/src/test/regress/multi_schedule
@@ -30,8 +30,8 @@ test: multi_create_table_constraints
 test: multi_master_protocol
 test: multi_load_data
-test: multi_insert_select_behavioral_analytics_create_table
-test: multi_insert_select_behavioral_analytics_basics multi_insert_select_behavioral_analytics_single_shard_queries multi_insert_select_non_pushable_queries
+test: multi_behavioral_analytics_create_table
+test: multi_behavioral_analytics_basics multi_behavioral_analytics_single_shard_queries multi_insert_select_non_pushable_queries
 test: multi_insert_select
 # ----------
diff --git a/src/test/regress/output/multi_agg_distinct.source b/src/test/regress/output/multi_agg_distinct.source
index 3f0ab6db6..a62e4bd56 100644
--- a/src/test/regress/output/multi_agg_distinct.source
+++ b/src/test/regress/output/multi_agg_distinct.source
@@ -1,7 +1,6 @@
 --
 -- MULTI_AGG_DISTINCT
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 200000;
 -- Create a new range partitioned lineitem table and load data into it
 CREATE TABLE lineitem_range (
 	l_orderkey bigint not null,
diff --git a/src/test/regress/output/multi_agg_type_conversion.source b/src/test/regress/output/multi_agg_type_conversion.source
index 599709891..1d8e1f64d 100644
--- a/src/test/regress/output/multi_agg_type_conversion.source
+++ b/src/test/regress/output/multi_agg_type_conversion.source
@@ -1,7 +1,6 @@
 --
 -- MULTI_AGG_TYPE_CONVERSION
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 210000;
 -- Test aggregate type conversions using sums of integers and division operator
 SELECT sum(l_suppkey) FROM lineitem;
     sum
diff --git a/src/test/regress/output/multi_insert_select_behavioral_analytics_create_table.source b/src/test/regress/output/multi_behavioral_analytics_create_table.source
similarity index 98%
rename from src/test/regress/output/multi_insert_select_behavioral_analytics_create_table.source
rename to src/test/regress/output/multi_behavioral_analytics_create_table.source
index c149e77a8..3506fe929 100644
--- a/src/test/regress/output/multi_insert_select_behavioral_analytics_create_table.source
+++ b/src/test/regress/output/multi_behavioral_analytics_create_table.source
@@ -1,5 +1,5 @@
 --
--- multi insert select behavioral analytics
+-- multi behavioral analytics
 -- this file is intended to create the table requires for the tests
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000;
diff --git a/src/test/regress/output/multi_mx_copy_data.source b/src/test/regress/output/multi_mx_copy_data.source
index 7883641fd..06ba5790a 100644
--- a/src/test/regress/output/multi_mx_copy_data.source
+++ b/src/test/regress/output/multi_mx_copy_data.source
@@ -1,7 +1,6 @@
 --
 -- MULTI_MX_COPY_DATA
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
 \COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
 SET search_path TO citus_mx_test_schema;
 \COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
diff --git a/src/test/regress/output/multi_subquery.source b/src/test/regress/output/multi_subquery.source
index 040ca4e7f..6e7648244 100644
--- a/src/test/regress/output/multi_subquery.source
+++ b/src/test/regress/output/multi_subquery.source
@@ -4,7 +4,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000;
 -- print major version to make version-specific tests clear
 SHOW server_version \gset
-SELECT substring(:'server_version', '\d+\.\d+') AS major_version;
+SELECT substring(:'server_version', '\d+(?:\.\d+)?') AS major_version;
  major_version 
 ---------------
  9.6
@@ -1021,11 +1021,13 @@ FROM
   subquery_1.user_id,
   hasdone) AS subquery_top
 GROUP BY
-  hasdone;
+  hasdone
+ORDER BY
+  event_average;
    event_average    |       hasdone       
 --------------------+---------------------
- 4.0000000000000000 | Has not done paying
  2.5000000000000000 | Has done paying
+ 4.0000000000000000 | Has not done paying
 (2 rows)
 -- Union, left join and having subquery pushdown
diff --git a/src/test/regress/output/multi_subquery_0.source b/src/test/regress/output/multi_subquery_0.source
index 205379549..24b5640f7 100644
--- a/src/test/regress/output/multi_subquery_0.source
+++ b/src/test/regress/output/multi_subquery_0.source
@@ -4,7 +4,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000;
 -- print major version to make version-specific tests clear
 SHOW server_version \gset
-SELECT substring(:'server_version', '\d+\.\d+') AS major_version;
+SELECT substring(:'server_version', '\d+(?:\.\d+)?') AS major_version;
  major_version 
 ---------------
  9.5
@@ -1021,11 +1021,13 @@ FROM
   subquery_1.user_id,
   hasdone) AS subquery_top
 GROUP BY
-  hasdone;
+  hasdone
+ORDER BY
+  event_average;
    event_average    |       hasdone       
 --------------------+---------------------
- 4.0000000000000000 | Has not done paying
  2.5000000000000000 | Has done paying
+ 4.0000000000000000 | Has not done paying
 (2 rows)
 -- Union, left join and having subquery pushdown
diff --git a/src/test/regress/sql/multi_agg_approximate_distinct.sql b/src/test/regress/sql/multi_agg_approximate_distinct.sql
index 1dabe864c..a6904baf0 100644
--- a/src/test/regress/sql/multi_agg_approximate_distinct.sql
+++ b/src/test/regress/sql/multi_agg_approximate_distinct.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 340000;
-
 -- Create HLL extension if present, print false result otherwise
 SELECT CASE WHEN COUNT(*) > 0 THEN
 'CREATE EXTENSION HLL'
diff --git a/src/test/regress/sql/multi_average_expression.sql b/src/test/regress/sql/multi_average_expression.sql
index 2f1774f43..a8da49e7c 100644
--- a/src/test/regress/sql/multi_average_expression.sql
+++ b/src/test/regress/sql/multi_average_expression.sql
@@ -6,9 +6,6 @@
 -- to a bug we had due to the average expression introducing new columns.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 450000;
-
-
 SELECT
 	sum(l_quantity) as sum_qty,
 	sum(l_extendedprice) as sum_base_price,
diff --git a/src/test/regress/sql/multi_basic_queries.sql b/src/test/regress/sql/multi_basic_queries.sql
index 583ec0bc3..881a7a18f 100644
--- a/src/test/regress/sql/multi_basic_queries.sql
+++ b/src/test/regress/sql/multi_basic_queries.sql
@@ -2,13 +2,10 @@
 -- MULTI_BASIC_QUERIES
 --
-
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 440000;
-
-
 -- Execute simple sum, average, and count queries on data recently uploaded to
 -- our partitioned table.
+
 SELECT count(*) FROM lineitem;
 SELECT sum(l_extendedprice) FROM lineitem;
diff --git a/src/test/regress/sql/multi_insert_select_behavioral_analytics_basics.sql b/src/test/regress/sql/multi_behavioral_analytics_basics.sql
similarity index 100%
rename from src/test/regress/sql/multi_insert_select_behavioral_analytics_basics.sql
rename to src/test/regress/sql/multi_behavioral_analytics_basics.sql
diff --git a/src/test/regress/sql/multi_insert_select_behavioral_analytics_single_shard_queries.sql b/src/test/regress/sql/multi_behavioral_analytics_single_shard_queries.sql
similarity index 100%
rename from src/test/regress/sql/multi_insert_select_behavioral_analytics_single_shard_queries.sql
rename to src/test/regress/sql/multi_behavioral_analytics_single_shard_queries.sql
diff --git a/src/test/regress/sql/multi_complex_expressions.sql b/src/test/regress/sql/multi_complex_expressions.sql
index 4891b2103..45ebb7fd8 100644
--- a/src/test/regress/sql/multi_complex_expressions.sql
+++ b/src/test/regress/sql/multi_complex_expressions.sql
@@ -2,12 +2,9 @@
 -- MULTI_COMPLEX_EXPRESSIONS
 --
-
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 420000;
-
-
 -- Check that we can correctly handle complex expressions and aggregates.
+
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
 SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem;
@@ -217,8 +214,7 @@ FROM lineitem li
 JOIN orders o ON li.l_orderkey = o.o_orderkey
 WHERE li.l_quantity > 25
-ORDER BY
-	li.l_quantity, li.l_partkey, o.o_custkey
+ORDER BY 1, 2, 3
 LIMIT 10 OFFSET 20;
 RESET client_min_messages;
diff --git a/src/test/regress/sql/multi_count_type_conversion.sql b/src/test/regress/sql/multi_count_type_conversion.sql
index b0059c299..315f8ebb8 100644
--- a/src/test/regress/sql/multi_count_type_conversion.sql
+++ b/src/test/regress/sql/multi_count_type_conversion.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 400000;
-
-
 -- Verify that we can sort count(*) results correctly. We perform this check as
 -- our count() operations execute in two steps: worker nodes report their
 -- count() results, and the master node sums these counts up. During this sum(),
diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql
index e744b1f31..1a79f75bc 100644
--- a/src/test/regress/sql/multi_explain.sql
+++ b/src/test/regress/sql/multi_explain.sql
@@ -5,7 +5,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
 -- print major version to make version-specific tests clear
-SELECT substring(version(), '\d+\.\d+') AS major_version;
+SELECT substring(version(), '\d+(?:\.\d+)?') AS major_version;
 \a\t
diff --git a/src/test/regress/sql/multi_insert_select.sql b/src/test/regress/sql/multi_insert_select.sql
index 4e5d8a4c6..53c510f4e 100644
--- a/src/test/regress/sql/multi_insert_select.sql
+++ b/src/test/regress/sql/multi_insert_select.sql
@@ -235,7 +235,8 @@ FROM
   (SELECT raw_events_second.user_id AS id,
    FROM raw_events_first,
         raw_events_second
    WHERE raw_events_first.user_id = raw_events_second.user_id) AS foo
-GROUP BY id;
+GROUP BY id
+ORDER BY id;
 -- subquery one more level depth
@@ -252,7 +253,8 @@ FROM
   (SELECT SUM(raw_events_second.value_4) AS v4,
    FROM raw_events_first,
         raw_events_second
    WHERE raw_events_first.user_id = raw_events_second.user_id
-   GROUP BY raw_events_second.user_id) AS foo;
+   GROUP BY raw_events_second.user_id) AS foo
+ORDER BY id;
 -- join between subqueries
 INSERT INTO agg_events
diff --git a/src/test/regress/sql/multi_limit_clause.sql b/src/test/regress/sql/multi_limit_clause.sql
index bfe3ead77..6ddc9f770 100644
--- a/src/test/regress/sql/multi_limit_clause.sql
+++ b/src/test/regress/sql/multi_limit_clause.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 730000;
-
-
 -- Display debug messages on limit clause push down.
 SET client_min_messages TO DEBUG1;
diff --git a/src/test/regress/sql/multi_limit_clause_approximate.sql b/src/test/regress/sql/multi_limit_clause_approximate.sql
index 8d2cf19bb..ab81768b4 100644
--- a/src/test/regress/sql/multi_limit_clause_approximate.sql
+++ b/src/test/regress/sql/multi_limit_clause_approximate.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 720000;
-
-
 -- Display debug messages on limit clause push down.
 SET client_min_messages TO DEBUG1;
diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql
index e7510d3ff..d698e8b08 100644
--- a/src/test/regress/sql/multi_metadata_sync.sql
+++ b/src/test/regress/sql/multi_metadata_sync.sql
@@ -258,6 +258,7 @@ SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;
 -- Check that CREATE INDEX statement is propagated
 \c - - - :master_port
 SET citus.multi_shard_commit_protocol TO '2pc';
+SET client_min_messages TO 'ERROR';
 CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1);
 CREATE UNIQUE INDEX mx_index_4 ON mx_test_schema_2.mx_table_2(col1);
 \c - - - :worker_1_port
diff --git a/src/test/regress/sql/multi_mx_ddl.sql b/src/test/regress/sql/multi_mx_ddl.sql
index 9363b94d8..3c4301dc1 100644
--- a/src/test/regress/sql/multi_mx_ddl.sql
+++ b/src/test/regress/sql/multi_mx_ddl.sql
@@ -1,7 +1,5 @@
 -- Tests related to distributed DDL commands on mx cluster
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1600000;
-
 SELECT * FROM mx_ddl_table ORDER BY key;
 -- CREATE INDEX
@@ -105,12 +103,12 @@ SELECT create_distributed_table('mx_sequence', 'key');
 \c - - - :worker_1_port
 SELECT groupid FROM pg_dist_local_group;
-SELECT * FROM mx_sequence_value_seq;
+SELECT last_value FROM mx_sequence_value_seq;
 \c - - - :worker_2_port
 SELECT groupid FROM pg_dist_local_group;
-SELECT * FROM mx_sequence_value_seq;
+SELECT last_value FROM mx_sequence_value_seq;
 \c - - - :master_port
diff --git a/src/test/regress/sql/multi_mx_reference_table.sql b/src/test/regress/sql/multi_mx_reference_table.sql
index b612ea91e..84d002e9e 100644
--- a/src/test/regress/sql/multi_mx_reference_table.sql
+++ b/src/test/regress/sql/multi_mx_reference_table.sql
@@ -195,17 +195,26 @@ WHERE
 	value_1 = 1 OR value_1 = 2;
 -- set operations are supported
-(SELECT * FROM reference_table_test WHERE value_1 = 1)
-UNION
-(SELECT * FROM reference_table_test WHERE value_1 = 3);
+SELECT * FROM (
+	SELECT * FROM reference_table_test WHERE value_1 = 1
+	UNION
+	SELECT * FROM reference_table_test WHERE value_1 = 3
+) AS combination
+ORDER BY value_1;
-(SELECT * FROM reference_table_test WHERE value_1 = 1)
-EXCEPT
-(SELECT * FROM reference_table_test WHERE value_1 = 3);
+SELECT * FROM (
+	SELECT * FROM reference_table_test WHERE value_1 = 1
+	EXCEPT
+	SELECT * FROM reference_table_test WHERE value_1 = 3
+) AS combination
+ORDER BY value_1;
-(SELECT * FROM reference_table_test WHERE value_1 = 1)
-INTERSECT
-(SELECT * FROM reference_table_test WHERE value_1 = 3);
+SELECT * FROM (
+	SELECT * FROM reference_table_test WHERE value_1 = 1
+	INTERSECT
+	SELECT * FROM reference_table_test WHERE value_1 = 3
+) AS combination
+ORDER BY value_1;
 -- to make the tests more interested for aggregation tests, ingest some more data
 \c - - - :master_port
diff --git a/src/test/regress/sql/multi_mx_router_planner.sql b/src/test/regress/sql/multi_mx_router_planner.sql
index 0eb66b353..9b76c7830 100644
--- a/src/test/regress/sql/multi_mx_router_planner.sql
+++ b/src/test/regress/sql/multi_mx_router_planner.sql
@@ -1,6 +1,4 @@
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000;
-
 -- ===================================================================
 -- test router planner functionality for single shard select queries
@@ -202,14 +200,16 @@ SELECT id, substring(title, 2, 1) AS subtitle, count(*)
 	FROM articles_hash_mx
 	WHERE author_id = 1 or author_id = 3
-	GROUP BY GROUPING SETS ((id),(subtitle));
+	GROUP BY GROUPING SETS ((id),(subtitle))
+	ORDER BY id, subtitle;
 -- grouping sets are not supported on multiple shards
 SELECT id, substring(title, 2, 1) AS subtitle, count(*)
 	FROM articles_hash_mx
 	WHERE author_id = 1 or author_id = 2
-	GROUP BY GROUPING SETS ((id),(subtitle));
+	GROUP BY GROUPING SETS ((id),(subtitle))
+	ORDER BY id, subtitle;
 -- queries which involve functions in FROM clause are supported if it goes to a single worker.
 SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1;
@@ -331,14 +331,12 @@ SELECT max(word_count)
 -- router plannable union queries are supported
-(SELECT * FROM articles_hash_mx WHERE author_id = 1)
-UNION
-(SELECT * FROM articles_hash_mx WHERE author_id = 3);
-
 SELECT * FROM (
-  (SELECT * FROM articles_hash_mx WHERE author_id = 1)
+  SELECT * FROM articles_hash_mx WHERE author_id = 1
   UNION
-  (SELECT * FROM articles_hash_mx WHERE author_id = 3)) uu;
+  SELECT * FROM articles_hash_mx WHERE author_id = 3
+) AS combination
+ORDER BY id;
 (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1)
 UNION
@@ -348,9 +346,12 @@ UNION
 INTERSECT
 (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3);
-(SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1)
-EXCEPT
-(SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3);
+SELECT * FROM (
+  SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1
+  EXCEPT
+  SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3
+) AS combination
+ORDER BY 1;
 -- union queries are not supported if not router plannable
 -- there is an inconsistency on shard pruning between
diff --git a/src/test/regress/sql/multi_mx_schema_support.sql b/src/test/regress/sql/multi_mx_schema_support.sql
index b4cadeb80..999b6d799 100644
--- a/src/test/regress/sql/multi_mx_schema_support.sql
+++ b/src/test/regress/sql/multi_mx_schema_support.sql
@@ -2,8 +2,6 @@
 -- MULTI_MX_SCHEMA_SUPPORT
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1210000;
-
 -- connect to a worker node and run some queries
 \c - - - :worker_1_port
diff --git a/src/test/regress/sql/multi_mx_tpch_query1.sql b/src/test/regress/sql/multi_mx_tpch_query1.sql
index 690059122..45a54ac40 100644
--- a/src/test/regress/sql/multi_mx_tpch_query1.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query1.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-
 -- connect to the coordinator
 \c - - - :master_port
@@ -39,8 +37,6 @@ ORDER BY
 -- connect one of the workers
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
@@ -72,8 +68,6 @@ ORDER BY
 -- connect to the other node
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_mx_tpch_query10.sql b/src/test/regress/sql/multi_mx_tpch_query10.sql
index bbfe4e1cd..bc26f8acf 100644
--- a/src/test/regress/sql/multi_mx_tpch_query10.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query10.sql
@@ -7,9 +7,6 @@
 -- coming from postgresql.conf or multi_task_tracker_executor.conf.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
-
 -- connect to master
 \c - - - :master_port
@@ -49,8 +47,6 @@ LIMIT 20;
 -- connect one of the workers
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
-
 SELECT
 	c_custkey,
 	c_name,
@@ -87,8 +83,6 @@ LIMIT 20;
 -- connect to the other worker
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
-
 SELECT
 	c_custkey,
 	c_name,
diff --git a/src/test/regress/sql/multi_mx_tpch_query12.sql b/src/test/regress/sql/multi_mx_tpch_query12.sql
index a2c838158..ef47d3040 100644
--- a/src/test/regress/sql/multi_mx_tpch_query12.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query12.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
-
 -- connect to the coordinator
 \c - - - :master_port
@@ -46,8 +44,6 @@ ORDER BY
 -- connect one of the workers
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
@@ -86,8 +82,6 @@ ORDER BY
 -- connect to the other worker node
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_mx_tpch_query14.sql b/src/test/regress/sql/multi_mx_tpch_query14.sql
index 153add47a..22fa70273 100644
--- a/src/test/regress/sql/multi_mx_tpch_query14.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query14.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
-
 -- connect to the coordinator
 \c - - - :master_port
@@ -31,8 +29,6 @@ WHERE
 -- connect one of the workers
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
@@ -56,8 +52,6 @@ WHERE
 -- connect to the other node
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_mx_tpch_query19.sql b/src/test/regress/sql/multi_mx_tpch_query19.sql
index aa14143df..a0b8c0f56 100644
--- a/src/test/regress/sql/multi_mx_tpch_query19.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query19.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
-
 -- connect to the coordinator
 \c - - - :master_port
@@ -48,8 +46,6 @@ WHERE
 -- connect one of the workers
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
@@ -90,8 +86,6 @@ WHERE
 -- connect to the other node
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_mx_tpch_query3.sql b/src/test/regress/sql/multi_mx_tpch_query3.sql
index 991edf1ce..a9c27734f 100644
--- a/src/test/regress/sql/multi_mx_tpch_query3.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query3.sql
@@ -7,8 +7,6 @@
 -- coming from postgresql.conf or multi_task_tracker_executor.conf.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
-
 -- connect to the coordinator
 \c - - - :master_port
@@ -38,8 +36,6 @@ ORDER BY
 -- connect one of the workers
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
-
 SELECT
 	l_orderkey,
 	sum(l_extendedprice * (1 - l_discount)) as revenue,
@@ -66,8 +62,6 @@ ORDER BY
 -- connect to the other node
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
-
 SELECT
 	l_orderkey,
 	sum(l_extendedprice * (1 - l_discount)) as revenue,
diff --git a/src/test/regress/sql/multi_mx_tpch_query6.sql b/src/test/regress/sql/multi_mx_tpch_query6.sql
index 61e8cf0bf..d28854c98 100644
--- a/src/test/regress/sql/multi_mx_tpch_query6.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query6.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
-
 -- connect to the coordinator
 \c - - - :master_port
@@ -27,8 +25,6 @@ WHERE
 -- connect to one of the worker nodes
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
@@ -48,8 +44,6 @@ WHERE
 -- connect to the other worker node
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_mx_tpch_query7.sql b/src/test/regress/sql/multi_mx_tpch_query7.sql
index 4f6b22d84..410213f26 100644
--- a/src/test/regress/sql/multi_mx_tpch_query7.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query7.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
-
 -- connect to the coordinator
 \c - - - :master_port
@@ -57,8 +55,6 @@ ORDER BY
 -- connect one of the workers
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
-
 -- Change configuration to treat lineitem AND orders tables as large
 SET citus.large_table_shard_count TO 2;
@@ -108,8 +104,6 @@ ORDER BY
 -- connect to the other worker node
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
-
 -- Change configuration to treat lineitem AND orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_mx_tpch_query7_nested.sql b/src/test/regress/sql/multi_mx_tpch_query7_nested.sql
index e71c5d345..cfe36a25a 100644
--- a/src/test/regress/sql/multi_mx_tpch_query7_nested.sql
+++ b/src/test/regress/sql/multi_mx_tpch_query7_nested.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
-
 -- connect to the coordinator
 \c - - - :master_port
@@ -66,8 +64,6 @@ ORDER BY
 -- connect to one of the workers
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
-
 -- Change configuration to treat lineitem AND orders tables AS large
 SET citus.large_table_shard_count TO 2;
@@ -126,8 +122,6 @@ ORDER BY
 -- connect to the coordinator
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
-
 -- Change configuration to treat lineitem AND orders tables AS large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_prepare_plsql.sql b/src/test/regress/sql/multi_prepare_plsql.sql
index 59d278b3d..f4cd15489 100644
--- a/src/test/regress/sql/multi_prepare_plsql.sql
+++ b/src/test/regress/sql/multi_prepare_plsql.sql
@@ -7,9 +7,6 @@
 -- use prepared statements internally.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 780000;
-
-
 CREATE FUNCTION plpgsql_test_1() RETURNS TABLE(count bigint) AS $$
 DECLARE
 BEGIN
diff --git a/src/test/regress/sql/multi_prepare_sql.sql b/src/test/regress/sql/multi_prepare_sql.sql
index 734b2f904..a49f154b0 100644
--- a/src/test/regress/sql/multi_prepare_sql.sql
+++ b/src/test/regress/sql/multi_prepare_sql.sql
@@ -7,9 +7,6 @@
 -- prepared statements.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 790000;
-
-
 PREPARE prepared_test_1 AS
 SELECT
 	count(*)
diff --git a/src/test/regress/sql/multi_reference_table.sql b/src/test/regress/sql/multi_reference_table.sql
index dd8e2d90f..46dcaa3bf 100644
--- a/src/test/regress/sql/multi_reference_table.sql
+++ b/src/test/regress/sql/multi_reference_table.sql
@@ -228,17 +228,26 @@ WHERE
 	value_1 = 1 OR value_1 = 2;
 -- set operations are supported
-(SELECT * FROM reference_table_test WHERE value_1 = 1)
-UNION
-(SELECT * FROM reference_table_test WHERE value_1 = 3);
+SELECT * FROM (
+	SELECT * FROM reference_table_test WHERE value_1 = 1
+	UNION
+	SELECT * FROM reference_table_test WHERE value_1 = 3
+) AS combination
+ORDER BY value_1;
-(SELECT * FROM reference_table_test WHERE value_1 = 1)
-EXCEPT
-(SELECT * FROM reference_table_test WHERE value_1 = 3);
+SELECT * FROM (
+	SELECT * FROM reference_table_test WHERE value_1 = 1
+	EXCEPT
+	SELECT * FROM reference_table_test WHERE value_1 = 3
+) AS combination
+ORDER BY value_1;
-(SELECT * FROM reference_table_test WHERE value_1 = 1)
-INTERSECT
-(SELECT * FROM reference_table_test WHERE value_1 = 3);
+SELECT * FROM (
+	SELECT * FROM reference_table_test WHERE value_1 = 1
+	INTERSECT
+	SELECT * FROM reference_table_test WHERE value_1 = 3
+) AS combination
+ORDER BY value_1;
 -- to make the tests more interested for aggregation tests, ingest some more data
 INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
diff --git a/src/test/regress/sql/multi_router_planner.sql b/src/test/regress/sql/multi_router_planner.sql
index b9a72f69c..97defa1e9 100644
--- a/src/test/regress/sql/multi_router_planner.sql
+++ b/src/test/regress/sql/multi_router_planner.sql
@@ -264,14 +264,16 @@ SELECT id, substring(title, 2, 1) AS subtitle, count(*)
 	FROM articles_hash
 	WHERE author_id = 1 or author_id = 3
-	GROUP BY GROUPING SETS ((id),(subtitle));
+	GROUP BY GROUPING SETS ((id),(subtitle))
+	ORDER BY id, subtitle;
 -- grouping sets are not supported on multiple shards
 SELECT id, substring(title, 2, 1) AS subtitle, count(*)
 	FROM articles_hash
 	WHERE author_id = 1 or author_id = 2
-	GROUP BY GROUPING SETS ((id),(subtitle));
+	GROUP BY GROUPING SETS ((id),(subtitle))
+	ORDER BY id, subtitle;
 -- queries which involve functions in FROM clause are supported if it goes to a single worker.
 SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1;
@@ -396,16 +398,14 @@ SELECT max(word_count)
 	WHERE author_id = 1
 	GROUP BY author_id;
-
--- router plannable union queries are supported
-(SELECT * FROM articles_hash WHERE author_id = 1)
-UNION
-(SELECT * FROM articles_hash WHERE author_id = 3);
+-- router plannable union queries are supported
 SELECT * FROM (
-  (SELECT * FROM articles_hash WHERE author_id = 1)
+  SELECT * FROM articles_hash WHERE author_id = 1
   UNION
-  (SELECT * FROM articles_hash WHERE author_id = 3)) uu;
+  SELECT * FROM articles_hash WHERE author_id = 3
+) AS combination
+ORDER BY id;
 (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 1)
 UNION
@@ -415,9 +415,12 @@ UNION
 INTERSECT
 (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3);
-(SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 1)
-EXCEPT
-(SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 3);
+SELECT * FROM (
+  SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 1
+  EXCEPT
+  SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 3
+) AS combination
+ORDER BY 1;
 -- union queries are not supported if not router plannable
 -- there is an inconsistency on shard pruning between
@@ -627,16 +630,24 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
 	FROM articles_hash a, articles_single_shard_hash b
 	WHERE a.author_id = 10 and a.author_id = b.author_id and
 	date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp);
+
 -- union/difference /intersection with where false
 -- this query was not originally router plannable, addition of 1=0
 -- makes it router plannable
-(SELECT * FROM articles_hash WHERE author_id = 1)
-UNION
-(SELECT * FROM articles_hash WHERE author_id = 2 and 1=0);
+SELECT * FROM (
+  SELECT * FROM articles_hash WHERE author_id = 1
+  UNION
+  SELECT * FROM articles_hash WHERE author_id = 2 and 1=0
+) AS combination
+ORDER BY id;
+
+SELECT * FROM (
+  SELECT * FROM articles_hash WHERE author_id = 1
+  EXCEPT
+  SELECT * FROM articles_hash WHERE author_id = 2 and 1=0
+) AS combination
+ORDER BY id;
-(SELECT * FROM articles_hash WHERE author_id = 1)
-EXCEPT
-(SELECT * FROM articles_hash WHERE author_id = 2 and 1=0);
 (SELECT * FROM articles_hash WHERE author_id = 1)
 INTERSECT
diff --git a/src/test/regress/sql/multi_tpch_query1.sql b/src/test/regress/sql/multi_tpch_query1.sql
index c8e408ffe..382289313 100644
--- a/src/test/regress/sql/multi_tpch_query1.sql
+++ b/src/test/regress/sql/multi_tpch_query1.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 890000;
-
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_tpch_query10.sql b/src/test/regress/sql/multi_tpch_query10.sql
index 056497e6f..e3046ece8 100644
--- a/src/test/regress/sql/multi_tpch_query10.sql
+++ b/src/test/regress/sql/multi_tpch_query10.sql
@@ -7,9 +7,6 @@
 -- coming from postgresql.conf or multi_task_tracker_executor.conf.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 900000;
-
-
 SELECT
 	c_custkey,
 	c_name,
diff --git a/src/test/regress/sql/multi_tpch_query12.sql b/src/test/regress/sql/multi_tpch_query12.sql
index a00f5f896..746d422b2 100644
--- a/src/test/regress/sql/multi_tpch_query12.sql
+++ b/src/test/regress/sql/multi_tpch_query12.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 910000;
-
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_tpch_query14.sql b/src/test/regress/sql/multi_tpch_query14.sql
index c15e063d6..74f5b9954 100644
--- a/src/test/regress/sql/multi_tpch_query14.sql
+++ b/src/test/regress/sql/multi_tpch_query14.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 920000;
-
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_tpch_query19.sql b/src/test/regress/sql/multi_tpch_query19.sql
index b048eeca4..42a18e3bd 100644
--- a/src/test/regress/sql/multi_tpch_query19.sql
+++ b/src/test/regress/sql/multi_tpch_query19.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 930000;
-
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_tpch_query3.sql b/src/test/regress/sql/multi_tpch_query3.sql
index e315f8bef..1c5659ac5 100644
--- a/src/test/regress/sql/multi_tpch_query3.sql
+++ b/src/test/regress/sql/multi_tpch_query3.sql
@@ -7,9 +7,6 @@
 -- coming from postgresql.conf or multi_task_tracker_executor.conf.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 940000;
-
-
 SELECT
 	l_orderkey,
 	sum(l_extendedprice * (1 - l_discount)) as revenue,
diff --git a/src/test/regress/sql/multi_tpch_query6.sql b/src/test/regress/sql/multi_tpch_query6.sql
index d84b31e1a..e0d4d4fea 100644
--- a/src/test/regress/sql/multi_tpch_query6.sql
+++ b/src/test/regress/sql/multi_tpch_query6.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 950000;
-
-
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_tpch_query7.sql b/src/test/regress/sql/multi_tpch_query7.sql
index c42357db8..eb8e208e1 100644
--- a/src/test/regress/sql/multi_tpch_query7.sql
+++ b/src/test/regress/sql/multi_tpch_query7.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 970000;
-
-
 -- Change configuration to treat lineitem AND orders tables as large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_tpch_query7_nested.sql b/src/test/regress/sql/multi_tpch_query7_nested.sql
index 82f95de32..251ddd1e9 100644
--- a/src/test/regress/sql/multi_tpch_query7_nested.sql
+++ b/src/test/regress/sql/multi_tpch_query7_nested.sql
@@ -3,9 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 960000;
-
-
 -- Change configuration to treat lineitem AND orders tables AS large
 SET citus.large_table_shard_count TO 2;
diff --git a/src/test/regress/sql/multi_working_columns.sql b/src/test/regress/sql/multi_working_columns.sql
index 2da6600c7..f7478ca87 100644
--- a/src/test/regress/sql/multi_working_columns.sql
+++ b/src/test/regress/sql/multi_working_columns.sql
@@ -8,9 +8,6 @@
 -- grouping.
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1040000;
-
-
 SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20;
 SELECT l_quantity, count(*) as count FROM lineitem
diff --git a/src/test/regress/sql/task_tracker_assign_task.sql b/src/test/regress/sql/task_tracker_assign_task.sql
index aea391f04..d19956773 100644
--- a/src/test/regress/sql/task_tracker_assign_task.sql
+++ b/src/test/regress/sql/task_tracker_assign_task.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1050000;
-
 \set JobId 401010
 \set SimpleTaskId 101101
diff --git a/src/test/regress/sql/task_tracker_partition_task.sql b/src/test/regress/sql/task_tracker_partition_task.sql
index d0e90704c..3d8f40117 100644
--- a/src/test/regress/sql/task_tracker_partition_task.sql
+++ b/src/test/regress/sql/task_tracker_partition_task.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1080000;
-
 \set JobId 401010
 \set PartitionTaskId 801106
diff --git a/src/test/regress/sql/worker_binary_data_partition.sql b/src/test/regress/sql/worker_binary_data_partition.sql
index b5eff2197..168302634 100644
--- a/src/test/regress/sql/worker_binary_data_partition.sql
+++ b/src/test/regress/sql/worker_binary_data_partition.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1090000;
-
 \set JobId 201010
 \set TaskId 101105
diff --git a/src/test/regress/sql/worker_hash_partition.sql b/src/test/regress/sql/worker_hash_partition.sql
index 90a1aaf54..024b46ed7 100644
--- a/src/test/regress/sql/worker_hash_partition.sql
+++ b/src/test/regress/sql/worker_hash_partition.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1130000;
-
 \set JobId 201010
 \set TaskId 101103
diff --git a/src/test/regress/sql/worker_hash_partition_complex.sql b/src/test/regress/sql/worker_hash_partition_complex.sql
index 39d1b98fc..5b860ec71 100644
--- a/src/test/regress/sql/worker_hash_partition_complex.sql
+++ b/src/test/regress/sql/worker_hash_partition_complex.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1120000;
-
 \set JobId 201010
 \set TaskId 101104
diff --git a/src/test/regress/sql/worker_merge_hash_files.sql b/src/test/regress/sql/worker_merge_hash_files.sql
index f2d09281c..1336e05eb 100644
--- a/src/test/regress/sql/worker_merge_hash_files.sql
+++ b/src/test/regress/sql/worker_merge_hash_files.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1140000;
-
 \set JobId 201010
 \set TaskId 101103
diff --git a/src/test/regress/sql/worker_merge_range_files.sql b/src/test/regress/sql/worker_merge_range_files.sql
index 069f4e360..aacdc7d15 100644
--- a/src/test/regress/sql/worker_merge_range_files.sql
+++ b/src/test/regress/sql/worker_merge_range_files.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1150000;
-
 \set JobId 201010
 \set TaskId 101101
diff --git a/src/test/regress/sql/worker_null_data_partition.sql b/src/test/regress/sql/worker_null_data_partition.sql
index 38c8d03c3..557904187 100644
--- a/src/test/regress/sql/worker_null_data_partition.sql
+++ b/src/test/regress/sql/worker_null_data_partition.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1180000;
-
 \set JobId 201010
 \set Range_TaskId 101106
diff --git a/src/test/regress/sql/worker_range_partition.sql b/src/test/regress/sql/worker_range_partition.sql
index cbf59f609..32d55bbda 100644
--- a/src/test/regress/sql/worker_range_partition.sql
+++ b/src/test/regress/sql/worker_range_partition.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1160000;
-
 \set JobId 201010
 \set TaskId 101101
diff --git a/src/test/regress/sql/worker_range_partition_complex.sql b/src/test/regress/sql/worker_range_partition_complex.sql
index 9f10d10e4..a9ff2fccd 100644
--- a/src/test/regress/sql/worker_range_partition_complex.sql
+++ b/src/test/regress/sql/worker_range_partition_complex.sql
@@ -3,8 +3,6 @@
 --
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1170000;
-
 \set JobId 201010
 \set TaskId 101102