From 01c23b0df2c82c13a54bf260d7e720498242325f Mon Sep 17 00:00:00 2001
From: Sait Talha Nisanci
Date: Mon, 20 Jul 2020 12:44:42 +0300
Subject: [PATCH] update test outputs with task-tracker removal

---
 src/backend/distributed/planner/multi_physical_planner.c   | 6 ++++--
 src/backend/distributed/worker/task_tracker_protocol.c     | 2 +-
 src/test/regress/expected/limit_intermediate_size.out      | 3 +--
 src/test/regress/expected/local_shard_execution.out        | 5 -----
 src/test/regress/expected/multi_cross_shard.out            | 1 -
 src/test/regress/expected/multi_join_order_additional.out  | 1 -
 .../regress/expected/multi_join_order_tpch_repartition.out | 1 -
 .../regress/expected/multi_mx_repartition_udt_prepare.out  | 1 -
 src/test/regress/expected/multi_mx_router_planner.out      | 7 -------
 src/test/regress/expected/multi_partitioning.out           | 1 -
 src/test/regress/expected/multi_prepare_plsql.out          | 1 -
 src/test/regress/expected/multi_prepare_sql.out            | 1 -
 src/test/regress/expected/multi_repartition_udt.out        | 1 -
 src/test/regress/expected/multi_router_planner.out         | 2 --
 .../regress/expected/multi_router_planner_fast_path.out    | 2 --
 src/test/regress/expected/single_hash_repartition_join.out | 1 -
 src/test/regress/expected/with_executors.out               | 6 ++----
 src/test/regress/pg_regress_multi.pl                       | 1 -
 18 files changed, 8 insertions(+), 35 deletions(-)

diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c
index b0f1603df..46c70014c 100644
--- a/src/backend/distributed/planner/multi_physical_planner.c
+++ b/src/backend/distributed/planner/multi_physical_planner.c
@@ -4610,8 +4610,10 @@ MergeTaskList(MapMergeJob *mapMergeJob, List *mapTaskList, uint32 taskIdIndex)
 		uint32 mapTaskNodePort = mapTaskPlacement->nodePort;
 
 		/*
-		 * If replication factor is 1, then we know that we will use the first and
-		 * the only placement.
+		 * We will use the first node even when the replication factor is
+		 * greater than 1. If the replication factor is greater than 1 and
+		 * there is a connection problem to the node that ran the map task,
+		 * the fetch task execution will fail with an error.
 		 */
 		StringInfo mapFetchQueryString = makeStringInfo();
 		appendStringInfo(mapFetchQueryString, MAP_OUTPUT_FETCH_COMMAND,
diff --git a/src/backend/distributed/worker/task_tracker_protocol.c b/src/backend/distributed/worker/task_tracker_protocol.c
index fdf748de5..9b2016f67 100644
--- a/src/backend/distributed/worker/task_tracker_protocol.c
+++ b/src/backend/distributed/worker/task_tracker_protocol.c
@@ -3,7 +3,7 @@
  * task_tracker_protocol.c
  *
  * The methods in the file are deprecated.
- * 
+ *
  * Copyright (c) Citus Data, Inc.
  *
 * $Id$
diff --git a/src/test/regress/expected/limit_intermediate_size.out b/src/test/regress/expected/limit_intermediate_size.out
index 7c792759a..1bdda90be 100644
--- a/src/test/regress/expected/limit_intermediate_size.out
+++ b/src/test/regress/expected/limit_intermediate_size.out
@@ -15,7 +15,6 @@ ERROR: the intermediate result size exceeds citus.max_intermediate_result_size
 DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
 HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
 SET citus.max_intermediate_result_size TO 9;
--- regular adaptive CTE should fail
 WITH cte AS (
 SELECT
@@ -203,7 +202,7 @@ SELECT * FROM cte4 ORDER BY 1,2,3,4,5 LIMIT 5;
  1 | Wed Nov 22 21:06:57.457147 2017 | 4 | 3 | 2 |
 (5 rows)
 
--- regular adaptive CTE, should work since -1 disables the limit
+-- regular adaptive executor CTE, should work since -1 disables the limit
 WITH cte AS (
 SELECT
diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out
index ba03d8fd8..f40408a9d 100644
--- a/src/test/regress/expected/local_shard_execution.out
+++ b/src/test/regress/expected/local_shard_execution.out
@@ -1397,11 +1397,9 @@ NOTICE: executing the command locally: DELETE FROM local_shard_execution.refere
 DELETE FROM reference_table;
 NOTICE: executing the command locally: DELETE FROM local_shard_execution.reference_table_1470000 reference_table
 ROLLBACK;
--- adaptive select execution
 BEGIN;
 DELETE FROM distributed_table WHERE key = 500;
 NOTICE: executing the command locally: DELETE FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (key OPERATOR(pg_catalog.=) 500)
-
 SELECT count(*) FROM distributed_table;
 NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE true
 NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE true
@@ -1411,10 +1409,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
 (1 row)
 
 ROLLBACK;
--- local execution should not be executed locally
--- becase a adaptive query has already been executed
 BEGIN;
- SET LOCAL client_min_messages TO INFO;
 SELECT count(*) FROM distributed_table;
 NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE true
diff --git a/src/test/regress/expected/multi_cross_shard.out b/src/test/regress/expected/multi_cross_shard.out
index a42807a17..f93522fcf 100644
--- a/src/test/regress/expected/multi_cross_shard.out
+++ b/src/test/regress/expected/multi_cross_shard.out
@@ -172,7 +172,6 @@ INSERT INTO tt1 VALUES(1, 'Ahmet');
 INSERT INTO tt1 VALUES(2, 'Mehmet');
 INSERT INTO tt2 VALUES(1, 'Ahmet', 5);
 INSERT INTO tt2 VALUES(2, 'Mehmet', 15);
--- Should notice since it is a adaptive query
 SELECT tt1.id, tt2.count from tt1,tt2 where tt1.id = tt2.id ORDER BY 1;
 NOTICE: multi-task query about to be executed
 HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers.
diff --git a/src/test/regress/expected/multi_join_order_additional.out b/src/test/regress/expected/multi_join_order_additional.out
index da0b0078b..3cb508514 100644
--- a/src/test/regress/expected/multi_join_order_additional.out
+++ b/src/test/regress/expected/multi_join_order_additional.out
@@ -5,7 +5,6 @@ SET citus.next_shard_id TO 650000;
 -- Set configuration to print table join order and pruned shards
 SET citus.explain_distributed_queries TO off;
 SET citus.log_multi_join_order TO TRUE;
-SET citus.task_executor_type = 'adaptive'; -- can't explain all queries otherwise
 SET citus.enable_repartition_joins to ON;
 SET citus.shard_count to 2;
 SET citus.shard_replication_factor to 1;
diff --git a/src/test/regress/expected/multi_join_order_tpch_repartition.out b/src/test/regress/expected/multi_join_order_tpch_repartition.out
index d747f9f6b..959673bd6 100644
--- a/src/test/regress/expected/multi_join_order_tpch_repartition.out
+++ b/src/test/regress/expected/multi_join_order_tpch_repartition.out
@@ -5,7 +5,6 @@ SET citus.next_shard_id TO 660000;
 -- Enable configuration to print table join order
 SET citus.explain_distributed_queries TO off;
 SET citus.log_multi_join_order TO TRUE;
-SET citus.task_executor_type = 'adaptive'; -- can't explain all queries otherwise
 SET citus.enable_repartition_joins to ON;
 SET client_min_messages TO LOG;
 -- The following queries are basically the same as the ones in tpch_small
diff --git a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out
index 0f68919db..92835d031 100644
--- a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out
+++ b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out
@@ -152,7 +152,6 @@ INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo');
 INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo');
 INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo');
 SET client_min_messages = LOG;
-SET citus.task_executor_type = 'adaptive';
 SET citus.enable_repartition_joins to ON;
 -- Query that should result in a repartition
 -- join on int column, and be empty.
diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out
index c6d53b664..ea2d153b0 100644
--- a/src/test/regress/expected/multi_mx_router_planner.out
+++ b/src/test/regress/expected/multi_mx_router_planner.out
@@ -1418,13 +1418,6 @@ SET client_min_messages to 'DEBUG2';
 CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS
 SELECT * FROM articles_hash_mx WHERE author_id in (1,2);
 DEBUG: Router planner cannot handle multi-shard select queries
--- router planner/executor is disabled for adaptive executor
--- following query is router plannable, but router planner is disabled
--- TODO: Uncomment once we fix adaptive issue
-----SELECT id
--- FROM articles_hash_mx
--- WHERE author_id = 1;
--- insert query is router plannable even under adaptive
 INSERT INTO articles_hash_mx VALUES (51, 1, 'amateus', 1814);
 DEBUG: Creating router plan
 DEBUG: query has a single distribution column value: 1
diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out
index 6d5cc7a3b..e85a4a2a9 100644
--- a/src/test/regress/expected/multi_partitioning.out
+++ b/src/test/regress/expected/multi_partitioning.out
@@ -1299,7 +1299,6 @@ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass
 (3 rows)
 
 COMMIT;
--- test locks on adaptive SELECT
 BEGIN;
 SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_locks AS pl2 ON pl1.id = pl2.ref_id ORDER BY 1, 2;
  id | ref_id | time | id | ref_id | time
diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out
index 9df11b352..83fc11457 100644
--- a/src/test/regress/expected/multi_prepare_plsql.out
+++ b/src/test/regress/expected/multi_prepare_plsql.out
@@ -739,7 +739,6 @@ SELECT real_time_partition_column_select(6);
 (6,)
 (4 rows)
 
--- check adaptive executor
 CREATE FUNCTION task_tracker_non_partition_column_select(value_arg int)
 RETURNS TABLE(key int, value int) AS $$
 DECLARE
diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out
index 6c864fa8b..4f30bc716 100644
--- a/src/test/regress/expected/multi_prepare_sql.out
+++ b/src/test/regress/expected/multi_prepare_sql.out
@@ -763,7 +763,6 @@ EXECUTE prepared_real_time_partition_column_select(6);
  6 |
 (4 rows)
 
--- check adaptive executor
 PREPARE prepared_task_tracker_non_partition_column_select(int) AS
 SELECT
 	prepare_table.key,
diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out
index 29cb7fe0e..c37bbba02 100644
--- a/src/test/regress/expected/multi_repartition_udt.out
+++ b/src/test/regress/expected/multi_repartition_udt.out
@@ -164,7 +164,6 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other
 (0 rows)
 
 -- Query that should result in a repartition join on UDT column.
-SET citus.task_executor_type = 'adaptive';
 SET citus.log_multi_join_order = true;
 EXPLAIN (COSTS OFF) SELECT * FROM repartition_udt JOIN repartition_udt_other
diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out
index b92eda908..d04677c8c 100644
--- a/src/test/regress/expected/multi_router_planner.out
+++ b/src/test/regress/expected/multi_router_planner.out
@@ -2278,7 +2278,6 @@ SELECT * FROM mv_articles_hash_data ORDER BY 1, 2, 3, 4;
  42 | 2 | ausable | 15885
 (10 rows)
 
--- router planner/executor is now enabled for adaptive executor
 SELECT id
 FROM articles_hash
 WHERE author_id = 1
@@ -2294,7 +2293,6 @@ DEBUG: query has a single distribution column value: 1
  41
 (5 rows)
 
--- insert query is router plannable even under adaptive
 INSERT INTO articles_hash VALUES (51, 1, 'amateus', 1814), (52, 1, 'second amateus', 2824);
 DEBUG: Creating router plan
 DEBUG: query has a single distribution column value: 1
diff --git a/src/test/regress/expected/multi_router_planner_fast_path.out b/src/test/regress/expected/multi_router_planner_fast_path.out
index 9a172546e..a3ff010cf 100644
--- a/src/test/regress/expected/multi_router_planner_fast_path.out
+++ b/src/test/regress/expected/multi_router_planner_fast_path.out
@@ -1995,7 +1995,6 @@ SELECT * FROM mv_articles_hash_empty;
  41 | 1 | aznavour | 11814
 (5 rows)
 
--- fast-path router planner/executor is enabled for adaptive executor
 SELECT id
 FROM articles_hash
 WHERE author_id = 1;
@@ -2011,7 +2010,6 @@ DEBUG: query has a single distribution column value: 1
  41
 (5 rows)
 
--- insert query is router plannable even under adaptive
 INSERT INTO articles_hash VALUES (51, 1, 'amateus', 1814), (52, 1, 'second amateus', 2824);
 DEBUG: Creating router plan
 DEBUG: query has a single distribution column value: 1
diff --git a/src/test/regress/expected/single_hash_repartition_join.out b/src/test/regress/expected/single_hash_repartition_join.out
index 6e12491a3..31a5c7e9f 100644
--- a/src/test/regress/expected/single_hash_repartition_join.out
+++ b/src/test/regress/expected/single_hash_repartition_join.out
@@ -519,7 +519,6 @@ SELECT create_distributed_table('dist_1', 'a');
 (1 row)
 
 INSERT INTO dist_1 SELECT x,10-x FROM generate_series(1,10) x;
-SET citus.task_executor_type to 'adaptive';
 SELECT COUNT(*) FROM dist_1 f, dist_1 s WHERE f.a = s.b;
  count
 ---------------------------------------------------------------------
diff --git a/src/test/regress/expected/with_executors.out b/src/test/regress/expected/with_executors.out
index 8f726dd90..02878bf15 100644
--- a/src/test/regress/expected/with_executors.out
+++ b/src/test/regress/expected/with_executors.out
@@ -1,4 +1,4 @@
--- Confirm we can use local, router, real-time, and adaptive execution
+-- Confirm we can use local, and adaptive execution
 CREATE SCHEMA with_executors;
 SET search_path TO with_executors, public;
 SET citus.enable_repartition_joins TO on;
@@ -152,7 +152,7 @@ SELECT * FROM cte WHERE uid=1 ORDER BY 2 LIMIT 5;
  1 | 0
 (5 rows)
 
--- CTEs should be able to use adaptive queries
+-- CTEs should be able to use adaptive executor
 WITH cte AS (
 	WITH task_tracker_1 AS (
 		SELECT
@@ -375,8 +375,6 @@ SELECT count(*) FROM cte, users_table where cte.count=user_id;
      0
 (1 row)
 
-
--- CTEs shouldn't be able to terminate a adaptive query
 WITH cte_1 AS (
 	SELECT
 		u_table.user_id as u_id, e_table.event_type
diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl
index 51148554b..5614bfbec 100755
--- a/src/test/regress/pg_regress_multi.pl
+++ b/src/test/regress/pg_regress_multi.pl
@@ -409,7 +409,6 @@ push(@pgOptions, '-c', "wal_level=logical");
 # Citus options set for the tests
 push(@pgOptions, '-c', "citus.shard_count=4");
 push(@pgOptions, '-c', "citus.shard_max_size=1500kB");
-push(@pgOptions, '-c', "citus.max_running_tasks_per_node=4");
 push(@pgOptions, '-c', "citus.repartition_join_bucket_count_per_node=2");
 push(@pgOptions, '-c', "citus.expire_cached_shards=on");
 push(@pgOptions, '-c', "citus.sort_returning=on");
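
For reference, the executor pattern these expected outputs now exercise is a repartition join under the adaptive executor, enabled through citus.enable_repartition_joins alone now that citus.task_executor_type is removed. Below is a minimal sketch of that pattern, modeled on the single_hash_repartition_join test in the diff; the GUC names, the dist_1 table name, and the join query are taken from the diff, while the CREATE TABLE column types and the standalone session itself are assumptions for illustration and are not part of the patch.

-- illustrative sketch only; mirrors the pattern in single_hash_repartition_join.out
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
-- no SET citus.task_executor_type needed: the adaptive executor is the only executor left
SET citus.enable_repartition_joins TO on;
CREATE TABLE dist_1 (a int, b int);
SELECT create_distributed_table('dist_1', 'a');
INSERT INTO dist_1 SELECT x, 10 - x FROM generate_series(1, 10) x;
-- joining the distribution column of one side to a non-distribution column of the other forces a repartition join
SELECT count(*) FROM dist_1 f, dist_1 s WHERE f.a = s.b;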