Set citus.enable_unique_job_ids in tests with job IDs in their output

pull/1208/head
Marco Slot 2017-02-06 11:10:41 +01:00
parent dfd7d86948
commit 40829c2ba9
20 changed files with 117 additions and 190 deletions
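
The change follows a single pattern across all 20 files: tests whose expected output contains job IDs stop restarting pg_dist_jobid_seq and instead disable unique job IDs, so the job numbers in the output come from a small, locally assigned counter (1, 2, 3, ... in the hunks below) rather than from the shared sequence. A condensed before/after of the affected test preamble, assembled from the hunks below (the shard ID value differs per test):

-- before: pin the shared job ID sequence to keep DEBUG output reproducible
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000;

-- after: leave the sequence alone; job IDs in the output simply start from 1
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
SET citus.enable_unique_job_ids TO off;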


@ -2,7 +2,6 @@
-- MULTI_EXPLAIN -- MULTI_EXPLAIN
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000;
-- print major version to make version-specific tests clear -- print major version to make version-specific tests clear
SELECT substring(version(), '\d+\.\d+') AS major_version; SELECT substring(version(), '\d+\.\d+') AS major_version;
major_version major_version


@ -6,7 +6,7 @@
-- transaction ids in them. Also, we set the executor type to task tracker -- transaction ids in them. Also, we set the executor type to task tracker
-- executor here, as we cannot run repartition jobs with real time executor. -- executor here, as we cannot run repartition jobs with real time executor.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000; SET citus.enable_unique_job_ids TO off;
BEGIN; BEGIN;
SET client_min_messages TO DEBUG4; SET client_min_messages TO DEBUG4;
DEBUG: CommitTransactionCommand DEBUG: CommitTransactionCommand
@ -21,10 +21,6 @@ DEBUG: CommitTransactionCommand
-- Debug4 log messages display jobIds within them. We explicitly set the jobId -- Debug4 log messages display jobIds within them. We explicitly set the jobId
-- sequence here so that the regression output becomes independent of the number -- sequence here so that the regression output becomes independent of the number
-- of jobs executed prior to running this test. -- of jobs executed prior to running this test.
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand
-- Multi-level repartition join to verify our projection columns are correctly -- Multi-level repartition join to verify our projection columns are correctly
-- referenced and propagated across multiple repartition jobs. The test also -- referenced and propagated across multiple repartition jobs. The test also
-- validates that only the minimal necessary projection columns are transferred -- validates that only the minimal necessary projection columns are transferred
@ -53,21 +49,21 @@ DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986]
DEBUG: generated sql query for job 1250 and task 3 DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 6 DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 9 DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 12 DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 15 DEBUG: generated sql query for task 15
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 18 DEBUG: generated sql query for task 18
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 21 DEBUG: generated sql query for task 21
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for job 1250 and task 24 DEBUG: generated sql query for task 24
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57638
@ -79,10 +75,10 @@ DEBUG: assigned task 24 to node localhost:57637
DEBUG: assigned task 21 to node localhost:57638 DEBUG: assigned task 21 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for job 1251 and task 3 DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000025".intermediate_column_1250_0, "pg_merge_job_1250.task_000025".intermediate_column_1250_1, "pg_merge_job_1250.task_000025".intermediate_column_1250_2, "pg_merge_job_1250.task_000025".intermediate_column_1250_3, "pg_merge_job_1250.task_000025".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000025 "pg_merge_job_1250.task_000025" JOIN part_290011 part ON (("pg_merge_job_1250.task_000025".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" DETAIL: query string: "SELECT "pg_merge_job_0001.task_000025".intermediate_column_1_0, "pg_merge_job_0001.task_000025".intermediate_column_1_1, "pg_merge_job_0001.task_000025".intermediate_column_1_2, "pg_merge_job_0001.task_000025".intermediate_column_1_3, "pg_merge_job_0001.task_000025".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000025 "pg_merge_job_0001.task_000025" JOIN part_290011 part ON (("pg_merge_job_0001.task_000025".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for job 1251 and task 6 DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_1250.task_000034".intermediate_column_1250_0, "pg_merge_job_1250.task_000034".intermediate_column_1250_1, "pg_merge_job_1250.task_000034".intermediate_column_1250_2, "pg_merge_job_1250.task_000034".intermediate_column_1250_3, "pg_merge_job_1250.task_000034".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000034 "pg_merge_job_1250.task_000034" JOIN part_280002 part ON (("pg_merge_job_1250.task_000034".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)" DETAIL: query string: "SELECT "pg_merge_job_0001.task_000034".intermediate_column_1_0, "pg_merge_job_0001.task_000034".intermediate_column_1_1, "pg_merge_job_0001.task_000034".intermediate_column_1_2, "pg_merge_job_0001.task_000034".intermediate_column_1_3, "pg_merge_job_0001.task_000034".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000034 "pg_merge_job_0001.task_000034" JOIN part_280002 part ON (("pg_merge_job_0001.task_000034".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 25 DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 4 DEBUG: pruning merge fetch taskId 4
@ -95,12 +91,12 @@ DEBUG: join prunable for intervals [1001,2000] and [1,1000]
DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for job 1252 and task 3 DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1" DETAIL: query string: "SELECT "pg_merge_job_0002.task_000007".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000007".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000007 "pg_merge_job_0002.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000007".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000007".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000007".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000007".intermediate_column_2_0, "pg_merge_job_0002.task_000007".intermediate_column_2_1"
DEBUG: generated sql query for job 1252 and task 6 DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1" DETAIL: query string: "SELECT "pg_merge_job_0002.task_000010".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000010".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000010 "pg_merge_job_0002.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000010".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000010".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000010".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000010".intermediate_column_2_0, "pg_merge_job_0002.task_000010".intermediate_column_2_1"
DEBUG: generated sql query for job 1252 and task 9 DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1" DETAIL: query string: "SELECT "pg_merge_job_0002.task_000013".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000013".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000013 "pg_merge_job_0002.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000013".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000013".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000013".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000013".intermediate_column_2_0, "pg_merge_job_0002.task_000013".intermediate_column_2_1"
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 7 DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4 DEBUG: pruning merge fetch taskId 4
@ -110,12 +106,12 @@ DETAIL: Creating dependency on merge taskId 13
DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 3 to node localhost:57637
DEBUG: completed cleanup query for job 1252 DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 1252 DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 1251 DEBUG: completed cleanup query for job 2
DEBUG: completed cleanup query for job 1251 DEBUG: completed cleanup query for job 2
DEBUG: completed cleanup query for job 1250 DEBUG: completed cleanup query for job 1
DEBUG: completed cleanup query for job 1250 DEBUG: completed cleanup query for job 1
DEBUG: CommitTransactionCommand DEBUG: CommitTransactionCommand
l_partkey | o_orderkey | count l_partkey | o_orderkey | count
-----------+------------+------- -----------+------------+-------
@ -162,21 +158,21 @@ GROUP BY
ORDER BY ORDER BY
l_partkey, o_orderkey; l_partkey, o_orderkey;
DEBUG: StartTransactionCommand DEBUG: StartTransactionCommand
DEBUG: generated sql query for job 1253 and task 2 DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)" DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 4 DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)" DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 6 DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)" DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 8 DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)" DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 10 DEBUG: generated sql query for task 10
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)" DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 12 DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)" DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 14 DEBUG: generated sql query for task 14
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)" DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for job 1253 and task 16 DEBUG: generated sql query for task 16
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)" DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57638
@ -186,9 +182,9 @@ DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 10 to node localhost:57638 DEBUG: assigned task 10 to node localhost:57638
DEBUG: assigned task 16 to node localhost:57637 DEBUG: assigned task 16 to node localhost:57637
DEBUG: assigned task 14 to node localhost:57638 DEBUG: assigned task 14 to node localhost:57638
DEBUG: generated sql query for job 1254 and task 2 DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)" DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for job 1254 and task 4 DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)" DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57638
@ -204,14 +200,14 @@ DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for job 1255 and task 3 DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000017".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000005".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000017 "pg_merge_job_1253.task_000017" JOIN pg_merge_job_1254.task_000005 "pg_merge_job_1254.task_000005" ON (("pg_merge_job_1253.task_000017".intermediate_column_1253_1 = "pg_merge_job_1254.task_000005".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000017".intermediate_column_1253_0, "pg_merge_job_1254.task_000005".intermediate_column_1254_0" DETAIL: query string: "SELECT "pg_merge_job_0004.task_000017".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000005".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000017 "pg_merge_job_0004.task_000017" JOIN pg_merge_job_0005.task_000005 "pg_merge_job_0005.task_000005" ON (("pg_merge_job_0004.task_000017".intermediate_column_4_1 = "pg_merge_job_0005.task_000005".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000017".intermediate_column_4_0, "pg_merge_job_0005.task_000005".intermediate_column_5_0"
DEBUG: generated sql query for job 1255 and task 6 DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000026".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000008".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000026 "pg_merge_job_1253.task_000026" JOIN pg_merge_job_1254.task_000008 "pg_merge_job_1254.task_000008" ON (("pg_merge_job_1253.task_000026".intermediate_column_1253_1 = "pg_merge_job_1254.task_000008".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000026".intermediate_column_1253_0, "pg_merge_job_1254.task_000008".intermediate_column_1254_0" DETAIL: query string: "SELECT "pg_merge_job_0004.task_000026".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000008".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000026 "pg_merge_job_0004.task_000026" JOIN pg_merge_job_0005.task_000008 "pg_merge_job_0005.task_000008" ON (("pg_merge_job_0004.task_000026".intermediate_column_4_1 = "pg_merge_job_0005.task_000008".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000026".intermediate_column_4_0, "pg_merge_job_0005.task_000008".intermediate_column_5_0"
DEBUG: generated sql query for job 1255 and task 9 DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000035".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000011".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000035 "pg_merge_job_1253.task_000035" JOIN pg_merge_job_1254.task_000011 "pg_merge_job_1254.task_000011" ON (("pg_merge_job_1253.task_000035".intermediate_column_1253_1 = "pg_merge_job_1254.task_000011".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000035".intermediate_column_1253_0, "pg_merge_job_1254.task_000011".intermediate_column_1254_0" DETAIL: query string: "SELECT "pg_merge_job_0004.task_000035".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000011".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000035 "pg_merge_job_0004.task_000035" JOIN pg_merge_job_0005.task_000011 "pg_merge_job_0005.task_000011" ON (("pg_merge_job_0004.task_000035".intermediate_column_4_1 = "pg_merge_job_0005.task_000011".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000035".intermediate_column_4_0, "pg_merge_job_0005.task_000011".intermediate_column_5_0"
DEBUG: generated sql query for job 1255 and task 12 DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_1253.task_000044".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000014".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000044 "pg_merge_job_1253.task_000044" JOIN pg_merge_job_1254.task_000014 "pg_merge_job_1254.task_000014" ON (("pg_merge_job_1253.task_000044".intermediate_column_1253_1 = "pg_merge_job_1254.task_000014".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000044".intermediate_column_1253_0, "pg_merge_job_1254.task_000014".intermediate_column_1254_0" DETAIL: query string: "SELECT "pg_merge_job_0004.task_000044".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000014".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000044 "pg_merge_job_0004.task_000044" JOIN pg_merge_job_0005.task_000014 "pg_merge_job_0005.task_000014" ON (("pg_merge_job_0004.task_000044".intermediate_column_4_1 = "pg_merge_job_0005.task_000014".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000044".intermediate_column_4_0, "pg_merge_job_0005.task_000014".intermediate_column_5_0"
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 17 DETAIL: Creating dependency on merge taskId 17
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -228,16 +224,16 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 44 DETAIL: Creating dependency on merge taskId 44
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14 DETAIL: Creating dependency on merge taskId 14
DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 12 to node localhost:57638
DEBUG: completed cleanup query for job 1255 DEBUG: completed cleanup query for job 6
DEBUG: completed cleanup query for job 1255 DEBUG: completed cleanup query for job 6
DEBUG: completed cleanup query for job 1253 DEBUG: completed cleanup query for job 4
DEBUG: completed cleanup query for job 1253 DEBUG: completed cleanup query for job 4
DEBUG: completed cleanup query for job 1254 DEBUG: completed cleanup query for job 5
DEBUG: completed cleanup query for job 1254 DEBUG: completed cleanup query for job 5
DEBUG: CommitTransactionCommand DEBUG: CommitTransactionCommand
l_partkey | o_orderkey | count l_partkey | o_orderkey | count
-----------+------------+------- -----------+------------+-------
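
Only the DEBUG and DETAIL lines change in this file; the result rows are unchanged context. The DETAIL strings also show how the smaller job IDs propagate into every identifier the task-tracker executor generates for repartition jobs: the merge-job schema name, the merge-task relation name, and the intermediate column aliases all embed the job ID. A runnable sketch of that naming scheme in plain PostgreSQL, reconstructed from this expected output rather than quoted from the Citus source (job 1, merge task 25, column 0, as in the first DETAIL line of the second job above):

SELECT format('pg_merge_job_%s', lpad('1', 4, '0'))    AS merge_job_schema,
       format('task_%s', lpad('25', 6, '0'))           AS merge_task_relation,
       format('intermediate_column_%s_%s', 1, 0)       AS column_alias;
--  merge_job_schema  | merge_task_relation | column_alias
-- -------------------+---------------------+--------------------------
--  pg_merge_job_0001 | task_000025         | intermediate_column_1_0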


@ -6,7 +6,6 @@
-- from a sql task to its depended tasks. Note that we set the executor type to task -- from a sql task to its depended tasks. Note that we set the executor type to task
-- tracker executor here, as we cannot run repartition jobs with real time executor. -- tracker executor here, as we cannot run repartition jobs with real time executor.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 710000;
BEGIN; BEGIN;
SET client_min_messages TO DEBUG3; SET client_min_messages TO DEBUG3;
DEBUG: CommitTransactionCommand DEBUG: CommitTransactionCommand
@ -186,19 +185,6 @@ SET citus.large_table_shard_count TO 2;
DEBUG: StartTransactionCommand DEBUG: StartTransactionCommand
DEBUG: ProcessUtility DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand DEBUG: CommitTransactionCommand
-- The next test, dual hash repartition join, uses the current jobId to assign
-- tasks in a round-robin fashion. We therefore need to ensure that jobIds start
-- with an odd number here to get consistent test output.
SELECT case when (currval('pg_dist_jobid_seq') % 2) = 0
then nextval('pg_dist_jobid_seq') % 2
else 1 end;
DEBUG: StartTransactionCommand
DEBUG: CommitTransactionCommand
case
------
1
(1 row)
-- Dual hash repartition join which tests the separate hash repartition join -- Dual hash repartition join which tests the separate hash repartition join
-- task assignment algorithm. -- task assignment algorithm.
SELECT SELECT
@ -247,10 +233,10 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 44 DETAIL: Creating dependency on merge taskId 44
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19 DETAIL: Creating dependency on merge taskId 19
DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637
DEBUG: CommitTransactionCommand DEBUG: CommitTransactionCommand
count count
------- -------


@ -2,11 +2,8 @@
-- MULTI_MX_EXPLAIN -- MULTI_MX_EXPLAIN
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
\c - - - :worker_1_port \c - - - :worker_1_port
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
\c - - - :worker_2_port \c - - - :worker_2_port
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
\c - - - :master_port \c - - - :master_port
\a\t \a\t
SET citus.task_executor_type TO 'real-time'; SET citus.task_executor_type TO 'real-time';


@ -2,7 +2,6 @@
-- MULTI_MX_REPARTITION_UDT_PREPARE -- MULTI_MX_REPARTITION_UDT_PREPARE
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
-- START type creation -- START type creation
CREATE TYPE test_udt AS (i integer, i2 integer); CREATE TYPE test_udt AS (i integer, i2 integer);
-- ... as well as a function to use as its comparator... -- ... as well as a function to use as its comparator...
@ -202,6 +201,4 @@ LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_ot
\c - - - :worker_1_port \c - - - :worker_1_port
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
\c - - - :worker_2_port \c - - - :worker_2_port
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;


@ -6,12 +6,11 @@
-- the resource owner should automatically clean up these intermediate query -- the resource owner should automatically clean up these intermediate query
-- result files. -- result files.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 810000; SET citus.enable_unique_job_ids TO off;
BEGIN; BEGIN;
-- pg_ls_dir() displays jobids. We explicitly set the jobId sequence -- pg_ls_dir() displays jobids. We explicitly set the jobId sequence
-- here so that the regression output becomes independent of the -- here so that the regression output becomes independent of the
-- number of jobs executed prior to running this test. -- number of jobs executed prior to running this test.
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250;
SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem;
revenue revenue
--------------- ---------------
@ -218,26 +217,26 @@ FETCH 1 FROM c_19;
SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f;
f f
----------------- -----------------
master_job_1256 master_job_0007
master_job_1257 master_job_0008
master_job_1258 master_job_0009
master_job_1259 master_job_0010
master_job_1260 master_job_0011
master_job_1261 master_job_0012
master_job_1262 master_job_0013
master_job_1263 master_job_0014
master_job_1264 master_job_0015
master_job_1265 master_job_0016
master_job_1266 master_job_0017
master_job_1267 master_job_0018
master_job_1268 master_job_0019
master_job_1269 master_job_0020
master_job_1270 master_job_0021
master_job_1271 master_job_0022
master_job_1272 master_job_0023
master_job_1273 master_job_0024
master_job_1274 master_job_0025
master_job_1275 master_job_0026
(20 rows) (20 rows)
-- close first, 17th (first after re-alloc) and last cursor. -- close first, 17th (first after re-alloc) and last cursor.
@ -247,23 +246,23 @@ CLOSE c_19;
SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f;
f f
----------------- -----------------
master_job_1257 master_job_0008
master_job_1258 master_job_0009
master_job_1259 master_job_0010
master_job_1260 master_job_0011
master_job_1261 master_job_0012
master_job_1262 master_job_0013
master_job_1263 master_job_0014
master_job_1264 master_job_0015
master_job_1265 master_job_0016
master_job_1266 master_job_0017
master_job_1267 master_job_0018
master_job_1268 master_job_0019
master_job_1269 master_job_0020
master_job_1270 master_job_0021
master_job_1271 master_job_0022
master_job_1273 master_job_0024
master_job_1274 master_job_0025
(17 rows) (17 rows)
ROLLBACK; ROLLBACK;
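
The directory listing shows the same renumbering outside the DEBUG output: each job keeps its intermediate results under base/pgsql_job_cache/master_job_<jobid>, so with unique job IDs off the directory names become small and stable. A minimal sketch of the check this test performs, assuming the regression schema's lineitem table and the executor settings of the surrounding test file (the cursor name is illustrative; the real test opens twenty):

SET citus.enable_unique_job_ids TO off;
BEGIN;
-- an open cursor keeps its job's intermediate-result directory alive
DECLARE c_00 CURSOR FOR
    SELECT sum(l_extendedprice * l_discount) AS revenue FROM lineitem;
FETCH 1 FROM c_00;
-- directory names now reflect the session-local job counter, e.g. master_job_0007
SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f;
ROLLBACK;  -- the resource owner cleans the directories up again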


@ -2,7 +2,6 @@
-- MULTI_REPARTITION_UDT -- MULTI_REPARTITION_UDT
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
-- START type creation -- START type creation
CREATE TYPE test_udt AS (i integer, i2 integer); CREATE TYPE test_udt AS (i integer, i2 integer);
-- ... as well as a function to use as its comparator... -- ... as well as a function to use as its comparator...


@ -1,5 +1,4 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000;
-- =================================================================== -- ===================================================================
-- test router planner functionality for single shard select queries -- test router planner functionality for single shard select queries
-- =================================================================== -- ===================================================================
@ -1873,8 +1872,7 @@ WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638 CONTEXT: while executing command on localhost:57638
WARNING: relation "public.articles_append" does not exist WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638 CONTEXT: while executing command on localhost:57638
ERROR: failed to execute job 840026 ERROR: failed to execute task 2
DETAIL: Failure due to failed task 2
-- same query with where false but evaluation left to worker -- same query with where false but evaluation left to worker
SELECT author_id FROM articles_append SELECT author_id FROM articles_append
WHERE WHERE
@ -1889,8 +1887,7 @@ WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638 CONTEXT: while executing command on localhost:57638
WARNING: relation "public.articles_append" does not exist WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638 CONTEXT: while executing command on localhost:57638
ERROR: failed to execute job 840027 ERROR: failed to execute task 2
DETAIL: Failure due to failed task 2
-- same query on router planner with where false but evaluation left to worker -- same query on router planner with where false but evaluation left to worker
SELECT author_id FROM articles_single_shard_hash SELECT author_id FROM articles_single_shard_hash
WHERE WHERE


@ -2,7 +2,6 @@
-- MULTI_TASK_ASSIGNMENT -- MULTI_TASK_ASSIGNMENT
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 880000;
SET citus.explain_distributed_queries TO off; SET citus.explain_distributed_queries TO off;
-- Check that our policies for assigning tasks to worker nodes run as expected. -- Check that our policies for assigning tasks to worker nodes run as expected.
-- To test this, we first create a shell table, and then manually insert shard -- To test this, we first create a shell table, and then manually insert shard
@ -114,39 +113,11 @@ DEBUG: CommitTransactionCommand
explain statements for distributed queries are not enabled explain statements for distributed queries are not enabled
(3 rows) (3 rows)
-- Round-robin task assignment relies on the current jobId. We therefore need to
-- ensure that jobIds start with an odd number here; this way, task assignment
-- debug messages always produce the same output. Also, we make sure that the
-- following case statement always prints out "1" as the query's result.
SELECT case when (currval('pg_dist_jobid_seq') % 2) = 0
then nextval('pg_dist_jobid_seq') % 2
else 1 end;
DEBUG: StartTransactionCommand
DEBUG: CommitTransactionCommand
case
------
1
(1 row)
-- Finally test the round-robin task assignment policy -- Finally test the round-robin task assignment policy
SET citus.task_assignment_policy TO 'round-robin'; SET citus.task_assignment_policy TO 'round-robin';
DEBUG: StartTransactionCommand DEBUG: StartTransactionCommand
DEBUG: ProcessUtility DEBUG: ProcessUtility
DEBUG: CommitTransactionCommand DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
EXPLAIN SELECT count(*) FROM task_assignment_test_table; EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand DEBUG: StartTransactionCommand
DEBUG: ProcessUtility DEBUG: ProcessUtility
@ -175,6 +146,20 @@ DEBUG: CommitTransactionCommand
explain statements for distributed queries are not enabled explain statements for distributed queries are not enabled
(3 rows) (3 rows)
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
explain statements for distributed queries are not enabled
(3 rows)
RESET citus.task_assignment_policy; RESET citus.task_assignment_policy;
DEBUG: StartTransactionCommand DEBUG: StartTransactionCommand
DEBUG: ProcessUtility DEBUG: ProcessUtility
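
This file shows the other part of the cleanup. Round-robin task assignment relies on the current jobId (per the test's own comment), so the test previously had to nudge pg_dist_jobid_seq to a known parity with the currval/nextval workaround removed above. With job IDs now deterministic that workaround goes away; one EXPLAIN block is relocated further down, and its expected assignments flip to the other worker accordingly. Condensed, the round-robin check now reads roughly as follows (statements taken from the hunks above):

SET citus.explain_distributed_queries TO off;
SET citus.task_assignment_policy TO 'round-robin';
-- consecutive runs rotate the task assignments between the two workers
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
RESET citus.task_assignment_policy;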


@ -4,7 +4,6 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 270000;
-- print major version to make version-specific tests clear -- print major version to make version-specific tests clear
SHOW server_version \gset SHOW server_version \gset
@ -702,6 +701,7 @@ LIMIT
-- Same queries above with explain -- Same queries above with explain
-- Simple join subquery pushdown -- Simple join subquery pushdown
EXPLAIN SELECT EXPLAIN SELECT
avg(array_length(events, 1)) AS event_average avg(array_length(events, 1)) AS event_average
FROM FROM


@ -2,7 +2,6 @@
-- MULTI_SUBQUERY -- MULTI_SUBQUERY
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 270000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 270000;
-- print major version to make version-specific tests clear -- print major version to make version-specific tests clear
SHOW server_version \gset SHOW server_version \gset
SELECT substring(:'server_version', '\d+\.\d+') AS major_version; SELECT substring(:'server_version', '\d+\.\d+') AS major_version;


@ -3,7 +3,6 @@
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000;
-- print major version to make version-specific tests clear -- print major version to make version-specific tests clear
SELECT substring(version(), '\d+\.\d+') AS major_version; SELECT substring(version(), '\d+\.\d+') AS major_version;


@ -8,7 +8,7 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000; SET citus.enable_unique_job_ids TO off;
BEGIN; BEGIN;
@ -20,7 +20,6 @@ SET citus.task_executor_type TO 'task-tracker';
-- sequence here so that the regression output becomes independent of the number -- sequence here so that the regression output becomes independent of the number
-- of jobs executed prior to running this test. -- of jobs executed prior to running this test.
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250;
-- Multi-level repartition join to verify our projection columns are correctly -- Multi-level repartition join to verify our projection columns are correctly
-- referenced and propagated across multiple repartition jobs. The test also -- referenced and propagated across multiple repartition jobs. The test also


@ -8,7 +8,6 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 710000;
BEGIN; BEGIN;
@ -43,14 +42,6 @@ WHERE
SET citus.large_table_shard_count TO 2; SET citus.large_table_shard_count TO 2;
-- The next test, dual hash repartition join, uses the current jobId to assign
-- tasks in a round-robin fashion. We therefore need to ensure that jobIds start
-- with an odd number here to get consistent test output.
SELECT case when (currval('pg_dist_jobid_seq') % 2) = 0
then nextval('pg_dist_jobid_seq') % 2
else 1 end;
-- Dual hash repartition join which tests the separate hash repartition join -- Dual hash repartition join which tests the separate hash repartition join
-- task assignment algorithm. -- task assignment algorithm.


@ -3,11 +3,8 @@
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
\c - - - :worker_1_port \c - - - :worker_1_port
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
\c - - - :worker_2_port \c - - - :worker_2_port
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
\c - - - :master_port \c - - - :master_port
\a\t \a\t
@ -65,6 +62,7 @@ BEGIN
END; END;
$BODY$ LANGUAGE plpgsql; $BODY$ LANGUAGE plpgsql;
-- Test Text format -- Test Text format
EXPLAIN (COSTS FALSE, FORMAT TEXT) EXPLAIN (COSTS FALSE, FORMAT TEXT)
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
@ -81,6 +79,7 @@ SELECT true AS valid FROM explain_json($$
GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
\c - - - :worker_1_port \c - - - :worker_1_port
-- Test XML format -- Test XML format
EXPLAIN (COSTS FALSE, FORMAT XML) EXPLAIN (COSTS FALSE, FORMAT XML)
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
@ -102,6 +101,7 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
GROUP BY l_quantity ORDER BY count_quantity, l_quantity; GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
\c - - - :worker_2_port \c - - - :worker_2_port
-- Test verbose -- Test verbose
EXPLAIN (COSTS FALSE, VERBOSE TRUE) EXPLAIN (COSTS FALSE, VERBOSE TRUE)
SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx; SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;


@ -3,7 +3,6 @@
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
-- START type creation -- START type creation
@ -213,6 +212,4 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other
ORDER BY repartition_udt.pk; ORDER BY repartition_udt.pk;
\c - - - :worker_1_port \c - - - :worker_1_port
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
\c - - - :worker_2_port \c - - - :worker_2_port
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;


@ -9,7 +9,7 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 810000; SET citus.enable_unique_job_ids TO off;
BEGIN; BEGIN;
@ -17,7 +17,6 @@ BEGIN;
-- pg_ls_dir() displays jobids. We explicitly set the jobId sequence -- pg_ls_dir() displays jobids. We explicitly set the jobId sequence
-- here so that the regression output becomes independent of the -- here so that the regression output becomes independent of the
-- number of jobs executed prior to running this test. -- number of jobs executed prior to running this test.
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250;
SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem;
SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem;


@ -3,7 +3,6 @@
-- --
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
-- START type creation -- START type creation


@ -1,6 +1,5 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000;
-- =================================================================== -- ===================================================================


@ -4,7 +4,6 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 880000;
SET citus.explain_distributed_queries TO off; SET citus.explain_distributed_queries TO off;
@ -76,15 +75,6 @@ EXPLAIN SELECT count(*) FROM task_assignment_test_table;
EXPLAIN SELECT count(*) FROM task_assignment_test_table; EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-- Round-robin task assignment relies on the current jobId. We therefore need to
-- ensure that jobIds start with an odd number here; this way, task assignment
-- debug messages always produce the same output. Also, we make sure that the
-- following case statement always prints out "1" as the query's result.
SELECT case when (currval('pg_dist_jobid_seq') % 2) = 0
then nextval('pg_dist_jobid_seq') % 2
else 1 end;
-- Finally test the round-robin task assignment policy -- Finally test the round-robin task assignment policy
SET citus.task_assignment_policy TO 'round-robin'; SET citus.task_assignment_policy TO 'round-robin';