From e3e174f30fbc7cd01f6d7dcf17c67de8791c78a4 Mon Sep 17 00:00:00 2001
From: Hadi Moshayedi
Date: Tue, 10 Dec 2019 14:25:47 -0800
Subject: [PATCH 1/4] Fix the way we check for local/reference table joins in the executor

---
 .../distributed/executor/multi_executor.c     | 20 +++++++++++++++----
 ...licate_reference_tables_to_coordinator.out |  3 +++
 ...licate_reference_tables_to_coordinator.sql |  4 ++++
 3 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c
index 466c1c023..691abf1ff 100644
--- a/src/backend/distributed/executor/multi_executor.c
+++ b/src/backend/distributed/executor/multi_executor.c
@@ -580,7 +580,7 @@ IsLocalReferenceTableJoinPlan(PlannedStmt *plan)
 {
 	bool hasReferenceTable = false;
 	bool hasLocalTable = false;
-	ListCell *oidCell = NULL;
+	ListCell *rangeTableCell = NULL;
 	bool hasReferenceTableReplica = false;
 
 	/*
@@ -617,12 +617,24 @@ IsLocalReferenceTableJoinPlan(PlannedStmt *plan)
 		return false;
 	}
 
-	foreach(oidCell, plan->relationOids)
+	/*
+	 * plan->rtable contains the flattened RTE lists of the plan tree, which
+	 * includes RTEs in subqueries, CTEs, etc.
+	 *
+	 * It doesn't contain optimized-away table accesses (due to join
+	 * optimization), which is fine for our purpose.
+	 */
+	foreach(rangeTableCell, plan->rtable)
 	{
-		Oid relationId = lfirst_oid(oidCell);
+		RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell);
 		bool onlySearchPath = false;
 
-		if (RelationIsAKnownShard(relationId, onlySearchPath))
+		if (rangeTableEntry->rtekind != RTE_RELATION)
+		{
+			continue;
+		}
+
+		if (RelationIsAKnownShard(rangeTableEntry->relid, onlySearchPath))
 		{
 			/*
 			 * We don't allow joining non-reference distributed tables, so we
diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
index f825e5a50..9ee24b477 100644
--- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
+++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
@@ -244,6 +244,9 @@ HINT: Consider using an equality filter on the distributed table's partition co
 SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR UPDATE;
 ERROR: could not run distributed query with FOR UPDATE/SHARE commands
 HINT: Consider using an equality filter on the distributed table's partition column.
+-- verify that we can drop columns from reference tables replicated to the coordinator
+-- see https://github.com/citusdata/citus/issues/3279
+ALTER TABLE squares DROP COLUMN b;
 -- clean-up
 SET client_min_messages TO ERROR;
 DROP SCHEMA replicate_ref_to_coordinator CASCADE;
diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
index b2e3667e0..0f52cec00 100644
--- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
+++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
@@ -138,6 +138,10 @@ SELECT a FROM t NATURAL JOIN dist;
 SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR SHARE;
 SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR UPDATE;
 
+-- verify that we can drop columns from reference tables replicated to the coordinator
+-- see https://github.com/citusdata/citus/issues/3279
+ALTER TABLE squares DROP COLUMN b;
+
 -- clean-up
 SET client_min_messages TO ERROR;
 DROP SCHEMA replicate_ref_to_coordinator CASCADE;

From 067d92a7f65dd191e94eceeeb49b997596d92a53 Mon Sep 17 00:00:00 2001
From: Hadi Moshayedi
Date: Wed, 11 Dec 2019 14:31:34 -0800
Subject: [PATCH 2/4] Don't plan joins between ref tables and views locally

---
 .../distributed/planner/distributed_planner.c |  9 ++++
 .../regress/expected/multi_test_helpers.out   | 28 +++++++++---
 ...licate_reference_tables_to_coordinator.out | 45 +++++++++++++++++++
 src/test/regress/sql/multi_test_helpers.sql   | 29 +++++++++---
 ...licate_reference_tables_to_coordinator.sql | 31 +++++++++++++
 5 files changed, 128 insertions(+), 14 deletions(-)

diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c
index 2e825ccb4..47f75da80 100644
--- a/src/backend/distributed/planner/distributed_planner.c
+++ b/src/backend/distributed/planner/distributed_planner.c
@@ -1965,6 +1965,15 @@ IsLocalReferenceTableJoin(Query *parse, List *rangeTableList)
 			continue;
 		}
 
+		/*
+		 * We only allow local joins for the relation kinds for which we can
+		 * deterministically decide whether accesses to them are local or
+		 * distributed. For this reason, we don't allow non-materialized views.
+		 */
+		if (rangeTableEntry->relkind == RELKIND_VIEW)
+		{
+			return false;
+		}
 
 		if (!IsDistributedTable(rangeTableEntry->relid))
 		{
diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out
index 33651f00a..b1a5f5637 100644
--- a/src/test/regress/expected/multi_test_helpers.out
+++ b/src/test/regress/expected/multi_test_helpers.out
@@ -1,13 +1,13 @@
--- File to create functions and helpers needed for subsequent tests
+-- File to CREATE FUNCTIONs and helpers needed for subsequent tests
 -- create a helper function to create objects on each node
-CREATE FUNCTION run_command_on_master_and_workers(p_sql text)
+CREATE OR REPLACE FUNCTION run_command_on_master_and_workers(p_sql text)
 RETURNS void LANGUAGE plpgsql AS $$
 BEGIN
      EXECUTE p_sql;
      PERFORM run_command_on_workers(p_sql);
 END;$$;
 -- Create a function to make sure that queries returning the same result
-CREATE FUNCTION raise_failed_execution(query text) RETURNS void AS $$
+CREATE OR REPLACE FUNCTION raise_failed_execution(query text) RETURNS void AS $$
 BEGIN
 	EXECUTE query;
 EXCEPTION WHEN OTHERS THEN
@@ -29,8 +29,22 @@ BEGIN
     END LOOP;
     RETURN;
 END; $$ language plpgsql;
+-- Checks whether the plan produced by the given EXPLAIN command is distributed
+CREATE OR REPLACE FUNCTION plan_is_distributed(explain_command text)
+RETURNS BOOLEAN AS $$
+DECLARE
+    query_plan TEXT;
+BEGIN
+    FOR query_plan IN execute explain_command LOOP
+        IF query_plan LIKE '%Task Count:%'
+        THEN
+            RETURN TRUE;
+        END IF;
+    END LOOP;
+    RETURN FALSE;
+END; $$ language plpgsql;
 -- helper function to quickly run SQL on the whole cluster
-CREATE FUNCTION run_command_on_coordinator_and_workers(p_sql text)
+CREATE OR REPLACE FUNCTION run_command_on_coordinator_and_workers(p_sql text)
 RETURNS void LANGUAGE plpgsql AS $$
 BEGIN
      EXECUTE p_sql;
@@ -38,7 +52,7 @@ BEGIN
 END;$$;
 -- 1. Marks the given procedure as colocated with the given table.
 -- 2. Marks the argument index with which we route the procedure.
-CREATE FUNCTION colocate_proc_with_table(procname text, tablerelid regclass, argument_index int)
+CREATE OR REPLACE FUNCTION colocate_proc_with_table(procname text, tablerelid regclass, argument_index int)
 RETURNS void LANGUAGE plpgsql AS $$
 BEGIN
     update citus.pg_dist_object
@@ -66,7 +80,7 @@ BEGIN
     RETURN true;
 END;
 $func$;
-CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
+CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
 RETURNS void
 LANGUAGE C STRICT
 AS 'citus';
@@ -80,7 +94,7 @@ SELECT pg_reload_conf();
 (1 row)
 
 -- Verifies pg_dist_node and pg_dist_palcement in the given worker matches the ones in coordinator
-CREATE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
+CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636)
 RETURNS BOOLEAN
 LANGUAGE sql
 AS $$
diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
index 9ee24b477..287475d58 100644
--- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
+++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
@@ -244,6 +244,51 @@ HINT: Consider using an equality filter on the distributed table's partition co
 SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR UPDATE;
 ERROR: could not run distributed query with FOR UPDATE/SHARE commands
 HINT: Consider using an equality filter on the distributed table's partition column.
+--
+-- Joins between reference tables and views shouldn't be planned locally.
+--
+CREATE VIEW numbers_v AS SELECT * FROM numbers WHERE a=1;
+SELECT public.coordinator_plan($Q$
+EXPLAIN (COSTS FALSE)
+  SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a;
+$Q$);
+       coordinator_plan
+------------------------------
+ Custom Scan (Citus Adaptive)
+   Task Count: 1
+(2 rows)
+
+CREATE VIEW local_table_v AS SELECT * FROM local_table WHERE a BETWEEN 1 AND 10;
+SELECT public.coordinator_plan($Q$
+EXPLAIN (COSTS FALSE)
+  SELECT * FROM squares JOIN local_table_v ON squares.a = local_table_v.a;
+$Q$);
+                coordinator_plan
+------------------------------------------------
+ Custom Scan (Citus Adaptive)
+   ->  Distributed Subplan 24_1
+         ->  Seq Scan on local_table
+               Filter: ((a >= 1) AND (a <= 10))
+   Task Count: 1
+(5 rows)
+
+DROP VIEW numbers_v, local_table_v;
+--
+-- Joins between reference tables and materialized views are allowed to
+-- be planned locally
+--
+CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers WHERE a BETWEEN 1 AND 10;
+LOG: executing the command locally: SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 numbers WHERE ((a OPERATOR(pg_catalog.>=) 1) AND (a OPERATOR(pg_catalog.<=) 10))
+REFRESH MATERIALIZED VIEW numbers_v;
+SELECT public.plan_is_distributed($Q$
+EXPLAIN (COSTS FALSE)
+  SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a;
+$Q$);
+ plan_is_distributed
+---------------------
+ f
+(1 row)
+
 -- verify that we can drop columns from reference tables replicated to the coordinator
 -- see https://github.com/citusdata/citus/issues/3279
 ALTER TABLE squares DROP COLUMN b;
diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql
index 160bcb7f0..d3cb7b07e 100644
--- a/src/test/regress/sql/multi_test_helpers.sql
+++ b/src/test/regress/sql/multi_test_helpers.sql
@@ -1,7 +1,7 @@
--- File to create functions and helpers needed for subsequent tests
+-- File to CREATE FUNCTIONs and helpers needed for subsequent tests
 
 -- create a helper function to create objects on each node
-CREATE FUNCTION run_command_on_master_and_workers(p_sql text)
+CREATE OR REPLACE FUNCTION run_command_on_master_and_workers(p_sql text)
 RETURNS void LANGUAGE plpgsql AS $$
 BEGIN
      EXECUTE p_sql;
@@ -9,7 +9,7 @@ BEGIN
 END;$$;
 
 -- Create a function to make sure that queries returning the same result
-CREATE FUNCTION raise_failed_execution(query text) RETURNS void AS $$
+CREATE OR REPLACE FUNCTION raise_failed_execution(query text) RETURNS void AS $$
 BEGIN
 	EXECUTE query;
 EXCEPTION WHEN OTHERS THEN
@@ -33,8 +33,23 @@ BEGIN
     RETURN;
 END; $$ language plpgsql;
 
+-- Checks whether the plan produced by the given EXPLAIN command is distributed
+CREATE OR REPLACE FUNCTION plan_is_distributed(explain_command text)
+RETURNS BOOLEAN AS $$
+DECLARE
+    query_plan TEXT;
+BEGIN
+    FOR query_plan IN execute explain_command LOOP
+        IF query_plan LIKE '%Task Count:%'
+        THEN
+            RETURN TRUE;
+        END IF;
+    END LOOP;
+    RETURN FALSE;
+END; $$ language plpgsql;
+
 -- helper function to quickly run SQL on the whole cluster
-CREATE FUNCTION run_command_on_coordinator_and_workers(p_sql text)
+CREATE OR REPLACE FUNCTION run_command_on_coordinator_and_workers(p_sql text)
 RETURNS void LANGUAGE plpgsql AS $$
 BEGIN
      EXECUTE p_sql;
@@ -43,7 +58,7 @@ END;$$;
 
 -- 1. Marks the given procedure as colocated with the given table.
 -- 2. Marks the argument index with which we route the procedure.
-CREATE FUNCTION colocate_proc_with_table(procname text, tablerelid regclass, argument_index int) +CREATE OR REPLACE FUNCTION colocate_proc_with_table(procname text, tablerelid regclass, argument_index int) RETURNS void LANGUAGE plpgsql AS $$ BEGIN update citus.pg_dist_object @@ -73,7 +88,7 @@ BEGIN END; $func$; -CREATE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000) +CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000) RETURNS void LANGUAGE C STRICT AS 'citus'; @@ -84,7 +99,7 @@ ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500; SELECT pg_reload_conf(); -- Verifies pg_dist_node and pg_dist_palcement in the given worker matches the ones in coordinator -CREATE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636) +CREATE OR REPLACE FUNCTION verify_metadata(hostname TEXT, port INTEGER, master_port INTEGER DEFAULT 57636) RETURNS BOOLEAN LANGUAGE sql AS $$ diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql index 0f52cec00..d509f9bd1 100644 --- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql +++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql @@ -138,10 +138,41 @@ SELECT a FROM t NATURAL JOIN dist; SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR SHARE; SELECT local_table.a, numbers.a FROM local_table NATURAL JOIN numbers FOR UPDATE; + +-- +-- Joins between reference tables and views shouldn't be planned locally. +-- + +CREATE VIEW numbers_v AS SELECT * FROM numbers WHERE a=1; +SELECT public.coordinator_plan($Q$ +EXPLAIN (COSTS FALSE) + SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a; +$Q$); + +CREATE VIEW local_table_v AS SELECT * FROM local_table WHERE a BETWEEN 1 AND 10; +SELECT public.coordinator_plan($Q$ +EXPLAIN (COSTS FALSE) + SELECT * FROM squares JOIN local_table_v ON squares.a = local_table_v.a; +$Q$); + +DROP VIEW numbers_v, local_table_v; + +-- +-- Joins between reference tables and materialized views are allowed to +-- be planned locally +-- +CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers WHERE a BETWEEN 1 AND 10; +REFRESH MATERIALIZED VIEW numbers_v; +SELECT public.plan_is_distributed($Q$ +EXPLAIN (COSTS FALSE) + SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a; +$Q$); + -- verify that we can drop columns from reference tables replicated to the coordinator -- see https://github.com/citusdata/citus/issues/3279 ALTER TABLE squares DROP COLUMN b; + -- clean-up SET client_min_messages TO ERROR; DROP SCHEMA replicate_ref_to_coordinator CASCADE; From 939d3c955bc5de2a8f688cd533533b64f4a94356 Mon Sep 17 00:00:00 2001 From: Hadi Moshayedi Date: Wed, 11 Dec 2019 16:06:48 -0800 Subject: [PATCH 3/4] Don't plan function joins locally --- .../distributed/executor/multi_executor.c | 21 ++++++++++++ .../distributed/planner/distributed_planner.c | 14 ++++++++ ...licate_reference_tables_to_coordinator.out | 33 ++++++++++++++++++- ...licate_reference_tables_to_coordinator.sql | 25 +++++++++++++- 4 files changed, 91 insertions(+), 2 deletions(-) diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index 691abf1ff..436ce34be 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -14,6 +14,7 @@ #include "access/xact.h" #include "catalog/dependency.h" +#include "catalog/pg_class.h" 
 #include "catalog/namespace.h"
 #include "distributed/citus_custom_scan.h"
 #include "distributed/commands/multi_copy.h"
@@ -629,11 +630,31 @@ IsLocalReferenceTableJoinPlan(PlannedStmt *plan)
 		RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell);
 		bool onlySearchPath = false;
 
+		/*
+		 * The planner's IsLocalReferenceTableJoin() doesn't allow planning
+		 * functions in the FROM clause locally, so exit early. We cannot use
+		 * Assert() here since non-Citus plans also pass through these checks.
+		 */
+		if (rangeTableEntry->rtekind == RTE_FUNCTION)
+		{
+			return false;
+		}
+
 		if (rangeTableEntry->rtekind != RTE_RELATION)
 		{
 			continue;
 		}
 
+		/*
+		 * The planner's IsLocalReferenceTableJoin() doesn't allow planning joins
+		 * between reference tables and views locally, so exit early. We cannot
+		 * use Assert() here since non-Citus plans also pass through these checks.
+		 */
+		if (rangeTableEntry->relkind == RELKIND_VIEW)
+		{
+			return false;
+		}
+
 		if (RelationIsAKnownShard(rangeTableEntry->relid, onlySearchPath))
 		{
 			/*
diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c
index 47f75da80..a767dac59 100644
--- a/src/backend/distributed/planner/distributed_planner.c
+++ b/src/backend/distributed/planner/distributed_planner.c
@@ -1955,6 +1955,20 @@ IsLocalReferenceTableJoin(Query *parse, List *rangeTableList)
 	{
 		RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell);
 
+		/*
+		 * Don't plan joins involving functions locally, since we cannot tell
+		 * whether they perform distributed accesses, and defaulting to local
+		 * planning might break transactional semantics.
+		 *
+		 * For example, access to a reference table inside the function might
+		 * go over a connection, while access to the same reference table
+		 * outside the function goes over the current backend. The snapshot for
+		 * the connection in the function is taken after the statement snapshot,
+		 * so the two can see different views of the data.
+		 *
+		 * Looking at gram.y, RTE_TABLEFUNC is used only for XMLTABLE(), which
+		 * is okay to plan locally, so we allow it.
+		 */
 		if (rangeTableEntry->rtekind == RTE_FUNCTION)
 		{
 			return false;
diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
index 287475d58..37fba7cf4 100644
--- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
+++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
@@ -275,7 +275,7 @@ $Q$);
 DROP VIEW numbers_v, local_table_v;
 --
 -- Joins between reference tables and materialized views are allowed to
--- be planned locally
+-- be planned locally.
 --
 CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers WHERE a BETWEEN 1 AND 10;
 LOG: executing the command locally: SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 numbers WHERE ((a OPERATOR(pg_catalog.>=) 1) AND (a OPERATOR(pg_catalog.<=) 10))
@@ -289,6 +289,37 @@ $Q$);
  f
 (1 row)
 
+BEGIN;
+SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a;
+ERROR: cannot join local tables and reference tables in a transaction block, udf block, or distributed CTE subquery
+END;
+--
+-- Joins between reference tables, local tables, and function calls shouldn't
+-- be planned locally.
+--
+SELECT count(*)
+FROM local_table a, numbers b, generate_series(1, 10) c
+WHERE a.a = b.a AND a.a = c;
+ERROR: relation local_table is not distributed
+-- but it should be okay if the function call is not a data source
+SELECT public.plan_is_distributed($Q$
+EXPLAIN (COSTS FALSE)
+SELECT abs(a.a) FROM local_table a, numbers b WHERE a.a = b.a;
+$Q$);
+ plan_is_distributed
+---------------------
+ f
+(1 row)
+
+SELECT public.plan_is_distributed($Q$
+EXPLAIN (COSTS FALSE)
+SELECT a.a FROM local_table a, numbers b WHERE a.a = b.a ORDER BY abs(a.a);
+$Q$);
+ plan_is_distributed
+---------------------
+ f
+(1 row)
+
 -- verify that we can drop columns from reference tables replicated to the coordinator
 -- see https://github.com/citusdata/citus/issues/3279
 ALTER TABLE squares DROP COLUMN b;
diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
index d509f9bd1..23bcf469f 100644
--- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
+++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
@@ -159,7 +159,7 @@ DROP VIEW numbers_v, local_table_v;
 
 --
 -- Joins between reference tables and materialized views are allowed to
--- be planned locally
+-- be planned locally.
 --
 CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers WHERE a BETWEEN 1 AND 10;
 REFRESH MATERIALIZED VIEW numbers_v;
@@ -168,6 +168,29 @@ EXPLAIN (COSTS FALSE)
   SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a;
 $Q$);
 
+BEGIN;
+SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a;
+END;
+
+--
+-- Joins between reference tables, local tables, and function calls shouldn't
+-- be planned locally.
+--
+SELECT count(*)
+FROM local_table a, numbers b, generate_series(1, 10) c
+WHERE a.a = b.a AND a.a = c;
+
+-- but it should be okay if the function call is not a data source
+SELECT public.plan_is_distributed($Q$
+EXPLAIN (COSTS FALSE)
+SELECT abs(a.a) FROM local_table a, numbers b WHERE a.a = b.a;
+$Q$);
+
+SELECT public.plan_is_distributed($Q$
+EXPLAIN (COSTS FALSE)
+SELECT a.a FROM local_table a, numbers b WHERE a.a = b.a ORDER BY abs(a.a);
+$Q$);
+
 -- verify that we can drop columns from reference tables replicated to the coordinator
 -- see https://github.com/citusdata/citus/issues/3279
 ALTER TABLE squares DROP COLUMN b;

From 383d34f51b64d9973d8da3feaac98d0bb81dbc0a Mon Sep 17 00:00:00 2001
From: Hadi Moshayedi
Date: Wed, 11 Dec 2019 19:54:15 -0800
Subject: [PATCH 4/4] Tests for multi-statement transactions with subqueries or CTEs

---
 ...licate_reference_tables_to_coordinator.out | 22 +++++++++++++++++++
 ...licate_reference_tables_to_coordinator.sql | 22 +++++++++++++++++++
 2 files changed, 44 insertions(+)

diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
index 37fba7cf4..83f0c0cf7 100644
--- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
+++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
@@ -190,6 +190,28 @@ BEGIN;
 SELECT local_table.a, r.a FROM local_table NATURAL JOIN s1.ref r ORDER BY 1;
 ERROR: cannot join local tables and reference tables in a transaction block, udf block, or distributed CTE subquery
 ROLLBACK;
+BEGIN;
+WITH t1 AS (
+    SELECT random() r, a FROM local_table
+) SELECT count(*) FROM t1, numbers WHERE t1.a = numbers.a AND r < 0.5;
+ERROR: cannot join local tables and reference tables in a transaction block, udf block, or distributed CTE subquery
+END;
+BEGIN;
+WITH t1 AS (
+    SELECT random() r, a FROM numbers
+) SELECT count(*) FROM t1, local_table WHERE t1.a = local_table.a AND r < 0.5;
+ERROR: cannot join local tables and reference tables in a transaction block, udf block, or distributed CTE subquery
+END;
+BEGIN;
+SELECT count(*) FROM local_table
+WHERE EXISTS(SELECT random() FROM numbers WHERE local_table.a = numbers.a);
+ERROR: cannot join local tables and reference tables in a transaction block, udf block, or distributed CTE subquery
+END;
+BEGIN;
+SELECT count(*) FROM numbers
+WHERE EXISTS(SELECT random() FROM local_table WHERE local_table.a = numbers.a);
+ERROR: cannot join local tables and reference tables in a transaction block, udf block, or distributed CTE subquery
+END;
 DROP SCHEMA s1 CASCADE;
 NOTICE: drop cascades to 2 other objects
 DETAIL: drop cascades to table s1.ref
diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
index 23bcf469f..7da4aa0fc 100644
--- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
+++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
@@ -96,6 +96,28 @@ BEGIN;
 SELECT local_table.a, r.a FROM local_table NATURAL JOIN s1.ref r ORDER BY 1;
 ROLLBACK;
 
+BEGIN;
+WITH t1 AS (
+    SELECT random() r, a FROM local_table
+) SELECT count(*) FROM t1, numbers WHERE t1.a = numbers.a AND r < 0.5;
+END;
+
+BEGIN;
+WITH t1 AS (
+    SELECT random() r, a FROM numbers
+) SELECT count(*) FROM t1, local_table WHERE t1.a = local_table.a AND r < 0.5;
+END;
+
+BEGIN;
+SELECT count(*) FROM local_table
+WHERE EXISTS(SELECT random() FROM numbers WHERE local_table.a = numbers.a);
+END;
+
+BEGIN;
+SELECT count(*) FROM numbers
+WHERE EXISTS(SELECT random() FROM local_table WHERE local_table.a = numbers.a);
+END;
+
 DROP SCHEMA s1 CASCADE;
 
 -- error if inside a SQL UDF call
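
As a usage sketch (a hypothetical psql session, not part of the patch series), the plan_is_distributed() helper added in patch 2 simply runs the EXPLAIN command it is given and returns true only if the output contains the "Task Count:" line that Citus prints for distributed plans. Assuming the dist and local_table tables from the regression tests above:

-- should return true: scanning a distributed table yields a Citus plan,
-- and Citus plans include a "Task Count:" line in their EXPLAIN output
SELECT public.plan_is_distributed($Q$
EXPLAIN (COSTS FALSE) SELECT count(*) FROM dist;
$Q$);

-- should return false: a plain local PostgreSQL plan never prints "Task Count:"
SELECT public.plan_is_distributed($Q$
EXPLAIN (COSTS FALSE) SELECT count(*) FROM local_table;
$Q$);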