Merge branch 'release-13.0' into fix_7875

pull/7910/head
Muhammad Usama 2025-03-03 16:28:57 +05:00 committed by GitHub
commit 6316bfee0f
10 changed files with 631 additions and 8 deletions

citus-tools Submodule

@@ -0,0 +1 @@
Subproject commit 3376bd6845f0614908ed304f5033bd644c82d3bf


@@ -151,7 +151,10 @@ static RouterPlanType GetRouterPlanType(Query *query,
bool hasUnresolvedParams);
static void ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan,
PlannedStmt *concatPlan);
static bool CheckPostPlanDistribution(bool isDistributedQuery,
Query *origQuery,
List *rangeTableList,
Query *plannedQuery);
/* Distributed planner hook */
PlannedStmt *
@@ -272,6 +275,11 @@ distributed_planner(Query *parse,
planContext.plan = standard_planner(planContext.query, NULL,
planContext.cursorOptions,
planContext.boundParams);
needsDistributedPlanning = CheckPostPlanDistribution(needsDistributedPlanning,
planContext.originalQuery,
rangeTableList,
planContext.query);
if (needsDistributedPlanning)
{
result = PlanDistributedStmt(&planContext, rteIdCounter);
@@ -2729,3 +2737,41 @@ WarnIfListHasForeignDistributedTable(List *rangeTableList)
}
}
}
static bool
CheckPostPlanDistribution(bool isDistributedQuery,
Query *origQuery, List *rangeTableList,
Query *plannedQuery)
{
if (isDistributedQuery)
{
Node *origQuals = origQuery->jointree->quals;
Node *plannedQuals = plannedQuery->jointree->quals;
#if PG_VERSION_NUM >= PG_VERSION_17
if (IsMergeQuery(origQuery))
{
origQuals = origQuery->mergeJoinCondition;
plannedQuals = plannedQuery->mergeJoinCondition;
}
#endif
/*
 * If the WHERE quals were eliminated by the Postgres planner (for example,
 * because an OR clause was simplified to TRUE), we need to re-check whether
 * the planned query still requires distributed planning.
 */
if (origQuals != NULL && plannedQuals == NULL)
{
List *rtesPostPlan = ExtractRangeTableEntryList(plannedQuery);
if (list_length(rtesPostPlan) < list_length(rangeTableList))
{
isDistributedQuery = ListContainsDistributedTableRTE(
rtesPostPlan, NULL);
}
}
}
return isDistributedQuery;
}
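For context, the regression tests added later in this commit exercise exactly this code path. Below is a minimal sketch (table and column names are hypothetical; t_dist stands for a distributed table, t_local for a plain Postgres table) of the kind of statement whose quals the Postgres planner folds away:

UPDATE t_local
SET vkey = 117
WHERE (pkey IN (SELECT pkey FROM t_dist WHERE false))
   OR TRUE;
-- standard_planner() simplifies the WHERE clause to TRUE and drops the subquery
-- range table entries, so only t_local remains in the planned query. With this
-- change, CheckPostPlanDistribution() detects that no distributed table is left
-- and the statement falls through to plain local planning.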


@@ -1583,7 +1583,7 @@ IsLocalTableModification(Oid targetRelationId, Query *query, uint64 shardId,
return true;
}
- if (shardId == INVALID_SHARD_ID && ContainsOnlyLocalTables(rteProperties))
+ if (shardId == INVALID_SHARD_ID && ContainsOnlyLocalOrReferenceTables(rteProperties))
{
return true;
}


@@ -2556,13 +2556,15 @@ AllShardsColocated(List *relationShardList)
/*
- * ContainsOnlyLocalTables returns true if there is only
- * local tables and not any distributed or reference table.
+ * ContainsOnlyLocalOrReferenceTables returns true if there are no distributed
+ * tables in the query. In other words, the query might reference only local
+ * tables and/or reference tables, but no fully distributed tables.
 */
bool
- ContainsOnlyLocalTables(RTEListProperties *rteProperties)
+ ContainsOnlyLocalOrReferenceTables(RTEListProperties *rteProperties)
{
- return !rteProperties->hasDistributedTable && !rteProperties->hasReferenceTable;
+ /* If hasDistributedTable is false, then all tables are either local or reference. */
+ return !rteProperties->hasDistributedTable;
}
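In practice, the relaxed check lets a modification of a local table that only additionally references a reference table be planned as a local-table modification, which is the scenario from issue #7891 exercised by the new regression test below. A minimal sketch with hypothetical table names:

-- local_tbl is a plain Postgres table, ref_tbl is a Citus reference table.
UPDATE local_tbl
SET vkey = 43
WHERE EXISTS (SELECT 1 FROM ref_tbl);
-- For this query rteProperties->hasDistributedTable is false, so
-- ContainsOnlyLocalOrReferenceTables() now returns true and
-- IsLocalTableModification() treats the UPDATE as a local modification
-- instead of producing an empty (0-task) distributed plan.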


@@ -118,7 +118,7 @@ extern bool HasDangerousJoinUsing(List *rtableList, Node *jtnode);
extern Job * RouterJob(Query *originalQuery,
PlannerRestrictionContext *plannerRestrictionContext,
DeferredErrorMessage **planningError);
- extern bool ContainsOnlyLocalTables(RTEListProperties *rteProperties);
+ extern bool ContainsOnlyLocalOrReferenceTables(RTEListProperties *rteProperties);
extern RangeTblEntry * ExtractSourceResultRangeTableEntry(Query *query);
#endif /* MULTI_ROUTER_PLANNER_H */


@@ -0,0 +1,211 @@
-- This test validates that the query planner correctly handles nested subqueries involving both a
-- local table (t4_pg) and a reference table (t2_ref). The steps are as follows:
--
-- 1. A dedicated schema (issue_7891) is created, and four tables (t2_ref, t2_ref2, t4_pg, t6_pg) are set up.
-- 2. The tables t2_ref and t2_ref2 are designated as reference tables using the create_reference_table() function.
-- 3. Sample data is inserted into all tables.
-- 4. An UPDATE is executed on t6_pg. The update uses an EXISTS clause with a nested subquery:
-- - The outer subquery iterates over every row in t4_pg.
-- - The inner subquery selects c15 from t2_ref.
-- 5. The update should occur if the nested subquery returns any row, effectively updating t6_pg's vkey to 43.
-- 6. The final state of t6_pg is displayed to confirm that the update was applied.
--
-- Note: This test was originally designed to detect a planner bug where the nested structure might
-- lead to an incorrect plan (such as a 0-task plan), ensuring proper handling of reference and local tables.
-- https://github.com/citusdata/citus/issues/7891
CREATE SCHEMA issue_7891;
SET search_path TO issue_7891;
-- Create tables
CREATE TABLE t2_ref (
vkey INT,
pkey INT,
c15 TIMESTAMP
);
CREATE TABLE t2_ref2 (
vkey INT,
pkey INT,
c15 TIMESTAMP
);
CREATE TABLE t4_pg (
vkey INT,
pkey INT,
c22 NUMERIC,
c23 TEXT,
c24 TIMESTAMP
);
CREATE TABLE t6_pg (
vkey INT,
pkey INT,
c26 TEXT
);
-- Mark t2_ref and t2_ref2 as reference tables
SELECT create_reference_table('t2_ref');
create_reference_table
---------------------------------------------------------------------
(1 row)
SELECT create_reference_table('t2_ref2');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- Insert sample data
INSERT INTO t6_pg (vkey, pkey, c26) VALUES
(2, 12000, 'initial'),
(3, 13000, 'will_be_deleted'),
(4, 14000, 'to_merge');
INSERT INTO t4_pg (vkey, pkey, c22, c23, c24)
VALUES (5, 15000, 0.0, ']]?', MAKE_TIMESTAMP(2071, 10, 26, 16, 20, 5));
INSERT INTO t2_ref (vkey, pkey, c15)
VALUES (14, 24000, NULL::timestamp);
-- Show initial data
SELECT 't6_pg before' AS label, * FROM t6_pg;
label | vkey | pkey | c26
---------------------------------------------------------------------
t6_pg before | 2 | 12000 | initial
t6_pg before | 3 | 13000 | will_be_deleted
t6_pg before | 4 | 14000 | to_merge
(3 rows)
SELECT 't4_pg data' AS label, * FROM t4_pg;
label | vkey | pkey | c22 | c23 | c24
---------------------------------------------------------------------
t4_pg data | 5 | 15000 | 0.0 | ]]? | Mon Oct 26 16:20:05 2071
(1 row)
SELECT 't2_ref data' AS label, * FROM t2_ref;
label | vkey | pkey | c15
---------------------------------------------------------------------
t2_ref data | 14 | 24000 |
(1 row)
--
-- The problematic query: update t6_pg referencing t4_pg and sub-subquery on t2_ref.
-- Historically this could produce a 0-task plan if the planner incorrectly failed to
-- treat t4_pg/t2_ref as local/reference tables.
--
-- The outer subquery iterates over every row in table t4_pg.
UPDATE t6_pg
SET vkey = 43
WHERE EXISTS (
SELECT (SELECT c15 FROM t2_ref)
FROM t4_pg
);
SELECT 't6_pg after' AS label, * FROM t6_pg;
label | vkey | pkey | c26
---------------------------------------------------------------------
t6_pg after | 43 | 12000 | initial
t6_pg after | 43 | 13000 | will_be_deleted
t6_pg after | 43 | 14000 | to_merge
(3 rows)
--
-- DELETE with a similar nested subquery approach
-- Here we delete rows whenever t4_pg is non-empty (a trivially true check).
-- We'll specifically target the row with c26='will_be_deleted' to confirm it's removed.
--
DELETE FROM t6_pg
WHERE EXISTS (
SELECT (SELECT c15 FROM t2_ref)
FROM t4_pg
)
AND c26 = 'will_be_deleted';
SELECT 't6_pg after DELETE' AS label, * FROM t6_pg;
label | vkey | pkey | c26
---------------------------------------------------------------------
t6_pg after DELETE | 43 | 12000 | initial
t6_pg after DELETE | 43 | 14000 | to_merge
(2 rows)
--
-- We'll merge from t4_pg into t6_pg. The merge will update c26 for pkey=14000.
--
-- Expect an error indicating that non-IMMUTABLE functions are not yet supported in MERGE statements on distributed tables.
-- Retain this comment to highlight the current limitation.
--
MERGE INTO t6_pg AS tgt
USING t4_pg AS src
ON (tgt.pkey = 14000)
WHEN MATCHED THEN
UPDATE SET c26 = 'merged_' || (SELECT pkey FROM t2_ref WHERE pkey=24000 LIMIT 1)
WHEN NOT MATCHED THEN
INSERT (vkey, pkey, c26)
VALUES (99, src.pkey, 'inserted_via_merge');
ERROR: non-IMMUTABLE functions are not yet supported in MERGE sql with distributed tables
MERGE INTO t2_ref AS tgt
USING t4_pg AS src
ON (tgt.pkey = src.pkey)
WHEN MATCHED THEN
UPDATE SET c15 = '2088-01-01 00:00:00'::timestamp
WHEN NOT MATCHED THEN
INSERT (vkey, pkey, c15)
VALUES (src.vkey, src.pkey, '2099-12-31 23:59:59'::timestamp);
ERROR: Reference table as target is not allowed in MERGE command
-- Show the final state of t2_ref:
SELECT 't2_ref after MERGE (using t4_pg)' AS label, * FROM t2_ref;
label | vkey | pkey | c15
---------------------------------------------------------------------
t2_ref after MERGE (using t4_pg) | 14 | 24000 |
(1 row)
MERGE INTO t2_ref2 AS tgt
USING t2_ref AS src
ON (tgt.pkey = src.pkey)
WHEN MATCHED THEN
UPDATE SET c15 = '2077-07-07 07:07:07'::timestamp
WHEN NOT MATCHED THEN
INSERT (vkey, pkey, c15)
VALUES (src.vkey, src.pkey, '2066-06-06 06:06:06'::timestamp);
ERROR: Reference table as target is not allowed in MERGE command
-- Show the final state of t2_ref2:
SELECT 't2_ref2 after MERGE (using t2_ref)' AS label, * FROM t2_ref2;
label | vkey | pkey | c15
---------------------------------------------------------------------
(0 rows)
MERGE INTO t6_pg AS tgt
USING t4_pg AS src
ON (tgt.pkey = src.pkey)
WHEN MATCHED THEN
UPDATE SET c26 = 'merged_value'
WHEN NOT MATCHED THEN
INSERT (vkey, pkey, c26)
VALUES (src.vkey, src.pkey, 'inserted_via_merge');
SELECT 't6_pg after MERGE' AS label, * FROM t6_pg;
label | vkey | pkey | c26
---------------------------------------------------------------------
t6_pg after MERGE | 43 | 12000 | initial
t6_pg after MERGE | 43 | 14000 | to_merge
t6_pg after MERGE | 5 | 15000 | inserted_via_merge
(3 rows)
--
-- Update the REFERENCE table itself and verify the change
-- This is to ensure that the reference table is correctly handled.
UPDATE t2_ref
SET c15 = '2099-01-01 00:00:00'::timestamp
WHERE pkey = 24000;
SELECT 't2_ref after self-update' AS label, * FROM t2_ref;
label | vkey | pkey | c15
---------------------------------------------------------------------
t2_ref after self-update | 14 | 24000 | Thu Jan 01 00:00:00 2099
(1 row)
UPDATE t2_ref
SET c15 = '2099-01-01 00:00:00'::timestamp
WHERE EXISTS (
SELECT 1
FROM t4_pg
);
ERROR: relation t4_pg is not distributed
SELECT 't2_ref after UPDATE' AS label, * FROM t2_ref;
label | vkey | pkey | c15
---------------------------------------------------------------------
t2_ref after UPDATE | 14 | 24000 | Thu Jan 01 00:00:00 2099
(1 row)
-- Cleanup
SET client_min_messages TO WARNING;
DROP SCHEMA issue_7891 CASCADE;


@@ -1146,7 +1146,117 @@ WHERE (SELECT COUNT(DISTINCT e1.value_2)
(1 row)
-- Test redundant WHERE clause (fix #7782, #7783)
CREATE TABLE t0 (vkey int4, pkey int4, c0 timestamp);
CREATE TABLE t1 (vkey int4, pkey int4, c4 timestamp, c5 text, c6 text);
CREATE TABLE t3 (vkey int4, pkey int4, c9 timestamp);
CREATE TABLE t7 (vkey int4, pkey int4);
-- DEBUG messages not needed for these tests
SET client_min_messages TO DEFAULT;
INSERT INTO t0 (vkey, pkey, c0) values
(3, 13000, make_timestamp(2032, 9, 4, 13, 38, 0));
INSERT INTO t7 (vkey, pkey) values
(3, 59525);
SELECT create_reference_table('t1');
create_reference_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('t3', 'c9');
create_distributed_table
---------------------------------------------------------------------
(1 row)
UPDATE t0 set vkey = 117
where (((t0.pkey) in (select t7.vkey from t7 where false
union all
select t3.pkey from t3 where false
)))
or TRUE;
-- Local table t0 is updated
SELECT vkey, pkey, c0 FROM t0;
vkey | pkey | c0
---------------------------------------------------------------------
117 | 13000 | Sat Sep 04 13:38:00 2032
(1 row)
-- MERGE command with redundant join can be planned locally
EXPLAIN (costs off, timing off)
MERGE INTO t0 USING t7 ON
(((t0.pkey) in (select t7.vkey from t7 where false
union all
select t1.pkey from t1 where false
)))
or TRUE
WHEN MATCHED THEN
UPDATE SET vkey = 113;
QUERY PLAN
---------------------------------------------------------------------
Merge on t0
-> Nested Loop
-> Seq Scan on t7
-> Materialize
-> Seq Scan on t0
(5 rows)
-- UPDATE via MERGE with redundant join clause:
MERGE INTO t0 USING t7 ON
(((t0.pkey) in (select t7.vkey from t7 where false
union all
select t1.pkey from t1 where false
)))
or TRUE
WHEN MATCHED THEN
UPDATE SET vkey = 113;
-- Local table t0 is updated
SELECT vkey, pkey, c0 FROM t0;
vkey | pkey | c0
---------------------------------------------------------------------
113 | 13000 | Sat Sep 04 13:38:00 2032
(1 row)
DELETE FROM t0
where TRUE or (((t0.vkey) >= (select
pg_catalog.regexp_count(ref_0.c5, ref_0.c6)
from t1 as ref_0 where true)));
-- Local table t0 is now empty (0 rows)
SELECT vkey, pkey, c0 FROM t0;
vkey | pkey | c0
---------------------------------------------------------------------
(0 rows)
INSERT INTO t3 (vkey, pkey, c9) values
(3, 13000, make_timestamp(2032, 9, 4, 13, 38, 0));
-- Distributed table update with redundant WHERE
UPDATE t3 set vkey = 117
where (((t3.pkey) in (select t1.vkey from t1 where false
union all
select t0.pkey from t0 join t7 on t0.pkey=t7.vkey where false
)))
or TRUE;
SELECT vkey, pkey FROM t3;
vkey | pkey
---------------------------------------------------------------------
117 | 13000
(1 row)
-- Distributed table delete with redundant WHERE
DELETE FROM t3
where TRUE or (((t3.vkey) >= (select
pg_catalog.regexp_count(ref_0.c5, ref_0.c6)
from t1 as ref_0 where true)) and (select max(vkey) from t0) > 0);
-- Distributed table t3 is now empty
SELECT vkey, pkey FROM t3;
vkey | pkey
---------------------------------------------------------------------
(0 rows)
DROP TABLE local_table;
DROP TABLE t0;
DROP TABLE t1;
DROP TABLE t3;
DROP TABLE t7;
DROP SCHEMA subquery_in_where CASCADE;
SET search_path TO public;


@@ -103,7 +103,7 @@ test: multi_dropped_column_aliases foreign_key_restriction_enforcement
test: binary_protocol
test: alter_table_set_access_method
test: alter_distributed_table
- test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477
+ test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477 issue_7891
test: object_propagation_debug
test: undistribute_table
test: run_command_on_all_nodes


@@ -0,0 +1,169 @@
-- This test validates that the query planner correctly handles nested subqueries involving both a
-- local table (t4_pg) and a reference table (t2_ref). The steps are as follows:
--
-- 1. A dedicated schema (issue_7891) is created, and four tables (t2_ref, t2_ref2, t4_pg, t6_pg) are set up.
-- 2. The tables t2_ref and t2_ref2 are designated as reference tables using the create_reference_table() function.
-- 3. Sample data is inserted into all tables.
-- 4. An UPDATE is executed on t6_pg. The update uses an EXISTS clause with a nested subquery:
-- - The outer subquery iterates over every row in t4_pg.
-- - The inner subquery selects c15 from t2_ref.
-- 5. The update should occur if the nested subquery returns any row, effectively updating t6_pg's vkey to 43.
-- 6. The final state of t6_pg is displayed to confirm that the update was applied.
--
-- Note: This test was originally designed to detect a planner bug where the nested structure might
-- lead to an incorrect plan (such as a 0-task plan), ensuring proper handling of reference and local tables.
-- https://github.com/citusdata/citus/issues/7891
CREATE SCHEMA issue_7891;
SET search_path TO issue_7891;
-- Create tables
CREATE TABLE t2_ref (
vkey INT,
pkey INT,
c15 TIMESTAMP
);
CREATE TABLE t2_ref2 (
vkey INT,
pkey INT,
c15 TIMESTAMP
);
CREATE TABLE t4_pg (
vkey INT,
pkey INT,
c22 NUMERIC,
c23 TEXT,
c24 TIMESTAMP
);
CREATE TABLE t6_pg (
vkey INT,
pkey INT,
c26 TEXT
);
-- Mark t2_ref and t2_ref2 as reference tables
SELECT create_reference_table('t2_ref');
SELECT create_reference_table('t2_ref2');
-- Insert sample data
INSERT INTO t6_pg (vkey, pkey, c26) VALUES
(2, 12000, 'initial'),
(3, 13000, 'will_be_deleted'),
(4, 14000, 'to_merge');
INSERT INTO t4_pg (vkey, pkey, c22, c23, c24)
VALUES (5, 15000, 0.0, ']]?', MAKE_TIMESTAMP(2071, 10, 26, 16, 20, 5));
INSERT INTO t2_ref (vkey, pkey, c15)
VALUES (14, 24000, NULL::timestamp);
-- Show initial data
SELECT 't6_pg before' AS label, * FROM t6_pg;
SELECT 't4_pg data' AS label, * FROM t4_pg;
SELECT 't2_ref data' AS label, * FROM t2_ref;
--
-- The problematic query: update t6_pg referencing t4_pg and sub-subquery on t2_ref.
-- Historically this could produce a 0-task plan if the planner incorrectly failed to
-- treat t4_pg/t2_ref as local/reference tables.
--
-- The outer subquery iterates over every row in table t4_pg.
UPDATE t6_pg
SET vkey = 43
WHERE EXISTS (
SELECT (SELECT c15 FROM t2_ref)
FROM t4_pg
);
SELECT 't6_pg after' AS label, * FROM t6_pg;
--
-- DELETE with a similar nested subquery approach
-- Here we delete rows whenever t4_pg is non-empty (a trivially true check).
-- We'll specifically target the row with c26='will_be_deleted' to confirm it's removed.
--
DELETE FROM t6_pg
WHERE EXISTS (
SELECT (SELECT c15 FROM t2_ref)
FROM t4_pg
)
AND c26 = 'will_be_deleted';
SELECT 't6_pg after DELETE' AS label, * FROM t6_pg;
--
-- We'll merge from t4_pg into t6_pg. The merge will update c26 for pkey=14000.
--
-- Expect an error indicating that non-IMMUTABLE functions are not yet supported in MERGE statements on distributed tables.
-- Retain this comment to highlight the current limitation.
--
MERGE INTO t6_pg AS tgt
USING t4_pg AS src
ON (tgt.pkey = 14000)
WHEN MATCHED THEN
UPDATE SET c26 = 'merged_' || (SELECT pkey FROM t2_ref WHERE pkey=24000 LIMIT 1)
WHEN NOT MATCHED THEN
INSERT (vkey, pkey, c26)
VALUES (99, src.pkey, 'inserted_via_merge');
MERGE INTO t2_ref AS tgt
USING t4_pg AS src
ON (tgt.pkey = src.pkey)
WHEN MATCHED THEN
UPDATE SET c15 = '2088-01-01 00:00:00'::timestamp
WHEN NOT MATCHED THEN
INSERT (vkey, pkey, c15)
VALUES (src.vkey, src.pkey, '2099-12-31 23:59:59'::timestamp);
-- Show the final state of t2_ref:
SELECT 't2_ref after MERGE (using t4_pg)' AS label, * FROM t2_ref;
MERGE INTO t2_ref2 AS tgt
USING t2_ref AS src
ON (tgt.pkey = src.pkey)
WHEN MATCHED THEN
UPDATE SET c15 = '2077-07-07 07:07:07'::timestamp
WHEN NOT MATCHED THEN
INSERT (vkey, pkey, c15)
VALUES (src.vkey, src.pkey, '2066-06-06 06:06:06'::timestamp);
-- Show the final state of t2_ref2:
SELECT 't2_ref2 after MERGE (using t2_ref)' AS label, * FROM t2_ref2;
MERGE INTO t6_pg AS tgt
USING t4_pg AS src
ON (tgt.pkey = src.pkey)
WHEN MATCHED THEN
UPDATE SET c26 = 'merged_value'
WHEN NOT MATCHED THEN
INSERT (vkey, pkey, c26)
VALUES (src.vkey, src.pkey, 'inserted_via_merge');
SELECT 't6_pg after MERGE' AS label, * FROM t6_pg;
--
-- Update the REFERENCE table itself and verify the change
-- This is to ensure that the reference table is correctly handled.
UPDATE t2_ref
SET c15 = '2099-01-01 00:00:00'::timestamp
WHERE pkey = 24000;
SELECT 't2_ref after self-update' AS label, * FROM t2_ref;
UPDATE t2_ref
SET c15 = '2099-01-01 00:00:00'::timestamp
WHERE EXISTS (
SELECT 1
FROM t4_pg
);
SELECT 't2_ref after UPDATE' AS label, * FROM t2_ref;
-- Cleanup
SET client_min_messages TO WARNING;
DROP SCHEMA issue_7891 CASCADE;


@@ -847,8 +847,92 @@ WHERE (SELECT COUNT(DISTINCT e1.value_2)
WHERE e1.user_id = u1.user_id
) > 115 AND false;
-- Test redundant WHERE clause (fix #7782, #7783)
CREATE TABLE t0 (vkey int4, pkey int4, c0 timestamp);
CREATE TABLE t1 (vkey int4, pkey int4, c4 timestamp, c5 text, c6 text);
CREATE TABLE t3 (vkey int4, pkey int4, c9 timestamp);
CREATE TABLE t7 (vkey int4, pkey int4);
-- DEBUG messages not needed for these tests
SET client_min_messages TO DEFAULT;
INSERT INTO t0 (vkey, pkey, c0) values
(3, 13000, make_timestamp(2032, 9, 4, 13, 38, 0));
INSERT INTO t7 (vkey, pkey) values
(3, 59525);
SELECT create_reference_table('t1');
SELECT create_distributed_table('t3', 'c9');
UPDATE t0 set vkey = 117
where (((t0.pkey) in (select t7.vkey from t7 where false
union all
select t3.pkey from t3 where false
)))
or TRUE;
-- Local table t0 is updated
SELECT vkey, pkey, c0 FROM t0;
-- MERGE command with redundant join can be planned locally
EXPLAIN (costs off, timing off)
MERGE INTO t0 USING t7 ON
(((t0.pkey) in (select t7.vkey from t7 where false
union all
select t1.pkey from t1 where false
)))
or TRUE
WHEN MATCHED THEN
UPDATE SET vkey = 113;
-- UPDATE via MERGE with redundant join clause:
MERGE INTO t0 USING t7 ON
(((t0.pkey) in (select t7.vkey from t7 where false
union all
select t1.pkey from t1 where false
)))
or TRUE
WHEN MATCHED THEN
UPDATE SET vkey = 113;
-- Local table t0 is updated
SELECT vkey, pkey, c0 FROM t0;
DELETE FROM t0
where TRUE or (((t0.vkey) >= (select
pg_catalog.regexp_count(ref_0.c5, ref_0.c6)
from t1 as ref_0 where true)));
-- Local table t0 is now empty (0 rows)
SELECT vkey, pkey, c0 FROM t0;
INSERT INTO t3 (vkey, pkey, c9) values
(3, 13000, make_timestamp(2032, 9, 4, 13, 38, 0));
-- Distributed table update with redundant WHERE
UPDATE t3 set vkey = 117
where (((t3.pkey) in (select t1.vkey from t1 where false
union all
select t0.pkey from t0 join t7 on t0.pkey=t7.vkey where false
)))
or TRUE;
SELECT vkey, pkey FROM t3;
-- Distributed table delete with redundant WHERE
DELETE FROM t3
where TRUE or (((t3.vkey) >= (select
pg_catalog.regexp_count(ref_0.c5, ref_0.c6)
from t1 as ref_0 where true)) and (select max(vkey) from t0) > 0);
-- Distributed table t3 is now empty
SELECT vkey, pkey FROM t3;
DROP TABLE local_table;
DROP TABLE t0;
DROP TABLE t1;
DROP TABLE t3;
DROP TABLE t7;
DROP SCHEMA subquery_in_where CASCADE;
SET search_path TO public;