--
-- PG17
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 17 AS server_version_ge_17
\gset
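-- \gset stores the columns of the preceding result into psql variables; the two
-- commands above capture the server version and a boolean server_version_ge_17.
-- That boolean is consumed by the \if :server_version_ge_17 guard further below,
-- which quits the script on servers older than PG17 before the PG17-only tests.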
-- PG17 has the capability to pull up a correlated ANY subquery to a join if
-- the subquery only refers to its immediate parent query. Previously, the
-- subquery needed to be implemented as a SubPlan node, typically as a
-- filter on a scan or join node. This PG17 capability enables Citus to
-- run queries with correlated subqueries in certain cases, as shown here.
-- Relevant PG commit:
-- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=9f1337639
-- This feature is tested for all PG versions, not just PG17; each test query with
-- a correlated subquery should fail with PG version < 17.0, but the test query
-- rewritten to reflect how PG17 optimizes it should succeed with PG < 17.0.
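-- For illustration only (not executed here): the rewrite PG17 performs is the same
-- one the hand-rewritten variants below spell out explicitly. For example, Query 1's
-- correlated ANY sublink
--   WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c WHERE a.x = c.x)
-- can be planned as a lateral join with the deduplicated subquery as the inner side:
--   FROM test a JOIN LATERAL
--     (SELECT x FROM test b UNION SELECT y FROM test c WHERE a.x = c.x) dt1
--     ON a.x = dt1.x
-- instead of a SubPlan filter, which is what lets Citus push the query down.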
CREATE SCHEMA pg17_corr_subq_folding;
SET search_path TO pg17_corr_subq_folding;
SET citus.next_shard_id TO 20240017;
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
CREATE TABLE test (x int, y int);
SELECT create_distributed_table('test', 'x');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO test VALUES (1,1), (2,2);
-- Query 1: WHERE clause has a correlated subquery with a UNION. PG17 can plan
-- this as a nested loop join with the subquery as the inner. The correlation
-- is on the distribution column so the join can be pushed down by Citus.
explain (costs off)
SELECT *
FROM test a
WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c WHERE a.x = c.x)
ORDER BY 1,2;
QUERY PLAN
---------------------------------------------------------------------
 Sort
   Sort Key: remote_scan.x, remote_scan.y
   ->  Custom Scan (Citus Adaptive)
         Task Count: 2
         Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=xxxxx dbname=regression
               ->  Nested Loop
                     ->  Seq Scan on test_20240017 a
                     ->  Subquery Scan on "ANY_subquery"
                           Filter: (a.x = "ANY_subquery".x)
                           ->  HashAggregate
                                 Group Key: b.x
                                 ->  Append
                                       ->  Seq Scan on test_20240017 b
                                       ->  Seq Scan on test_20240017 c
                                             Filter: (a.x = x)
(17 rows)

SET client_min_messages TO DEBUG2;
SELECT *
FROM test a
WHERE x IN (SELECT x FROM test b UNION SELECT y FROM test c WHERE a.x = c.x)
ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---------------------------------------------------------------------
1 | 1
2 | 2
(2 rows)

RESET client_min_messages;
-- Query 1 rewritten with subquery pulled up to a join, as done by the PG17 planner;
-- this query can be run without issues by Citus with older (pre PG17) PGs.
explain (costs off)
SELECT a.*
FROM test a JOIN LATERAL (SELECT x FROM test b UNION SELECT y FROM test c WHERE a.x = c.x) dt1 ON a.x = dt1.x
ORDER BY 1,2;
QUERY PLAN
---------------------------------------------------------------------
 Sort
   Sort Key: remote_scan.x, remote_scan.y
   ->  Custom Scan (Citus Adaptive)
         Task Count: 2
         Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=xxxxx dbname=regression
               ->  Nested Loop
                     ->  Seq Scan on test_20240017 a
                     ->  Subquery Scan on dt1
                           Filter: (a.x = dt1.x)
                           ->  HashAggregate
                                 Group Key: b.x
                                 ->  Append
                                       ->  Seq Scan on test_20240017 b
                                       ->  Seq Scan on test_20240017 c
                                             Filter: (a.x = x)
(17 rows)

SET client_min_messages TO DEBUG2;
SELECT a.*
FROM test a JOIN LATERAL (SELECT x FROM test b UNION SELECT y FROM test c WHERE a.x = c.x) dt1 ON a.x = dt1.x
ORDER BY 1,2;
DEBUG: Router planner cannot handle multi-shard select queries
x | y
---------------------------------------------------------------------
1 | 1
2 | 2
(2 rows)

RESET client_min_messages;
CREATE TABLE users (user_id int, time int, dept int, info bigint);
CREATE TABLE events (user_id int, time int, event_type int, payload text);
select create_distributed_table('users', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

select create_distributed_table('events', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

insert into users
select i, 2021 + (i % 3), i % 5, 99999 * i from generate_series(1, 10) i;
insert into events
select i % 10 + 1, 2021 + (i % 3), i %11, md5((i*i)::text) from generate_series(1, 100) i;
-- Query 2. In Citus correlated subqueries cannot be used in the WHERE
-- clause but if the subquery can be pulled up to a join it becomes possible
-- for Citus to run the query, per this example. Pre-PG17 the subquery
-- was implemented as a SubPlan filter on the events table scan.
EXPLAIN (costs off)
WITH event_id
AS(SELECT user_id AS events_user_id,
time AS events_time,
event_type
FROM events)
SELECT Count(*)
FROM event_id
WHERE (events_user_id) IN (SELECT user_id
FROM users
WHERE users.time = events_time);
QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   ->  Custom Scan (Citus Adaptive)
         Task Count: 2
         Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     ->  Hash Join
                           Hash Cond: ((events."time" = users."time") AND (events.user_id = users.user_id))
                           ->  Seq Scan on events_20240021 events
                           ->  Hash
                                 ->  HashAggregate
                                       Group Key: users."time", users.user_id
                                       ->  Seq Scan on users_20240019 users
(14 rows)

SET client_min_messages TO DEBUG2;
WITH event_id
AS(SELECT user_id AS events_user_id,
time AS events_time,
event_type
FROM events)
SELECT Count(*)
FROM event_id
WHERE (events_user_id) IN (SELECT user_id
FROM users
WHERE users.time = events_time);
DEBUG: CTE event_id is going to be inlined via distributed planning
DEBUG: Router planner cannot handle multi-shard select queries
count
---------------------------------------------------------------------
31
(1 row)

RESET client_min_messages;
-- Query 2 rewritten with subquery pulled up to a join, as done by the PG17 planner.
-- Citus is able to run this query with previous PG versions. Note that the CTE can be
-- disregarded because it is inlined, being only referenced once.
EXPLAIN (COSTS OFF)
SELECT Count(*)
FROM (SELECT user_id AS events_user_id,
time AS events_time,
event_type FROM events) dt1
INNER JOIN (SELECT distinct user_id, time FROM users) dt
ON events_user_id = dt.user_id and events_time = dt.time;
QUERY PLAN
---------------------------------------------------------------------
 Aggregate
   ->  Custom Scan (Citus Adaptive)
         Task Count: 2
         Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=xxxxx dbname=regression
               ->  Aggregate
                     ->  Hash Join
                           Hash Cond: ((events.user_id = users.user_id) AND (events."time" = users."time"))
                           ->  Seq Scan on events_20240021 events
                           ->  Hash
                                 ->  HashAggregate
                                       Group Key: users.user_id, users."time"
                                       ->  Seq Scan on users_20240019 users
(14 rows)

SET client_min_messages TO DEBUG2;
SELECT Count(*)
FROM (SELECT user_id AS events_user_id,
time AS events_time,
event_type FROM events) dt1
INNER JOIN (SELECT distinct user_id, time FROM users) dt
ON events_user_id = dt.user_id and events_time = dt.time;
DEBUG: Router planner cannot handle multi-shard select queries
count
---------------------------------------------------------------------
31
(1 row)

RESET client_min_messages;
-- Query 3: another example where recursive planning was prevented due to
-- correlated subqueries, but with PG17 folding the subquery to a join it is
-- possible for Citus to plan and run the query.
EXPLAIN (costs off)
SELECT dept, sum(user_id) FROM
(SELECT users.dept, users.user_id
FROM users, events as d1
WHERE d1.user_id = users.user_id
AND users.dept IN (3,4)
AND users.user_id IN
(SELECT s2.user_id FROM users as s2
GROUP BY d1.user_id, s2.user_id)) dt
GROUP BY dept;
QUERY PLAN
---------------------------------------------------------------------
 HashAggregate
   Group Key: remote_scan.dept
   ->  Custom Scan (Citus Adaptive)
         Task Count: 2
         Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=xxxxx dbname=regression
               ->  GroupAggregate
                     Group Key: users.dept
                     ->  Sort
                           Sort Key: users.dept
                           ->  Nested Loop Semi Join
                                 ->  Hash Join
                                       Hash Cond: (d1.user_id = users.user_id)
                                       ->  Seq Scan on events_20240021 d1
                                       ->  Hash
                                             ->  Seq Scan on users_20240019 users
                                                   Filter: (dept = ANY ('{3,4}'::integer[]))
                                 ->  Subquery Scan on "ANY_subquery"
                                       Filter: (d1.user_id = "ANY_subquery".user_id)
                                       ->  HashAggregate
                                             Group Key: s2.user_id
                                             ->  Seq Scan on users_20240019 s2
(23 rows)

SET client_min_messages TO DEBUG2;
SELECT dept, sum(user_id) FROM
(SELECT users.dept, users.user_id
FROM users, events as d1
WHERE d1.user_id = users.user_id
AND users.dept IN (3,4)
AND users.user_id IN
(SELECT s2.user_id FROM users as s2
GROUP BY d1.user_id, s2.user_id)) dt
GROUP BY dept;
DEBUG: Router planner cannot handle multi-shard select queries
dept | sum
---------------------------------------------------------------------
3 | 110
4 | 130
(2 rows)

RESET client_min_messages;
-- Query 3 rewritten in a similar way to how the PG17 planner pulls up the subquery;
-- the join is on the distribution key so Citus can push down.
EXPLAIN (costs off)
SELECT dept, sum(user_id) FROM
(SELECT users.dept, users.user_id
FROM users, events as d1
JOIN LATERAL (SELECT s2.user_id FROM users as s2
GROUP BY s2.user_id HAVING d1.user_id IS NOT NULL) as d2 ON 1=1
WHERE d1.user_id = users.user_id
AND users.dept IN (3,4)
AND users.user_id = d2.user_id) dt
GROUP BY dept;
QUERY PLAN
---------------------------------------------------------------------
 HashAggregate
   Group Key: remote_scan.dept
   ->  Custom Scan (Citus Adaptive)
         Task Count: 2
         Tasks Shown: One of 2
         ->  Task
               Node: host=localhost port=xxxxx dbname=regression
               ->  GroupAggregate
                     Group Key: users.dept
                     ->  Sort
                           Sort Key: users.dept
                           ->  Nested Loop
                                 ->  Hash Join
                                       Hash Cond: (d1.user_id = users.user_id)
                                       ->  Seq Scan on events_20240021 d1
                                       ->  Hash
                                             ->  Seq Scan on users_20240019 users
                                                   Filter: (dept = ANY ('{3,4}'::integer[]))
                                 ->  Subquery Scan on d2
                                       Filter: (d1.user_id = d2.user_id)
                                       ->  HashAggregate
                                             Group Key: s2.user_id
                                             ->  Result
                                                   One-Time Filter: (d1.user_id IS NOT NULL)
                                                   ->  Seq Scan on users_20240019 s2
(25 rows)

SET client_min_messages TO DEBUG2;
SELECT dept, sum(user_id) FROM
(SELECT users.dept, users.user_id
FROM users, events as d1
JOIN LATERAL (SELECT s2.user_id FROM users as s2
GROUP BY s2.user_id HAVING d1.user_id IS NOT NULL) as d2 ON 1=1
WHERE d1.user_id = users.user_id
AND users.dept IN (3,4)
AND users.user_id = d2.user_id) dt
GROUP BY dept;
DEBUG: Router planner cannot handle multi-shard select queries
dept | sum
---------------------------------------------------------------------
3 | 110
4 | 130
(2 rows)

RESET client_min_messages;
RESET search_path;
DROP SCHEMA pg17_corr_subq_folding CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table pg17_corr_subq_folding.test
drop cascades to table pg17_corr_subq_folding.users
drop cascades to table pg17_corr_subq_folding.events
\if :server_version_ge_17
\else
\q
\endif
-- PG17-specific tests go here.
--
CREATE SCHEMA pg17;
SET search_path to pg17;
-- Test specifying access method on partitioned tables. PG17 feature, added by:
-- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=374c7a229
-- The following tests were failing in tableam but will pass on PG >= 17.
-- There is some set-up duplicated from tableam, and this test can be returned
-- to tableam when 17 is the minimum supported PG version.
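-- Note: fake_am (created below, with its handler loaded from the 'citus' library)
-- is a dummy table access method whose callbacks raise WARNINGs such as
-- fake_tuple_insert and fake_scan_getnextslot, so the output below shows which
-- access method actually handled each insert and scan.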
SELECT public.run_command_on_coordinator_and_workers($Q$
SET citus.enable_ddl_propagation TO off;
CREATE FUNCTION fake_am_handler(internal)
RETURNS table_am_handler
AS 'citus'
LANGUAGE C;
CREATE ACCESS METHOD fake_am TYPE TABLE HANDLER fake_am_handler;
$Q$);
run_command_on_coordinator_and_workers
---------------------------------------------------------------------

(1 row)

-- Since Citus assumes access methods are part of the extension, make fake_am
-- owned manually to be able to pass checks on Citus while distributing tables.
ALTER EXTENSION citus ADD ACCESS METHOD fake_am;
CREATE TABLE test_partitioned(id int, p int, val int)
PARTITION BY RANGE (p) USING fake_am;
-- Test that children inherit access method from parent
CREATE TABLE test_partitioned_p1 PARTITION OF test_partitioned
FOR VALUES FROM (1) TO (10);
CREATE TABLE test_partitioned_p2 PARTITION OF test_partitioned
FOR VALUES FROM (11) TO (20);
INSERT INTO test_partitioned VALUES (1, 5, -1), (2, 15, -2);
WARNING: fake_tuple_insert
WARNING: fake_tuple_insert
INSERT INTO test_partitioned VALUES (3, 6, -6), (4, 16, -4);
WARNING: fake_tuple_insert
WARNING: fake_tuple_insert
SELECT count(1) FROM test_partitioned_p1;
WARNING: fake_scan_getnextslot
WARNING: fake_scan_getnextslot
WARNING: fake_scan_getnextslot
count
---------------------------------------------------------------------
2
(1 row)

SELECT count(1) FROM test_partitioned_p2;
WARNING: fake_scan_getnextslot
WARNING: fake_scan_getnextslot
WARNING: fake_scan_getnextslot
count
---------------------------------------------------------------------
2
(1 row)

-- Both child table partitions inherit fake_am
SELECT c.relname, am.amname FROM pg_class c, pg_am am
WHERE c.relam = am.oid AND c.oid IN ('test_partitioned_p1'::regclass, 'test_partitioned_p2'::regclass)
ORDER BY c.relname;
relname | amname
---------------------------------------------------------------------
test_partitioned_p1 | fake_am
test_partitioned_p2 | fake_am
(2 rows)

-- Clean up
DROP TABLE test_partitioned;
ALTER EXTENSION citus DROP ACCESS METHOD fake_am;
SELECT public.run_command_on_coordinator_and_workers($Q$
RESET citus.enable_ddl_propagation;
$Q$);
run_command_on_coordinator_and_workers
---------------------------------------------------------------------

(1 row)

-- End of testing specifying access method on partitioned tables.
-- MAINTAIN privilege tests
CREATE ROLE regress_maintain;
CREATE ROLE regress_no_maintain;
ALTER ROLE regress_maintain WITH login;
GRANT USAGE ON SCHEMA pg17 TO regress_maintain;
ALTER ROLE regress_no_maintain WITH login;
GRANT USAGE ON SCHEMA pg17 TO regress_no_maintain;
SET citus.shard_count TO 1; -- For consistent remote command logging
CREATE TABLE dist_test(a int, b int);
SELECT create_distributed_table('dist_test', 'a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO dist_test SELECT i % 10, i FROM generate_series(1, 100) t(i);
SET citus.log_remote_commands TO on;
SET citus.grep_remote_commands = '%maintain%';
GRANT MAINTAIN ON dist_test TO regress_maintain;
NOTICE: issuing GRANT maintain ON dist_test TO regress_maintain
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing GRANT maintain ON dist_test TO regress_maintain
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT worker_apply_shard_ddl_command (20240023, 'pg17', 'GRANT maintain ON dist_test TO regress_maintain')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
RESET citus.grep_remote_commands;
SET ROLE regress_no_maintain;
-- Current role does not have MAINTAIN privileges on dist_test
ANALYZE dist_test;
WARNING: permission denied to analyze "dist_test", skipping it
NOTICE: issuing ANALYZE pg17.dist_test_20240023
DETAIL: on server regress_no_maintain@localhost:xxxxx connectionId: xxxxxxx
VACUUM dist_test;
WARNING: permission denied to vacuum "dist_test", skipping it
NOTICE: issuing VACUUM pg17.dist_test_20240023
DETAIL: on server regress_no_maintain@localhost:xxxxx connectionId: xxxxxxx
SET ROLE regress_maintain;
-- Current role has MAINTAIN privileges on dist_test
ANALYZE dist_test;
NOTICE: issuing ANALYZE pg17.dist_test_20240023
DETAIL: on server regress_maintain@localhost:xxxxx connectionId: xxxxxxx
VACUUM dist_test;
NOTICE: issuing VACUUM pg17.dist_test_20240023
DETAIL: on server regress_maintain@localhost:xxxxx connectionId: xxxxxxx
-- Take away regress_maintain's MAINTAIN privileges on dist_test
RESET ROLE;
SET citus.grep_remote_commands = '%maintain%';
REVOKE MAINTAIN ON dist_test FROM regress_maintain;
NOTICE: issuing REVOKE maintain ON dist_test FROM regress_maintain
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REVOKE maintain ON dist_test FROM regress_maintain
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT worker_apply_shard_ddl_command (20240023, 'pg17', 'REVOKE maintain ON dist_test FROM regress_maintain')
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
RESET citus.grep_remote_commands;
SET ROLE regress_maintain;
-- Current role does not have MAINTAIN privileges on dist_test
ANALYZE dist_test;
WARNING: permission denied to analyze "dist_test", skipping it
NOTICE: issuing ANALYZE pg17.dist_test_20240023
DETAIL: on server regress_maintain@localhost:xxxxx connectionId: xxxxxxx
VACUUM dist_test;
WARNING: permission denied to vacuum "dist_test", skipping it
NOTICE: issuing VACUUM pg17.dist_test_20240023
DETAIL: on server regress_maintain@localhost:xxxxx connectionId: xxxxxxx
RESET ROLE;
-- End of MAINTAIN privilege tests
-- Partitions inherit identity column
RESET citus.log_remote_commands;
-- PG17 added support for identity columns in partitioned tables:
-- https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=699586315
-- In particular, partitions with their own identity columns are not allowed.
-- Citus does not need to propagate identity columns in partitions; the identity
-- is inherited by PG17 behavior, as shown in this test.
CREATE TABLE partitioned_table (
a bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
c int
)
PARTITION BY RANGE (c);
CREATE TABLE pt_1 PARTITION OF partitioned_table FOR VALUES FROM (1) TO (50);
SELECT create_distributed_table('partitioned_table', 'a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE pt_2 PARTITION OF partitioned_table FOR VALUES FROM (50) TO (1000);
-- (1) The partitioned table has pt_1 and pt_2 as its partitions
\d+ partitioned_table;
Partitioned table "pg17.partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: pt_1 FOR VALUES FROM (1) TO (50),
            pt_2 FOR VALUES FROM (50) TO (1000)

-- (2) The partitions have the same identity column as the parent table;
-- This is PG17 behavior for support for identity in partitioned tables.
\d pt_1;
Table "pg17.pt_1"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: partitioned_table FOR VALUES FROM (1) TO (50)

\d pt_2;
Table "pg17.pt_2"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: partitioned_table FOR VALUES FROM (50) TO (1000)

-- Attaching a partition inherits the identity column from the parent table
CREATE TABLE pt_3 (a bigint not null, c int);
ALTER TABLE partitioned_table ATTACH PARTITION pt_3 FOR VALUES FROM (1000) TO (2000);
\d+ partitioned_table;
Partitioned table "pg17.partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: pt_1 FOR VALUES FROM (1) TO (50),
            pt_2 FOR VALUES FROM (50) TO (1000),
            pt_3 FOR VALUES FROM (1000) TO (2000)

\d pt_3;
Table "pg17.pt_3"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: partitioned_table FOR VALUES FROM (1000) TO (2000)

-- Partition pt_4 has its own identity column, which is not allowed in PG17
-- and will produce an error on attempting to attach it to the partitioned table
CREATE TABLE pt_4 (a bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10), c int);
ALTER TABLE partitioned_table ATTACH PARTITION pt_4 FOR VALUES FROM (2000) TO (3000);
ERROR: table "pt_4" being attached contains an identity column "a"
DETAIL: The new partition may not contain an identity column.
\c - - - :worker_1_port
SET search_path TO pg17;
-- Show that DDL for partitioned_table has correctly propagated to the worker node;
-- (1) The partitioned table has pt_1, pt_2 and pt_3 as its partitions
\d+ partitioned_table;
Partitioned table "pg17.partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: pt_1 FOR VALUES FROM (1) TO (50),
            pt_2 FOR VALUES FROM (50) TO (1000),
            pt_3 FOR VALUES FROM (1000) TO (2000)

-- (2) The partitions have the same identity column as the parent table
\d pt_1;
Table "pg17.pt_1"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: partitioned_table FOR VALUES FROM (1) TO (50)

\d pt_2;
Table "pg17.pt_2"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: partitioned_table FOR VALUES FROM (50) TO (1000)

\d pt_3;
Table "pg17.pt_3"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: partitioned_table FOR VALUES FROM (1000) TO (2000)

\c - - - :master_port
SET search_path TO pg17;
-- Test detaching a partition with an identity column
ALTER TABLE partitioned_table DETACH PARTITION pt_3;
-- partitioned_table has pt_1, pt_2 as its partitions
-- and pt_3 does not have an identity column
\d+ partitioned_table;
Partitioned table "pg17.partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: pt_1 FOR VALUES FROM (1) TO (50),
            pt_2 FOR VALUES FROM (50) TO (1000)

\d pt_3;
Table "pg17.pt_3"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null |
c | integer | | |

-- Verify that the detach has propagated to the worker node
\c - - - :worker_1_port
SET search_path TO pg17;
\d+ partitioned_table;
Partitioned table "pg17.partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: pt_1 FOR VALUES FROM (1) TO (50),
            pt_2 FOR VALUES FROM (50) TO (1000)

\d pt_3;
Table "pg17.pt_3"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null |
c | integer | | |

\c - - - :master_port
SET search_path TO pg17;
CREATE TABLE alt_test (a int, b date, c int) PARTITION BY RANGE(c);
SELECT create_distributed_table('alt_test', 'a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE alt_test_pt_1 PARTITION OF alt_test FOR VALUES FROM (1) TO (50);
CREATE TABLE alt_test_pt_2 PARTITION OF alt_test FOR VALUES FROM (50) TO (100);
-- Citus does not support adding an identity column for a distributed table (#6738)
-- Attempting to add a column with identity produces an error
ALTER TABLE alt_test ADD COLUMN d bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10);
ERROR: cannot execute ADD COLUMN commands involving identity columns when metadata is synchronized to workers
-- alter table set identity is currently not supported, so adding identity to
-- an existing column generates an error
ALTER TABLE alt_test ALTER COLUMN a SET GENERATED BY DEFAULT SET INCREMENT BY 2 SET START WITH 75 RESTART;
ERROR: alter table command is currently unsupported
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
-- Verify that the identity column was not added, on coordinator and worker nodes
\d+ alt_test;
Partitioned table "pg17.alt_test"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | integer | | | | plain | |
b | date | | | | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: alt_test_pt_1 FOR VALUES FROM (1) TO (50),
            alt_test_pt_2 FOR VALUES FROM (50) TO (100)

\d alt_test_pt_1;
Table "pg17.alt_test_pt_1"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | integer | | |
b | date | | |
c | integer | | |
Partition of: alt_test FOR VALUES FROM (1) TO (50)

\d alt_test_pt_2;
Table "pg17.alt_test_pt_2"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | integer | | |
b | date | | |
c | integer | | |
Partition of: alt_test FOR VALUES FROM (50) TO (100)

\c - - - :worker_1_port
SET search_path TO pg17;
\d+ alt_test;
Partitioned table "pg17.alt_test"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | integer | | | | plain | |
b | date | | | | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: alt_test_pt_1 FOR VALUES FROM (1) TO (50),
            alt_test_pt_2 FOR VALUES FROM (50) TO (100)

\d alt_test_pt_1;
Table "pg17.alt_test_pt_1"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | integer | | |
b | date | | |
c | integer | | |
Partition of: alt_test FOR VALUES FROM (1) TO (50)

\d alt_test_pt_2;
Table "pg17.alt_test_pt_2"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | integer | | |
b | date | | |
c | integer | | |
Partition of: alt_test FOR VALUES FROM (50) TO (100)

\c - - - :master_port
SET search_path TO pg17;
DROP TABLE alt_test;
CREATE TABLE alt_test (a bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
b int,
c int)
PARTITION BY RANGE(c);
SELECT create_distributed_table('alt_test', 'b');
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE alt_test_pt_1 PARTITION OF alt_test FOR VALUES FROM (1) TO (50);
CREATE TABLE alt_test_pt_2 PARTITION OF alt_test FOR VALUES FROM (50) TO (100);
-- Dropping of the identity property from a column is currently not supported;
-- Attempting to drop identity produces an error
ALTER TABLE alt_test ALTER COLUMN a DROP IDENTITY;
ERROR: alter table command is currently unsupported
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
-- Verify that alt_test still has identity on column a
\d+ alt_test;
Partitioned table "pg17.alt_test"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
b | integer | | | | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: alt_test_pt_1 FOR VALUES FROM (1) TO (50),
            alt_test_pt_2 FOR VALUES FROM (50) TO (100)

\d alt_test_pt_1;
Table "pg17.alt_test_pt_1"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | integer | | |
c | integer | | |
Partition of: alt_test FOR VALUES FROM (1) TO (50)

\d alt_test_pt_2;
Table "pg17.alt_test_pt_2"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | integer | | |
c | integer | | |
Partition of: alt_test FOR VALUES FROM (50) TO (100)

\c - - - :worker_1_port
SET search_path TO pg17;
\d+ alt_test;
Partitioned table "pg17.alt_test"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
b | integer | | | | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: alt_test_pt_1 FOR VALUES FROM (1) TO (50),
            alt_test_pt_2 FOR VALUES FROM (50) TO (100)

\d alt_test_pt_1;
Table "pg17.alt_test_pt_1"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | integer | | |
c | integer | | |
Partition of: alt_test FOR VALUES FROM (1) TO (50)

\d alt_test_pt_2
Table "pg17.alt_test_pt_2"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
b | integer | | |
c | integer | | |
Partition of: alt_test FOR VALUES FROM (50) TO (100)

\c - - - :master_port
SET search_path TO pg17;
-- Repeat testing of partitions with identity column on a citus local table
CREATE TABLE local_partitioned_table (
a bigint GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 10),
c int
)
PARTITION BY RANGE (c);
CREATE TABLE lpt_1 PARTITION OF local_partitioned_table FOR VALUES FROM (1) TO (50);
SELECT citus_add_local_table_to_metadata('local_partitioned_table');
citus_add_local_table_to_metadata
---------------------------------------------------------------------

(1 row)

-- Can create tables as partitions and attach tables as partitions to a citus local table:
CREATE TABLE lpt_2 PARTITION OF local_partitioned_table FOR VALUES FROM (50) TO (1000);
CREATE TABLE lpt_3 (a bigint not null, c int);
ALTER TABLE local_partitioned_table ATTACH PARTITION lpt_3 FOR VALUES FROM (1000) TO (2000);
-- The partitions have the same identity column as the parent table, on coordinator and worker nodes
\d+ local_partitioned_table;
Partitioned table "pg17.local_partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: lpt_1 FOR VALUES FROM (1) TO (50),
            lpt_2 FOR VALUES FROM (50) TO (1000),
            lpt_3 FOR VALUES FROM (1000) TO (2000)

\d lpt_1;
Table "pg17.lpt_1"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: local_partitioned_table FOR VALUES FROM (1) TO (50)

\d lpt_2;
Table "pg17.lpt_2"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: local_partitioned_table FOR VALUES FROM (50) TO (1000)

\d lpt_3;
Table "pg17.lpt_3"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: local_partitioned_table FOR VALUES FROM (1000) TO (2000)

\c - - - :worker_1_port
SET search_path TO pg17;
\d+ local_partitioned_table;
Partitioned table "pg17.local_partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: lpt_1 FOR VALUES FROM (1) TO (50),
            lpt_2 FOR VALUES FROM (50) TO (1000),
            lpt_3 FOR VALUES FROM (1000) TO (2000)

\d lpt_1;
Table "pg17.lpt_1"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: local_partitioned_table FOR VALUES FROM (1) TO (50)

\d lpt_2;
Table "pg17.lpt_2"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: local_partitioned_table FOR VALUES FROM (50) TO (1000)

\d lpt_3;
Table "pg17.lpt_3"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity
c | integer | | |
Partition of: local_partitioned_table FOR VALUES FROM (1000) TO (2000)

\c - - - :master_port
SET search_path TO pg17;
-- Test detaching a partition with an identity column from a citus local table
ALTER TABLE local_partitioned_table DETACH PARTITION lpt_3;
\d+ local_partitioned_table;
Partitioned table "pg17.local_partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: lpt_1 FOR VALUES FROM (1) TO (50),
            lpt_2 FOR VALUES FROM (50) TO (1000)

\d lpt_3;
Table "pg17.lpt_3"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null |
c | integer | | |

\c - - - :worker_1_port
SET search_path TO pg17;
\d+ local_partitioned_table;
Partitioned table "pg17.local_partitioned_table"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
a | bigint | | not null | generated by default as identity | plain | |
c | integer | | | | plain | |
Partition key: RANGE (c)
Partitions: lpt_1 FOR VALUES FROM (1) TO (50),
            lpt_2 FOR VALUES FROM (50) TO (1000)

\d lpt_3;
Table "pg17.lpt_3"
Column | Type | Collation | Nullable | Default
---------------------------------------------------------------------
a | bigint | | not null |
c | integer | | |

\c - - - :master_port
SET search_path TO pg17;
DROP TABLE partitioned_table;
DROP TABLE local_partitioned_table;
DROP TABLE lpt_3;
DROP TABLE pt_3;
DROP TABLE pt_4;
DROP TABLE alt_test;
-- End of partition with identity columns testing
-- Correlated sublinks are now supported as of PostgreSQL 17, resolving issue #4470.
-- Enable DEBUG-level logging to capture detailed execution plans
-- Create the tables
CREATE TABLE postgres_table (key int, value text, value_2 jsonb);
CREATE TABLE reference_table (key int, value text, value_2 jsonb);
SELECT create_reference_table('reference_table');
create_reference_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE distributed_table (key int, value text, value_2 jsonb);
SELECT create_distributed_table('distributed_table', 'key');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- Insert test data
INSERT INTO postgres_table SELECT i, i::varchar(256), '{}'::jsonb FROM generate_series(1, 10) i;
INSERT INTO reference_table SELECT i, i::varchar(256), '{}'::jsonb FROM generate_series(1, 10) i;
INSERT INTO distributed_table SELECT i, i::varchar(256), '{}'::jsonb FROM generate_series(1, 10) i;
-- Set local table join policy to auto before running the tests
SET citus.local_table_join_policy TO 'auto';
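-- Note: under the 'auto' policy Citus recursively plans the plain PostgreSQL table
-- (the DEBUG output below shows postgres_table being wrapped into a subquery), while
-- the 'prefer-distributed' policy exercised later wraps distributed_table instead,
-- which leaves a direct local/distributed join and therefore fails with an error.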
SET client_min_messages TO DEBUG1;
-- Correlated sublinks are supported in PostgreSQL 17
SELECT COUNT(*) FROM distributed_table d1 JOIN postgres_table USING (key)
WHERE d1.key IN (SELECT key FROM distributed_table WHERE d1.key = key AND key = 5);
DEBUG: Wrapping relation "postgres_table" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM pg17.postgres_table WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (pg17.distributed_table d1 JOIN (SELECT postgres_table_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) postgres_table_1) postgres_table USING (key)) WHERE (d1.key OPERATOR(pg_catalog.=) ANY (SELECT distributed_table.key FROM pg17.distributed_table WHERE ((d1.key OPERATOR(pg_catalog.=) distributed_table.key) AND (distributed_table.key OPERATOR(pg_catalog.=) 5))))
count
---------------------------------------------------------------------
1
(1 row)

SELECT COUNT(*) FROM distributed_table d1 JOIN postgres_table USING (key)
WHERE d1.key IN (SELECT key FROM distributed_table WHERE d1.key = key AND key = 5);
DEBUG: Wrapping relation "postgres_table" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM pg17.postgres_table WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (pg17.distributed_table d1 JOIN (SELECT postgres_table_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) postgres_table_1) postgres_table USING (key)) WHERE (d1.key OPERATOR(pg_catalog.=) ANY (SELECT distributed_table.key FROM pg17.distributed_table WHERE ((d1.key OPERATOR(pg_catalog.=) distributed_table.key) AND (distributed_table.key OPERATOR(pg_catalog.=) 5))))
count
---------------------------------------------------------------------
1
(1 row)

SET citus.local_table_join_policy TO 'prefer-distributed';
SELECT COUNT(*) FROM distributed_table d1 JOIN postgres_table USING (key)
WHERE d1.key IN (SELECT key FROM distributed_table WHERE d1.key = key AND key = 5);
DEBUG: Wrapping relation "distributed_table" "d1" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT key FROM pg17.distributed_table d1 WHERE (key OPERATOR(pg_catalog.=) 5)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT d1_1.key, NULL::text AS value, NULL::jsonb AS value_2 FROM (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) d1_1) d1 JOIN pg17.postgres_table USING (key)) WHERE (d1.key OPERATOR(pg_catalog.=) ANY (SELECT distributed_table.key FROM pg17.distributed_table WHERE ((d1.key OPERATOR(pg_catalog.=) distributed_table.key) AND (distributed_table.key OPERATOR(pg_catalog.=) 5))))
ERROR: direct joins between distributed and local tables are not supported
HINT: Use CTE's or subqueries to select from local tables and use them in joins
RESET citus.local_table_join_policy;
RESET client_min_messages;
DROP TABLE reference_table;
-- End of correlated sublink support tests (supported as of PostgreSQL 17, resolving issue #4470).
DROP SCHEMA pg17 CASCADE;
NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to function fake_am_handler(internal)
drop cascades to access method fake_am
drop cascades to table dist_test
drop cascades to table postgres_table
drop cascades to table distributed_table
DROP ROLE regress_maintain;
DROP ROLE regress_no_maintain;