mirror of https://github.com/citusdata/citus.git

Fix regression test output changes post-6.0

parent 0f490b13f2
commit 1f53974f10
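An aside on the EXPLAIN changes in the hunks below (illustrative, not part of the commit): planner cost estimates such as "(cost=4.30..13.44 rows=3 width=18)" depend on statistics and can drift between runs, so the tests switch to EXPLAIN (COSTS FALSE), which prints only the stable plan shape. A minimal sketch, assuming a lineitem table like the one used in these tests:

-- hypothetical example: COSTS FALSE drops the volatile cost/row/width annotations
EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;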
@@ -688,16 +688,16 @@ Master Query
         -> Seq Scan on pg_merge_job_570038
 SET citus.task_executor_type TO 'real-time';
 PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
-EXPLAIN EXECUTE router_executor_query;
+EXPLAIN (COSTS FALSE) EXECUTE router_executor_query;
 Distributed Query into pg_merge_job_570039
   Executor: Router
   Task Count: 1
   Tasks Shown: All
     -> Task
       Node: host=localhost port=57637 dbname=regression
-      -> Bitmap Heap Scan on lineitem_290000 lineitem (cost=4.30..13.44 rows=3 width=18)
+      -> Bitmap Heap Scan on lineitem_290000 lineitem
            Recheck Cond: (l_orderkey = 5)
-           -> Bitmap Index Scan on lineitem_pkey_290000 (cost=0.00..4.30 rows=3 width=0)
+           -> Bitmap Index Scan on lineitem_pkey_290000
                 Index Cond: (l_orderkey = 5)
 PREPARE real_time_executor_query AS
     SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
@@ -659,16 +659,16 @@ Master Query
         -> Seq Scan on pg_merge_job_570038
 SET citus.task_executor_type TO 'real-time';
 PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
-EXPLAIN EXECUTE router_executor_query;
+EXPLAIN (COSTS FALSE) EXECUTE router_executor_query;
 Distributed Query into pg_merge_job_570039
   Executor: Router
   Task Count: 1
   Tasks Shown: All
     -> Task
       Node: host=localhost port=57637 dbname=regression
-      -> Bitmap Heap Scan on lineitem_290000 lineitem (cost=4.30..13.44 rows=3 width=18)
+      -> Bitmap Heap Scan on lineitem_290000 lineitem
            Recheck Cond: (l_orderkey = 5)
-           -> Bitmap Index Scan on lineitem_pkey_290000 (cost=0.00..4.30 rows=3 width=0)
+           -> Bitmap Index Scan on lineitem_pkey_290000
                 Index Cond: (l_orderkey = 5)
 PREPARE real_time_executor_query AS
     SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
@@ -108,9 +108,9 @@ CREATE TABLE clustered_table (
 CREATE INDEX clustered_time_idx ON clustered_table (received_at);
 CLUSTER clustered_table USING clustered_time_idx;
 SELECT table_ddl_command_array('clustered_table');
                                                               table_ddl_command_array
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-{"CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL)","CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at)","ALTER TABLE public.clustered_table CLUSTER ON clustered_time_idx"}
+{"CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL)","CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) TABLESPACE pg_default","ALTER TABLE public.clustered_table CLUSTER ON clustered_time_idx"}
 (1 row)
 
 -- fiddly things like storage type and statistics also work
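The hunk above also reflects index DDL that now names its tablespace explicitly. A minimal sketch of an equivalent statement (assuming the cluster default tablespace, pg_default):

-- hypothetical example: naming pg_default pins the index to the default tablespace,
-- which is normally the same as omitting the clause
CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) TABLESPACE pg_default;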
@@ -0,0 +1,156 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 610000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 610000;
+-- ===================================================================
+-- create test functions
+-- ===================================================================
+CREATE FUNCTION table_ddl_command_array(regclass)
+    RETURNS text[]
+    AS 'citus'
+    LANGUAGE C STRICT;
+-- ===================================================================
+-- test ddl command generation functionality
+-- ===================================================================
+-- first make sure a simple table works
+CREATE TABLE simple_table (
+    first_name text,
+    last_name text,
+    id bigint
+);
+SELECT table_ddl_command_array('simple_table');
+                              table_ddl_command_array
+------------------------------------------------------------------------------------
+ {"CREATE TABLE public.simple_table (first_name text, last_name text, id bigint)"}
+(1 row)
+
+-- ensure not-null constraints are propagated
+CREATE TABLE not_null_table (
+    city text,
+    id bigint not null
+);
+SELECT table_ddl_command_array('not_null_table');
+                        table_ddl_command_array
+-------------------------------------------------------------------------
+ {"CREATE TABLE public.not_null_table (city text, id bigint NOT NULL)"}
+(1 row)
+
+-- ensure tables not in search path are schema-prefixed
+CREATE SCHEMA not_in_path CREATE TABLE simple_table (id bigint);
+SELECT table_ddl_command_array('not_in_path.simple_table');
+                                  table_ddl_command_array
+---------------------------------------------------------------------------------------------------
+ {"CREATE SCHEMA IF NOT EXISTS not_in_path","CREATE TABLE not_in_path.simple_table (id bigint)"}
+(1 row)
+
+-- even more complex constraints should be preserved...
+CREATE TABLE column_constraint_table (
+    first_name text,
+    last_name text,
+    age int CONSTRAINT non_negative_age CHECK (age >= 0)
+);
+SELECT table_ddl_command_array('column_constraint_table');
+                                                       table_ddl_command_array
+------------------------------------------------------------------------------------------------------------------------------------------------
+ {"CREATE TABLE public.column_constraint_table (first_name text, last_name text, age integer, CONSTRAINT non_negative_age CHECK (age >= 0))"}
+(1 row)
+
+-- including table constraints
+CREATE TABLE table_constraint_table (
+    bid_item_id bigint,
+    min_bid decimal not null,
+    max_bid decimal not null,
+    CONSTRAINT bids_ordered CHECK (min_bid > max_bid)
+);
+SELECT table_ddl_command_array('table_constraint_table');
+                                                                 table_ddl_command_array
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"CREATE TABLE public.table_constraint_table (bid_item_id bigint, min_bid numeric NOT NULL, max_bid numeric NOT NULL, CONSTRAINT bids_ordered CHECK (min_bid > max_bid))"}
+(1 row)
+
+-- default values are supported
+CREATE TABLE default_value_table (
+    name text,
+    price decimal default 0.00
+);
+SELECT table_ddl_command_array('default_value_table');
+                               table_ddl_command_array
+---------------------------------------------------------------------------------------
+ {"CREATE TABLE public.default_value_table (name text, price numeric DEFAULT 0.00)"}
+(1 row)
+
+-- of course primary keys work...
+CREATE TABLE pkey_table (
+    first_name text,
+    last_name text,
+    id bigint PRIMARY KEY
+);
+SELECT table_ddl_command_array('pkey_table');
+                                                                table_ddl_command_array
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"CREATE TABLE public.pkey_table (first_name text, last_name text, id bigint NOT NULL)","ALTER TABLE public.pkey_table ADD CONSTRAINT pkey_table_pkey PRIMARY KEY (id)"}
+(1 row)
+
+-- as do unique indexes...
+CREATE TABLE unique_table (
+    user_id bigint not null,
+    username text UNIQUE not null
+);
+SELECT table_ddl_command_array('unique_table');
+                                                                      table_ddl_command_array
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"CREATE TABLE public.unique_table (user_id bigint NOT NULL, username text NOT NULL)","ALTER TABLE public.unique_table ADD CONSTRAINT unique_table_username_key UNIQUE (username)"}
+(1 row)
+
+-- and indexes used for clustering
+CREATE TABLE clustered_table (
+    data json not null,
+    received_at timestamp not null
+);
+CREATE INDEX clustered_time_idx ON clustered_table (received_at);
+CLUSTER clustered_table USING clustered_time_idx;
+SELECT table_ddl_command_array('clustered_table');
+                                                                               table_ddl_command_array
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL)","CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at)","ALTER TABLE public.clustered_table CLUSTER ON clustered_time_idx"}
+(1 row)
+
+-- fiddly things like storage type and statistics also work
+CREATE TABLE fiddly_table (
+    hostname char(255) not null,
+    os char(255) not null,
+    ip_addr inet not null,
+    traceroute text not null
+);
+ALTER TABLE fiddly_table
+    ALTER hostname SET STORAGE PLAIN,
+    ALTER os SET STORAGE MAIN,
+    ALTER ip_addr SET STORAGE EXTENDED,
+    ALTER traceroute SET STORAGE EXTERNAL,
+    ALTER ip_addr SET STATISTICS 500;
+SELECT table_ddl_command_array('fiddly_table');
+                                                                                                                                       table_ddl_command_array
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"CREATE TABLE public.fiddly_table (hostname character(255) NOT NULL, os character(255) NOT NULL, ip_addr inet NOT NULL, traceroute text NOT NULL)","ALTER TABLE ONLY public.fiddly_table ALTER COLUMN hostname SET STORAGE PLAIN, ALTER COLUMN os SET STORAGE MAIN, ALTER COLUMN ip_addr SET STORAGE EXTENDED, ALTER COLUMN ip_addr SET STATISTICS 500, ALTER COLUMN traceroute SET STORAGE EXTERNAL"}
+(1 row)
+
+-- test foreign tables using fake FDW
+CREATE FOREIGN TABLE foreign_table (
+    id bigint not null,
+    full_name text not null default ''
+) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true');
+SELECT table_ddl_command_array('foreign_table');
+NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
+                                                                          table_ddl_command_array
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw","CREATE FOREIGN TABLE public.foreign_table (id bigint NOT NULL, full_name text DEFAULT ''::text NOT NULL) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true')"}
+(1 row)
+
+-- propagating views is not supported
+CREATE VIEW local_view AS SELECT * FROM simple_table;
+SELECT table_ddl_command_array('local_view');
+ERROR: public.local_view is not a regular or foreign table
+-- clean up
+DROP VIEW IF EXISTS local_view;
+DROP FOREIGN TABLE IF EXISTS foreign_table;
+DROP TABLE IF EXISTS simple_table, not_null_table, column_constraint_table,
+                     table_constraint_table, default_value_table, pkey_table,
+                     unique_table, clustered_table, fiddly_table;
@@ -15,7 +15,7 @@ SELECT * FROM master_get_table_ddl_events('lineitem');
                                                                            master_get_table_ddl_events
 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL)
-CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate)
+CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) TABLESPACE pg_default
 ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber)
 (3 rows)
 
@@ -0,0 +1,48 @@
+--
+-- MULTI_MASTER_PROTOCOL
+--
+-- Tests that check the metadata returned by the master node.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 740000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 740000;
+SELECT part_storage_type, part_key, part_replica_count, part_max_size,
+    part_placement_policy FROM master_get_table_metadata('lineitem');
+ part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy
+-------------------+------------+--------------------+---------------+-----------------------
+ t | l_orderkey | 2 | 307200 | 2
+(1 row)
+
+SELECT * FROM master_get_table_ddl_events('lineitem');
+                                                                           master_get_table_ddl_events
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL)
+ CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate)
+ ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber)
+(3 rows)
+
+SELECT * FROM master_get_new_shardid();
+ master_get_new_shardid
+------------------------
+ 740000
+(1 row)
+
+SELECT * FROM master_get_round_robin_candidate_nodes(1);
+ node_name | node_port
+-----------+-----------
+ localhost | 57638
+ localhost | 57637
+(2 rows)
+
+SELECT * FROM master_get_round_robin_candidate_nodes(2);
+ node_name | node_port
+-----------+-----------
+ localhost | 57637
+ localhost | 57638
+(2 rows)
+
+SELECT * FROM master_get_active_worker_nodes();
+ node_name | node_port
+-----------+-----------
+ localhost | 57638
+ localhost | 57637
+(2 rows)
+
@@ -77,7 +77,7 @@ SELECT unnest(master_metadata_snapshot());
 INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
 CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
 CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
-CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
+CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2) TABLESPACE pg_default
 ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
 ALTER TABLE public.mx_test_table OWNER TO postgres
 INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's')
@@ -99,7 +99,7 @@ SELECT unnest(master_metadata_snapshot());
 CREATE SCHEMA IF NOT EXISTS mx_testing_schema
 CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
 CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
-CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
+CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
 ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
 ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
 INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
@@ -125,7 +125,7 @@ SELECT unnest(master_metadata_snapshot());
 CREATE SCHEMA IF NOT EXISTS mx_testing_schema
 CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
 CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
-CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
+CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
 ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
 ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
 INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
@@ -144,7 +144,7 @@ SELECT unnest(master_metadata_snapshot());
 CREATE SCHEMA IF NOT EXISTS mx_testing_schema
 CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
 CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
-CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
+CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default
 ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
 ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
 INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
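The snapshot commands in the hunks above rebuild the table's SERIAL column as an explicit sequence plus a DEFAULT, which is how SERIAL expands in PostgreSQL. A minimal stand-alone sketch with a hypothetical table name:

-- hypothetical example: the spelled-out equivalent of "col_3 SERIAL"
CREATE SEQUENCE IF NOT EXISTS mx_demo_col_3_seq;
CREATE TABLE mx_demo (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('mx_demo_col_3_seq'::regclass) NOT NULL);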
@@ -0,0 +1,155 @@
+--
+-- MULTI_METADATA_SNAPSHOT
+--
+-- Tests for metadata snapshot functions.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
+SELECT nextval('pg_catalog.pg_dist_shard_placement_placementid_seq') AS last_placement_id
+\gset
+ALTER SEQUENCE pg_catalog.pg_dist_shard_placement_placementid_seq RESTART 100000;
+-- Create the necessary test utility function
+CREATE FUNCTION master_metadata_snapshot()
+    RETURNS text[]
+    LANGUAGE C STRICT
+    AS 'citus';
+
+COMMENT ON FUNCTION master_metadata_snapshot()
+    IS 'commands to create the metadata snapshot';
+
+-- Show that none of the existing tables are qualified to be MX tables
+SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s';
+ logicalrelid | partmethod | partkey | colocationid | repmodel
+--------------+------------+---------+--------------+----------
+(0 rows)
+
+-- Show that, with no MX tables, metadata snapshot contains only the delete commands and
+-- pg_dist_node entries
+SELECT unnest(master_metadata_snapshot());
+                                                                                          unnest
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ TRUNCATE pg_dist_node
+ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+(3 rows)
+
+-- Create a test table with constraints and SERIAL
+CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 SERIAL);
+SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash');
+ master_create_distributed_table
+---------------------------------
+
+(1 row)
+
+SELECT master_create_worker_shards('mx_test_table', 8, 1);
+ master_create_worker_shards
+-----------------------------
+
+(1 row)
+
+-- Set the replication model of the test table to streaming replication so that it is
+-- considered as an MX table
+UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass;
+-- Show that the created MX table is included in the metadata snapshot
+SELECT unnest(master_metadata_snapshot());
+                                                                                          unnest
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ TRUNCATE pg_dist_node
+ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
+ CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
+ ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
+ ALTER TABLE public.mx_test_table OWNER TO postgres
+ INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's')
+ INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport, placementid) VALUES (1310000, 1, 0, 'localhost', 57637, 100000),(1310001, 1, 0, 'localhost', 57638, 100001),(1310002, 1, 0, 'localhost', 57637, 100002),(1310003, 1, 0, 'localhost', 57638, 100003),(1310004, 1, 0, 'localhost', 57637, 100004),(1310005, 1, 0, 'localhost', 57638, 100005),(1310006, 1, 0, 'localhost', 57637, 100006),(1310007, 1, 0, 'localhost', 57638, 100007)
+ INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
+(10 rows)
+
+-- Show that CREATE INDEX commands are included in the metadata snapshot
+CREATE INDEX mx_index ON mx_test_table(col_2);
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+SELECT unnest(master_metadata_snapshot());
+                                                                                          unnest
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ TRUNCATE pg_dist_node
+ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
+ CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL)
+ CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
+ ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
+ ALTER TABLE public.mx_test_table OWNER TO postgres
+ INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's')
+ INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport, placementid) VALUES (1310000, 1, 0, 'localhost', 57637, 100000),(1310001, 1, 0, 'localhost', 57638, 100001),(1310002, 1, 0, 'localhost', 57637, 100002),(1310003, 1, 0, 'localhost', 57638, 100003),(1310004, 1, 0, 'localhost', 57637, 100004),(1310005, 1, 0, 'localhost', 57638, 100005),(1310006, 1, 0, 'localhost', 57637, 100006),(1310007, 1, 0, 'localhost', 57638, 100007)
+ INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
+(11 rows)
+
+-- Show that schema changes are included in the metadata snapshot
+CREATE SCHEMA mx_testing_schema;
+ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema;
+WARNING: not propagating ALTER ... SET SCHEMA commands to worker nodes
+HINT: Connect to worker nodes directly to manually change schemas of affected objects.
+SELECT unnest(master_metadata_snapshot());
+                                                                                          unnest
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ TRUNCATE pg_dist_node
+ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ CREATE SCHEMA IF NOT EXISTS mx_testing_schema
+ CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
+ CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
+ CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
+ ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
+ ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
+ INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
+ INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport, placementid) VALUES (1310000, 1, 0, 'localhost', 57637, 100000),(1310001, 1, 0, 'localhost', 57638, 100001),(1310002, 1, 0, 'localhost', 57637, 100002),(1310003, 1, 0, 'localhost', 57638, 100003),(1310004, 1, 0, 'localhost', 57637, 100004),(1310005, 1, 0, 'localhost', 57638, 100005),(1310006, 1, 0, 'localhost', 57637, 100006),(1310007, 1, 0, 'localhost', 57638, 100007)
+ INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
+(12 rows)
+
+-- Show that append distributed tables are not included in the metadata snapshot
+CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
+SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append');
+ master_create_distributed_table
+---------------------------------
+
+(1 row)
+
+UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass;
+SELECT unnest(master_metadata_snapshot());
+                                                                                          unnest
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ TRUNCATE pg_dist_node
+ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ CREATE SCHEMA IF NOT EXISTS mx_testing_schema
+ CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
+ CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
+ CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
+ ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
+ ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
+ INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
+ INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport, placementid) VALUES (1310000, 1, 0, 'localhost', 57637, 100000),(1310001, 1, 0, 'localhost', 57638, 100001),(1310002, 1, 0, 'localhost', 57637, 100002),(1310003, 1, 0, 'localhost', 57638, 100003),(1310004, 1, 0, 'localhost', 57637, 100004),(1310005, 1, 0, 'localhost', 57638, 100005),(1310006, 1, 0, 'localhost', 57637, 100006),(1310007, 1, 0, 'localhost', 57638, 100007)
+ INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
+(12 rows)
+
+-- Show that range distributed tables are not included in the metadata snapshot
+UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
+SELECT unnest(master_metadata_snapshot());
+                                                                                          unnest
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ TRUNCATE pg_dist_node
+ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition
+ INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata) VALUES (2, 2, 'localhost', 57638, 'default', FALSE),(1, 1, 'localhost', 57637, 'default', FALSE)
+ CREATE SCHEMA IF NOT EXISTS mx_testing_schema
+ CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE
+ CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 integer DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL)
+ CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
+ ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
+ ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
+ INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's')
+ INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport, placementid) VALUES (1310000, 1, 0, 'localhost', 57637, 100000),(1310001, 1, 0, 'localhost', 57638, 100001),(1310002, 1, 0, 'localhost', 57637, 100002),(1310003, 1, 0, 'localhost', 57638, 100003),(1310004, 1, 0, 'localhost', 57637, 100004),(1310005, 1, 0, 'localhost', 57638, 100005),(1310006, 1, 0, 'localhost', 57637, 100006),(1310007, 1, 0, 'localhost', 57638, 100007)
+ INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647')
+(12 rows)
+
+ALTER SEQUENCE pg_catalog.pg_dist_shard_placement_placementid_seq RESTART :last_placement_id;
@@ -702,7 +702,7 @@ LIMIT
 
 -- Same queries above with explain
 -- Simple join subquery pushdown
-EXPLAIN SELECT
+EXPLAIN (COSTS FALSE) SELECT
     avg(array_length(events, 1)) AS event_average
 FROM
     (SELECT
@@ -729,7 +729,7 @@ FROM
     user_id) AS subquery;
 
 -- Union and left join subquery pushdown
-EXPLAIN SELECT
+EXPLAIN (COSTS FALSE) SELECT
     avg(array_length(events, 1)) AS event_average,
     hasdone
 FROM
@@ -793,7 +793,7 @@ GROUP BY
     hasdone;
 
 -- Union, left join and having subquery pushdown
-EXPLAIN SELECT
+EXPLAIN (COSTS FALSE) SELECT
     avg(array_length(events, 1)) AS event_average,
     count_pay
 FROM (
@@ -865,7 +865,7 @@ ORDER BY
     count_pay;
 
 -- Lateral join subquery pushdown
-EXPLAIN SELECT
+EXPLAIN (COSTS FALSE) SELECT
     tenant_id,
     user_id,
     user_lastseen,
@@ -740,7 +740,7 @@ LIMIT
 
 -- Same queries above with explain
 -- Simple join subquery pushdown
-EXPLAIN SELECT
+EXPLAIN (COSTS FALSE) SELECT
     avg(array_length(events, 1)) AS event_average
 FROM
     (SELECT
@@ -773,26 +773,26 @@ FROM
   Tasks Shown: One of 2
     -> Task
       Node: host=localhost port=57637 dbname=regression
-      -> Aggregate (cost=40.01..40.02 rows=1 width=16)
+      -> Aggregate
-        -> GroupAggregate (cost=39.89..39.99 rows=1 width=48)
+        -> GroupAggregate
             Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-          -> Merge Join (cost=39.89..39.97 rows=1 width=540)
+          -> Merge Join
               Merge Cond: ((((users.composite_id).tenant_id) = ((events.composite_id).tenant_id)) AND (((users.composite_id).user_id) = ((events.composite_id).user_id)))
-            -> Sort (cost=28.08..28.09 rows=6 width=32)
+            -> Sort
                 Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-              -> Seq Scan on users_270013 users (cost=0.00..28.00 rows=6 width=32)
+              -> Seq Scan on users_270013 users
                   Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
-            -> Sort (cost=11.81..11.82 rows=3 width=556)
+            -> Sort
                 Sort Key: ((events.composite_id).tenant_id), ((events.composite_id).user_id)
-              -> Seq Scan on events_270009 events (cost=0.00..11.79 rows=3 width=556)
+              -> Seq Scan on events_270009 events
                   Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
 Master Query
-  -> Aggregate (cost=0.00..0.00 rows=0 width=0)
+  -> Aggregate
-    -> Seq Scan on pg_merge_job_270014 (cost=0.00..0.00 rows=0 width=0)
+    -> Seq Scan on pg_merge_job_270014
 (22 rows)
 
 -- Union and left join subquery pushdown
-EXPLAIN SELECT
+EXPLAIN (COSTS FALSE) SELECT
     avg(array_length(events, 1)) AS event_average,
     hasdone
 FROM
@ -862,47 +862,47 @@ GROUP BY
|
||||||
Tasks Shown: One of 2
|
Tasks Shown: One of 2
|
||||||
-> Task
|
-> Task
|
||||||
Node: host=localhost port=57637 dbname=regression
|
Node: host=localhost port=57637 dbname=regression
|
||||||
-> GroupAggregate (cost=91.93..91.98 rows=2 width=48)
|
-> GroupAggregate
|
||||||
Group Key: subquery_top.hasdone
|
Group Key: subquery_top.hasdone
|
||||||
-> Sort (cost=91.93..91.93 rows=2 width=64)
|
-> Sort
|
||||||
Sort Key: subquery_top.hasdone
|
Sort Key: subquery_top.hasdone
|
||||||
- -> Subquery Scan on subquery_top (cost=91.85..91.92 rows=2 width=64)
- -> GroupAggregate (cost=91.85..91.90 rows=2 width=112)
+ -> Subquery Scan on subquery_top
+ -> GroupAggregate
  Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), ('Has done paying'::text)
- -> Sort (cost=91.85..91.85 rows=2 width=88)
+ -> Sort
  Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), ('Has done paying'::text)
- -> Merge Left Join (cost=91.75..91.84 rows=2 width=88)
+ -> Merge Left Join
  Merge Cond: ((((users.composite_id).tenant_id) = ((events_2.composite_id).tenant_id)) AND (((users.composite_id).user_id) = ((events_2.composite_id).user_id)))
- -> Unique (cost=79.46..79.48 rows=2 width=56)
- -> Sort (cost=79.46..79.47 rows=2 width=56)
+ -> Unique
+ -> Sort
  Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), ('action=>1'::text), events.event_time
- -> Append (cost=0.00..79.45 rows=2 width=56)
- -> Nested Loop (cost=0.00..39.72 rows=1 width=56)
+ -> Append
+ -> Nested Loop
  Join Filter: (((users.composite_id).tenant_id = (events.composite_id).tenant_id) AND ((users.composite_id).user_id = (events.composite_id).user_id))
- -> Seq Scan on events_270009 events (cost=0.00..11.62 rows=1 width=40)
+ -> Seq Scan on events_270009 events
  Filter: ((event_type)::text = 'click'::text)
- -> Seq Scan on users_270013 users (cost=0.00..28.00 rows=6 width=32)
+ -> Seq Scan on users_270013 users
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Nested Loop (cost=0.00..39.72 rows=1 width=56)
+ -> Nested Loop
  Join Filter: (((users_1.composite_id).tenant_id = (events_1.composite_id).tenant_id) AND ((users_1.composite_id).user_id = (events_1.composite_id).user_id))
- -> Seq Scan on events_270009 events_1 (cost=0.00..11.62 rows=1 width=40)
+ -> Seq Scan on events_270009 events_1
  Filter: ((event_type)::text = 'submit'::text)
- -> Seq Scan on users_270013 users_1 (cost=0.00..28.00 rows=6 width=32)
+ -> Seq Scan on users_270013 users_1
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Materialize (cost=12.29..12.31 rows=1 width=48)
- -> Unique (cost=12.29..12.30 rows=1 width=80)
- -> Sort (cost=12.29..12.29 rows=1 width=80)
+ -> Materialize
+ -> Unique
+ -> Sort
  Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id)
- -> Seq Scan on events_270009 events_2 (cost=0.00..12.28 rows=1 width=80)
+ -> Seq Scan on events_270009 events_2
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
  Master Query
- -> HashAggregate (cost=0.00..0.00 rows=0 width=0)
+ -> HashAggregate
  Group Key: intermediate_column_270015_2
- -> Seq Scan on pg_merge_job_270015 (cost=0.00..0.00 rows=0 width=0)
+ -> Seq Scan on pg_merge_job_270015
  (43 rows)

  -- Union, left join and having subquery pushdown
- EXPLAIN SELECT
+ EXPLAIN (COSTS FALSE) SELECT
  avg(array_length(events, 1)) AS event_average,
  count_pay
  FROM (
@@ -974,7 +974,7 @@ ORDER BY
  count_pay;
  ERROR: bogus varattno for OUTER_VAR var: 3
  -- Lateral join subquery pushdown
- EXPLAIN SELECT
+ EXPLAIN (COSTS FALSE) SELECT
  tenant_id,
  user_id,
  user_lastseen,
@@ -1031,29 +1031,29 @@ LIMIT
  Tasks Shown: One of 2
  -> Task
  Node: host=localhost port=57637 dbname=regression
- -> Limit (cost=100.43..100.44 rows=6 width=56)
- -> Sort (cost=100.43..100.44 rows=6 width=56)
+ -> Limit
+ -> Sort
  Sort Key: (max(users.lastseen)) DESC
- -> GroupAggregate (cost=100.14..100.29 rows=6 width=56)
+ -> GroupAggregate
  Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
- -> Sort (cost=100.14..100.16 rows=6 width=548)
+ -> Sort
  Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
- -> Nested Loop Left Join (cost=40.04..100.06 rows=6 width=548)
- -> Limit (cost=28.08..28.09 rows=6 width=24)
- -> Sort (cost=28.08..28.09 rows=6 width=24)
+ -> Nested Loop Left Join
+ -> Limit
+ -> Sort
  Sort Key: users.lastseen DESC
- -> Seq Scan on users_270013 users (cost=0.00..28.00 rows=6 width=24)
+ -> Seq Scan on users_270013 users
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Limit (cost=11.96..11.96 rows=1 width=524)
- -> Sort (cost=11.96..11.96 rows=1 width=524)
+ -> Limit
+ -> Sort
  Sort Key: events.event_time DESC
- -> Seq Scan on events_270009 events (cost=0.00..11.95 rows=1 width=524)
+ -> Seq Scan on events_270009 events
  Filter: (((composite_id).tenant_id = ((users.composite_id).tenant_id)) AND ((composite_id).user_id = ((users.composite_id).user_id)))
  Master Query
- -> Limit (cost=0.00..0.00 rows=0 width=0)
- -> Sort (cost=0.00..0.00 rows=0 width=0)
+ -> Limit
+ -> Sort
  Sort Key: intermediate_column_270017_2 DESC
- -> Seq Scan on pg_merge_job_270017 (cost=0.00..0.00 rows=0 width=0)
+ -> Seq Scan on pg_merge_job_270017
  (29 rows)

  SET citusdb.task_executor_type TO 'real-time';
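The hunks above only strip the planner's cost annotations; the plan shapes themselves are unchanged. As a minimal sketch of what this normalization does, assuming direct access to one of the worker shard tables that appears in the plans above (users_270013):

-- Default EXPLAIN prints cost estimates, which drift whenever statistics
-- or the PostgreSQL costing model change between releases.
EXPLAIN SELECT * FROM users_270013;
-- COSTS FALSE prints only the plan shape, so the expected test output
-- stays stable across runs and PostgreSQL versions.
EXPLAIN (COSTS FALSE) SELECT * FROM users_270013;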
File diff suppressed because it is too large
@@ -740,7 +740,7 @@ LIMIT

  -- Same queries above with explain
  -- Simple join subquery pushdown
- EXPLAIN SELECT
+ EXPLAIN (COSTS FALSE) SELECT
  avg(array_length(events, 1)) AS event_average
  FROM
  (SELECT
@@ -773,26 +773,26 @@ FROM
  Tasks Shown: One of 2
  -> Task
  Node: host=localhost port=57637 dbname=regression
- -> Aggregate (cost=40.01..40.02 rows=1 width=32)
- -> GroupAggregate (cost=39.89..39.99 rows=1 width=556)
+ -> Aggregate
+ -> GroupAggregate
  Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
- -> Merge Join (cost=39.89..39.97 rows=1 width=556)
+ -> Merge Join
  Merge Cond: ((((users.composite_id).tenant_id) = ((events.composite_id).tenant_id)) AND (((users.composite_id).user_id) = ((events.composite_id).user_id)))
- -> Sort (cost=28.08..28.09 rows=6 width=32)
+ -> Sort
  Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
- -> Seq Scan on users_270013 users (cost=0.00..28.00 rows=6 width=32)
+ -> Seq Scan on users_270013 users
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Sort (cost=11.81..11.82 rows=3 width=556)
+ -> Sort
  Sort Key: ((events.composite_id).tenant_id), ((events.composite_id).user_id)
- -> Seq Scan on events_270009 events (cost=0.00..11.79 rows=3 width=556)
+ -> Seq Scan on events_270009 events
  Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
  Master Query
- -> Aggregate (cost=0.01..0.02 rows=1 width=0)
- -> Seq Scan on pg_merge_job_270014 (cost=0.00..0.00 rows=0 width=0)
+ -> Aggregate
+ -> Seq Scan on pg_merge_job_270014
  (22 rows)

  -- Union and left join subquery pushdown
- EXPLAIN SELECT
+ EXPLAIN (COSTS FALSE) SELECT
  avg(array_length(events, 1)) AS event_average,
  hasdone
  FROM
@@ -862,44 +862,44 @@ GROUP BY
  Tasks Shown: One of 2
  -> Task
  Node: host=localhost port=57637 dbname=regression
- -> HashAggregate (cost=91.94..91.96 rows=2 width=64)
+ -> HashAggregate
  Group Key: COALESCE(('Has done paying'::text), 'Has not done paying'::text)
- -> GroupAggregate (cost=91.85..91.90 rows=2 width=88)
+ -> GroupAggregate
  Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), ('Has done paying'::text)
- -> Sort (cost=91.85..91.85 rows=2 width=88)
+ -> Sort
  Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), ('Has done paying'::text)
- -> Merge Left Join (cost=91.75..91.84 rows=2 width=88)
+ -> Merge Left Join
  Merge Cond: ((((users.composite_id).tenant_id) = ((events_2.composite_id).tenant_id)) AND (((users.composite_id).user_id) = ((events_2.composite_id).user_id)))
- -> Unique (cost=79.46..79.48 rows=2 width=40)
- -> Sort (cost=79.46..79.47 rows=2 width=40)
+ -> Unique
+ -> Sort
  Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), ('action=>1'::text), events.event_time
- -> Append (cost=0.00..79.45 rows=2 width=40)
- -> Nested Loop (cost=0.00..39.72 rows=1 width=40)
+ -> Append
+ -> Nested Loop
  Join Filter: (((users.composite_id).tenant_id = (events.composite_id).tenant_id) AND ((users.composite_id).user_id = (events.composite_id).user_id))
- -> Seq Scan on events_270009 events (cost=0.00..11.62 rows=1 width=40)
+ -> Seq Scan on events_270009 events
  Filter: ((event_type)::text = 'click'::text)
- -> Seq Scan on users_270013 users (cost=0.00..28.00 rows=6 width=32)
+ -> Seq Scan on users_270013 users
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Nested Loop (cost=0.00..39.72 rows=1 width=40)
+ -> Nested Loop
  Join Filter: (((users_1.composite_id).tenant_id = (events_1.composite_id).tenant_id) AND ((users_1.composite_id).user_id = (events_1.composite_id).user_id))
- -> Seq Scan on events_270009 events_1 (cost=0.00..11.62 rows=1 width=40)
+ -> Seq Scan on events_270009 events_1
  Filter: ((event_type)::text = 'submit'::text)
- -> Seq Scan on users_270013 users_1 (cost=0.00..28.00 rows=6 width=32)
+ -> Seq Scan on users_270013 users_1
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Materialize (cost=12.29..12.31 rows=1 width=48)
- -> Unique (cost=12.29..12.30 rows=1 width=32)
- -> Sort (cost=12.29..12.29 rows=1 width=32)
+ -> Materialize
+ -> Unique
+ -> Sort
  Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id)
- -> Seq Scan on events_270009 events_2 (cost=0.00..12.28 rows=1 width=32)
+ -> Seq Scan on events_270009 events_2
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
  Master Query
- -> HashAggregate (cost=0.00..0.18 rows=10 width=0)
+ -> HashAggregate
  Group Key: intermediate_column_270015_2
- -> Seq Scan on pg_merge_job_270015 (cost=0.00..0.00 rows=0 width=0)
+ -> Seq Scan on pg_merge_job_270015
  (40 rows)

  -- Union, left join and having subquery pushdown
- EXPLAIN SELECT
+ EXPLAIN (COSTS FALSE) SELECT
  avg(array_length(events, 1)) AS event_average,
  count_pay
  FROM (
@@ -971,7 +971,7 @@ ORDER BY
  count_pay;
  ERROR: bogus varattno for OUTER_VAR var: 3
  -- Lateral join subquery pushdown
- EXPLAIN SELECT
+ EXPLAIN (COSTS FALSE) SELECT
  tenant_id,
  user_id,
  user_lastseen,
@@ -1028,29 +1028,29 @@ LIMIT
  Tasks Shown: One of 2
  -> Task
  Node: host=localhost port=57637 dbname=regression
- -> Limit (cost=100.43..100.44 rows=6 width=56)
- -> Sort (cost=100.43..100.44 rows=6 width=56)
+ -> Limit
+ -> Sort
  Sort Key: (max(users.lastseen)) DESC
- -> GroupAggregate (cost=100.14..100.29 rows=6 width=548)
+ -> GroupAggregate
  Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
- -> Sort (cost=100.14..100.16 rows=6 width=548)
+ -> Sort
  Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
- -> Nested Loop Left Join (cost=40.04..100.06 rows=6 width=548)
- -> Limit (cost=28.08..28.09 rows=6 width=40)
- -> Sort (cost=28.08..28.09 rows=6 width=40)
+ -> Nested Loop Left Join
+ -> Limit
+ -> Sort
  Sort Key: users.lastseen DESC
- -> Seq Scan on users_270013 users (cost=0.00..28.00 rows=6 width=40)
+ -> Seq Scan on users_270013 users
  Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
- -> Limit (cost=11.96..11.96 rows=1 width=524)
- -> Sort (cost=11.96..11.96 rows=1 width=524)
+ -> Limit
+ -> Sort
  Sort Key: events.event_time DESC
- -> Seq Scan on events_270009 events (cost=0.00..11.95 rows=1 width=524)
+ -> Seq Scan on events_270009 events
  Filter: (((composite_id).tenant_id = ((users.composite_id).tenant_id)) AND ((composite_id).user_id = ((users.composite_id).user_id)))
  Master Query
- -> Limit (cost=0.01..0.02 rows=0 width=0)
- -> Sort (cost=0.01..0.02 rows=0 width=0)
+ -> Limit
+ -> Sort
  Sort Key: intermediate_column_270017_2 DESC
- -> Seq Scan on pg_merge_job_270017 (cost=0.00..0.00 rows=0 width=0)
+ -> Seq Scan on pg_merge_job_270017
  (29 rows)

  SET citusdb.task_executor_type TO 'real-time';
@@ -209,7 +209,7 @@ EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
  SET citus.task_executor_type TO 'real-time';

  PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
- EXPLAIN EXECUTE router_executor_query;
+ EXPLAIN (COSTS FALSE) EXECUTE router_executor_query;

  PREPARE real_time_executor_query AS
  SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
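The same switch applies on the prepared-statement path: EXPLAIN options are honored by EXPLAIN EXECUTE, which is what the hunk above exercises. A minimal sketch, assuming a parameterized variant of the router query; the statement name and parameter are illustrative and not part of the test file:

-- Prepared router query with an explicit parameter type (illustrative).
PREPARE example_router_query (bigint) AS
    SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
-- COSTS FALSE suppresses the (cost=.. rows=.. width=..) annotations in the
-- router plan, keeping the expected output independent of local statistics.
EXPLAIN (COSTS FALSE) EXECUTE example_router_query(5);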