diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile
index 5de6ed453..701afa168 100644
--- a/src/test/regress/Makefile
+++ b/src/test/regress/Makefile
@@ -37,7 +37,7 @@ output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $
# intermediate, for muscle memory backward compatibility.
check: check-full
# check-full triggers all tests that ought to be run routinely
-check-full: check-multi check-multi-task-tracker-extra check-multi-binary check-worker
+check-full: check-multi check-multi-mx check-multi-task-tracker-extra check-multi-binary check-worker
# using pg_regress_multi_check unnecessarily starts up multiple nodes, which isn't needed
# for check-worker. But that's harmless besides a few cycles.
@@ -59,6 +59,10 @@ check-isolation: all tempinstall-main
check-vanilla: all tempinstall-main
$(pg_regress_multi_check) --load-extension=citus --vanillatest
+
+check-multi-mx: all tempinstall-main
+ $(pg_regress_multi_check) --load-extension=citus \
+ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_mx_schedule $(EXTRA_TESTS)
check-multi-task-tracker-extra: all tempinstall-main
$(pg_regress_multi_check) --load-extension=citus \
diff --git a/src/test/regress/expected/.gitignore b/src/test/regress/expected/.gitignore
index e560dfd00..9711f0c5e 100644
--- a/src/test/regress/expected/.gitignore
+++ b/src/test/regress/expected/.gitignore
@@ -16,3 +16,4 @@
/multi_subquery_0.out
/worker_copy.out
/multi_complex_count_distinct.out
+/multi_mx_copy_data.out
diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out
new file mode 100644
index 000000000..5c17af922
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_create_table.out
@@ -0,0 +1,496 @@
+--
+-- MULTI_MX_CREATE_TABLE
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1220000;
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+ start_metadata_sync_to_node
+-----------------------------
+
+(1 row)
+
+SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+ start_metadata_sync_to_node
+-----------------------------
+
+(1 row)
+
+-- create schema to test schema support
+CREATE SCHEMA citus_mx_test_schema;
+CREATE SCHEMA citus_mx_test_schema_join_1;
+CREATE SCHEMA citus_mx_test_schema_join_2;
+-- create UDFs that we're going to use in our tests
+SET search_path TO public;
+CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+SET search_path TO citus_mx_test_schema;
+CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+CREATE OPERATOR citus_mx_test_schema.=== (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===,
+ NEGATOR = !==,
+ HASHES, MERGES
+);
+SET search_path TO public;
+CREATE COLLATION citus_mx_test_schema.english FROM "en_US";
+CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
+CREATE TYPE order_side_mx AS ENUM ('buy', 'sell');
+-- now create the required objects on worker 1
+\c - - - :worker_1_port
+-- create schema to test schema support
+CREATE SCHEMA citus_mx_test_schema;
+CREATE SCHEMA citus_mx_test_schema_join_1;
+CREATE SCHEMA citus_mx_test_schema_join_2;
+-- create UDFs on the worker node
+CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+SET search_path TO citus_mx_test_schema;
+CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+-- create operator
+CREATE OPERATOR citus_mx_test_schema.=== (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===,
+ NEGATOR = !==,
+ HASHES, MERGES
+);
+SET search_path TO public;
+CREATE COLLATION citus_mx_test_schema.english FROM "en_US";
+SET search_path TO public;
+CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
+CREATE TYPE order_side_mx AS ENUM ('buy', 'sell');
+-- now create the required objects on worker 2
+\c - - - :worker_2_port
+-- create schema to test schema support
+CREATE SCHEMA citus_mx_test_schema;
+CREATE SCHEMA citus_mx_test_schema_join_1;
+CREATE SCHEMA citus_mx_test_schema_join_2;
+-- create UDF
+CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+SET search_path TO citus_mx_test_schema;
+CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+-- create operator
+CREATE OPERATOR citus_mx_test_schema.=== (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===,
+ NEGATOR = !==,
+ HASHES, MERGES
+);
+SET search_path TO public;
+CREATE COLLATION citus_mx_test_schema.english FROM "en_US";
+SET search_path TO public;
+CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
+CREATE TYPE order_side_mx AS ENUM ('buy', 'sell');
+-- connect back to the master, and do some more tests
+\c - - - :master_port
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+SET search_path TO public;
+CREATE TABLE nation_hash(
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152)
+);
+SET citus.shard_count TO 16;
+SELECT create_distributed_table('nation_hash', 'n_nationkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SET search_path TO citus_mx_test_schema;
+-- create mx tables that we're going to use for our tests
+CREATE TABLE citus_mx_test_schema.nation_hash(
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152)
+);
+SELECT create_distributed_table('nation_hash', 'n_nationkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE citus_mx_test_schema_join_1.nation_hash (
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152));
+SET citus.shard_count TO 4;
+SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash', 'n_nationkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 (
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152));
+SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash_2', 'n_nationkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SET search_path TO citus_mx_test_schema_join_2;
+CREATE TABLE nation_hash (
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152));
+SELECT create_distributed_table('nation_hash', 'n_nationkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SET search_path TO citus_mx_test_schema;
+CREATE TABLE nation_hash_collation_search_path(
+ n_nationkey integer not null,
+ n_name char(25) not null COLLATE english,
+ n_regionkey integer not null,
+ n_comment varchar(152)
+);
+SELECT create_distributed_table('nation_hash_collation_search_path', 'n_nationkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+\COPY nation_hash_collation_search_path FROM STDIN with delimiter '|';
+CREATE TABLE citus_mx_test_schema.nation_hash_composite_types(
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152),
+ test_col citus_mx_test_schema.new_composite_type
+);
+SELECT create_distributed_table('citus_mx_test_schema.nation_hash_composite_types', 'n_nationkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+-- insert some data to verify composite type queries
+\COPY citus_mx_test_schema.nation_hash_composite_types FROM STDIN with delimiter '|';
+-- now create tpch tables
+-- Create new table definitions for use in testing distributed planning and
+-- execution functionality. Also create indexes to boost performance.
+SET search_path TO public;
+CREATE TABLE lineitem_mx (
+ l_orderkey bigint not null,
+ l_partkey integer not null,
+ l_suppkey integer not null,
+ l_linenumber integer not null,
+ l_quantity decimal(15, 2) not null,
+ l_extendedprice decimal(15, 2) not null,
+ l_discount decimal(15, 2) not null,
+ l_tax decimal(15, 2) not null,
+ l_returnflag char(1) not null,
+ l_linestatus char(1) not null,
+ l_shipdate date not null,
+ l_commitdate date not null,
+ l_receiptdate date not null,
+ l_shipinstruct char(25) not null,
+ l_shipmode char(10) not null,
+ l_comment varchar(44) not null,
+ PRIMARY KEY(l_orderkey, l_linenumber) );
+SET citus.shard_count TO 16;
+SELECT create_distributed_table('lineitem_mx', 'l_orderkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE INDEX lineitem_mx_time_index ON lineitem_mx (l_shipdate);
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+CREATE TABLE orders_mx (
+ o_orderkey bigint not null,
+ o_custkey integer not null,
+ o_orderstatus char(1) not null,
+ o_totalprice decimal(15,2) not null,
+ o_orderdate date not null,
+ o_orderpriority char(15) not null,
+ o_clerk char(15) not null,
+ o_shippriority integer not null,
+ o_comment varchar(79) not null,
+ PRIMARY KEY(o_orderkey) );
+SELECT create_distributed_table('orders_mx', 'o_orderkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE customer_mx (
+ c_custkey integer not null,
+ c_name varchar(25) not null,
+ c_address varchar(40) not null,
+ c_nationkey integer not null,
+ c_phone char(15) not null,
+ c_acctbal decimal(15,2) not null,
+ c_mktsegment char(10) not null,
+ c_comment varchar(117) not null);
+SET citus.shard_count TO 1;
+SELECT create_distributed_table('customer_mx', 'c_custkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE nation_mx (
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152));
+SELECT create_distributed_table('nation_mx', 'n_nationkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE part_mx (
+ p_partkey integer not null,
+ p_name varchar(55) not null,
+ p_mfgr char(25) not null,
+ p_brand char(10) not null,
+ p_type varchar(25) not null,
+ p_size integer not null,
+ p_container char(10) not null,
+ p_retailprice decimal(15,2) not null,
+ p_comment varchar(23) not null);
+SELECT create_distributed_table('part_mx', 'p_partkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE supplier_mx
+(
+ s_suppkey integer not null,
+ s_name char(25) not null,
+ s_address varchar(40) not null,
+ s_nationkey integer,
+ s_phone char(15) not null,
+ s_acctbal decimal(15,2) not null,
+ s_comment varchar(101) not null
+);
+SELECT create_distributed_table('supplier_mx', 's_suppkey');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+-- Create test table for ddl
+CREATE TABLE mx_ddl_table (
+ key int primary key,
+ value int
+);
+SET citus.shard_count TO 4;
+SELECT create_distributed_table('mx_ddl_table', 'key', 'hash');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+-- Load some test data
+COPY mx_ddl_table (key, value) FROM STDIN WITH (FORMAT 'csv');
+-- test table for modifications
+CREATE TABLE limit_orders_mx (
+ id bigint PRIMARY KEY,
+ symbol text NOT NULL,
+ bidder_id bigint NOT NULL,
+ placed_at timestamp NOT NULL,
+ kind order_side_mx NOT NULL,
+ limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00)
+);
+SET citus.shard_count TO 2;
+SELECT create_distributed_table('limit_orders_mx', 'id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+-- test table for modifications
+CREATE TABLE multiple_hash_mx (
+ category text NOT NULL,
+ data text NOT NULL
+);
+SELECT create_distributed_table('multiple_hash_mx', 'category');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SET citus.shard_count TO 4;
+CREATE TABLE app_analytics_events_mx (id bigserial, app_id integer, name text);
+SELECT create_distributed_table('app_analytics_events_mx', 'app_id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE researchers_mx (
+ id bigint NOT NULL,
+ lab_id int NOT NULL,
+ name text NOT NULL
+);
+SET citus.shard_count TO 2;
+SELECT create_distributed_table('researchers_mx', 'lab_id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE labs_mx (
+ id bigint NOT NULL,
+ name text NOT NULL
+);
+SET citus.shard_count TO 1;
+SELECT create_distributed_table('labs_mx', 'id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+-- now, for some special failures...
+CREATE TABLE objects_mx (
+ id bigint PRIMARY KEY,
+ name text NOT NULL
+);
+SELECT create_distributed_table('objects_mx', 'id', 'hash');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE articles_hash_mx (
+ id bigint NOT NULL,
+ author_id bigint NOT NULL,
+ title varchar(20) NOT NULL,
+ word_count integer
+);
+-- this table is used in router executor tests
+CREATE TABLE articles_single_shard_hash_mx (LIKE articles_hash_mx);
+SET citus.shard_count TO 2;
+SELECT create_distributed_table('articles_hash_mx', 'author_id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SET citus.shard_count TO 1;
+SELECT create_distributed_table('articles_single_shard_hash_mx', 'author_id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SET citus.shard_count TO 4;
+CREATE TABLE company_employees_mx (company_id int, employee_id int, manager_id int);
+SELECT create_distributed_table('company_employees_mx', 'company_id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+WITH shard_counts AS (
+ SELECT logicalrelid, count(*) AS shard_count FROM pg_dist_shard GROUP BY logicalrelid
+ )
+SELECT logicalrelid, colocationid, shard_count, partmethod, repmodel
+FROM pg_dist_partition NATURAL JOIN shard_counts
+ORDER BY colocationid, logicalrelid;
+ logicalrelid | colocationid | shard_count | partmethod | repmodel
+--------------------------------------------------------+--------------+-------------+------------+----------
+ nation_hash | 2 | 16 | h | s
+ citus_mx_test_schema.nation_hash | 2 | 16 | h | s
+ citus_mx_test_schema_join_1.nation_hash | 3 | 4 | h | s
+ citus_mx_test_schema_join_1.nation_hash_2 | 3 | 4 | h | s
+ citus_mx_test_schema_join_2.nation_hash | 3 | 4 | h | s
+ citus_mx_test_schema.nation_hash_collation_search_path | 3 | 4 | h | s
+ citus_mx_test_schema.nation_hash_composite_types | 3 | 4 | h | s
+ mx_ddl_table | 3 | 4 | h | s
+ app_analytics_events_mx | 3 | 4 | h | s
+ company_employees_mx | 3 | 4 | h | s
+ lineitem_mx | 4 | 16 | h | s
+ orders_mx | 4 | 16 | h | s
+ customer_mx | 5 | 1 | h | s
+ nation_mx | 5 | 1 | h | s
+ part_mx | 5 | 1 | h | s
+ supplier_mx | 5 | 1 | h | s
+ limit_orders_mx | 6 | 2 | h | s
+ articles_hash_mx | 6 | 2 | h | s
+ multiple_hash_mx | 7 | 2 | h | s
+ researchers_mx | 8 | 2 | h | s
+ labs_mx | 9 | 1 | h | s
+ objects_mx | 9 | 1 | h | s
+ articles_single_shard_hash_mx | 9 | 1 | h | s
+(23 rows)
+
diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out
new file mode 100644
index 000000000..b57ee4cdd
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_ddl.out
@@ -0,0 +1,192 @@
+-- Tests related to distributed DDL commands on mx cluster
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1600000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1600000;
+SELECT * FROM mx_ddl_table ORDER BY key;
+ key | value
+-----+-------
+ 1 | 10
+ 2 | 11
+ 3 | 21
+ 4 | 37
+ 5 | 60
+ 6 | 100
+ 10 | 200
+ 11 | 230
+(8 rows)
+
+-- CREATE INDEX
+CREATE INDEX ddl_test_index ON mx_ddl_table(value);
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+-- ADD COLUMN
+ALTER TABLE mx_ddl_table ADD COLUMN version INTEGER;
+-- SET DEFAULT
+ALTER TABLE mx_ddl_table ALTER COLUMN version SET DEFAULT 1;
+SELECT master_modify_multiple_shards('UPDATE mx_ddl_table SET version=0.1 WHERE version IS NULL');
+ master_modify_multiple_shards
+-------------------------------
+ 8
+(1 row)
+
+-- SET NOT NULL
+ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL;
+-- See that the changes are applied on the schema node, worker tables, and shards
+\d mx_ddl_table
+ Table "public.mx_ddl_table"
+ Column | Type | Modifiers
+---------+---------+--------------------
+ key | integer | not null
+ value | integer |
+ version | integer | not null default 1
+Indexes:
+ "mx_ddl_table_pkey" PRIMARY KEY, btree (key)
+ "ddl_test_index" btree (value)
+
+\c - - - :worker_1_port
+\d mx_ddl_table
+ Table "public.mx_ddl_table"
+ Column | Type | Modifiers
+---------+---------+--------------------
+ key | integer | not null
+ value | integer |
+ version | integer | not null default 1
+Indexes:
+ "mx_ddl_table_pkey" PRIMARY KEY, btree (key)
+ "ddl_test_index" btree (value)
+
+\d mx_ddl_table_1600000
+\c - - - :worker_2_port
+\d mx_ddl_table
+ Table "public.mx_ddl_table"
+ Column | Type | Modifiers
+---------+---------+--------------------
+ key | integer | not null
+ value | integer |
+ version | integer | not null default 1
+Indexes:
+ "mx_ddl_table_pkey" PRIMARY KEY, btree (key)
+ "ddl_test_index" btree (value)
+
+\d mx_ddl_table_1600001
+INSERT INTO mx_ddl_table VALUES (37, 78, 2);
+INSERT INTO mx_ddl_table VALUES (38, 78);
+-- Switch to the schema node
+\c - - - :master_port
+-- SET DATA TYPE
+ALTER TABLE mx_ddl_table ALTER COLUMN version SET DATA TYPE double precision;
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+INSERT INTO mx_ddl_table VALUES (78, 83, 2.1);
+\c - - - :worker_1_port
+SELECT * FROM mx_ddl_table ORDER BY key;
+ key | value | version
+-----+-------+---------
+ 1 | 10 | 0
+ 2 | 11 | 0
+ 3 | 21 | 0
+ 4 | 37 | 0
+ 5 | 60 | 0
+ 6 | 100 | 0
+ 10 | 200 | 0
+ 11 | 230 | 0
+ 37 | 78 | 2
+ 38 | 78 | 1
+ 78 | 83 | 2.1
+(11 rows)
+
+-- Switch to the schema node
+\c - - - :master_port
+-- DROP INDEX
+DROP INDEX ddl_test_index;
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+-- DROP DEFAULT
+ALTER TABLE mx_ddl_table ALTER COLUMN version DROP DEFAULT;
+-- DROP NOT NULL
+ALTER TABLE mx_ddl_table ALTER COLUMN version DROP NOT NULL;
+-- DROP COLUMN
+ALTER TABLE mx_ddl_table DROP COLUMN version;
+-- See that the changes are applied on the schema node, worker tables, and shards
+\d mx_ddl_table
+ Table "public.mx_ddl_table"
+ Column | Type | Modifiers
+--------+---------+-----------
+ key | integer | not null
+ value | integer |
+Indexes:
+ "mx_ddl_table_pkey" PRIMARY KEY, btree (key)
+
+\c - - - :worker_1_port
+\d mx_ddl_table
+ Table "public.mx_ddl_table"
+ Column | Type | Modifiers
+--------+---------+-----------
+ key | integer | not null
+ value | integer |
+Indexes:
+ "mx_ddl_table_pkey" PRIMARY KEY, btree (key)
+
+\d mx_ddl_table_1600000
+\c - - - :worker_2_port
+\d mx_ddl_table
+ Table "public.mx_ddl_table"
+ Column | Type | Modifiers
+--------+---------+-----------
+ key | integer | not null
+ value | integer |
+Indexes:
+ "mx_ddl_table_pkey" PRIMARY KEY, btree (key)
+
+\d mx_ddl_table_1600001
+-- Show that DDL commands are done within a two-phase commit transaction
+\c - - - :master_port
+SET client_min_messages TO debug2;
+CREATE INDEX ddl_test_index ON mx_ddl_table(value);
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+DEBUG: building index "ddl_test_index" on table "mx_ddl_table"
+RESET client_min_messages;
+DROP INDEX ddl_test_index;
+-- show that sequences owned by mx tables result in unique values
+SET citus.shard_replication_factor TO 1;
+SET citus.shard_count TO 4;
+SET citus.replication_model TO streaming;
+CREATE TABLE mx_sequence(key INT, value BIGSERIAL);
+SELECT create_distributed_table('mx_sequence', 'key');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+\c - - - :worker_1_port
+SELECT groupid FROM pg_dist_local_group;
+ groupid
+---------
+ 14
+(1 row)
+
+SELECT * FROM mx_sequence_value_seq;
+ sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called
+-----------------------+------------------+------------------+--------------+------------------+------------------+-------------+---------+-----------+-----------
+ mx_sequence_value_seq | 3940649673949185 | 3940649673949185 | 1 | 4222124650659841 | 3940649673949185 | 1 | 0 | f | f
+(1 row)
+
+\c - - - :worker_2_port
+SELECT groupid FROM pg_dist_local_group;
+ groupid
+---------
+ 16
+(1 row)
+
+SELECT * FROM mx_sequence_value_seq;
+ sequence_name | last_value | start_value | increment_by | max_value | min_value | cache_value | log_cnt | is_cycled | is_called
+-----------------------+------------------+------------------+--------------+------------------+------------------+-------------+---------+-----------+-----------
+ mx_sequence_value_seq | 4503599627370497 | 4503599627370497 | 1 | 4785074604081153 | 4503599627370497 | 1 | 0 | f | f
+(1 row)
+
+\c - - - :master_port
+-- the type of sequences can't be changed
+ALTER TABLE mx_sequence ALTER value TYPE BIGINT;
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+ALTER TABLE mx_sequence ALTER value TYPE INT;
diff --git a/src/test/regress/expected/multi_mx_explain.out b/src/test/regress/expected/multi_mx_explain.out
new file mode 100644
index 000000000..7e8be01a0
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_explain.out
@@ -0,0 +1,709 @@
+--
+-- MULTI_MX_EXPLAIN
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :master_port
+\a\t
+SET citus.task_executor_type TO 'real-time';
+SET citus.explain_distributed_queries TO on;
+\c - - - :worker_1_port
+-- Function that parses explain output as JSON
+CREATE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that parses explain output as XML
+CREATE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+\c - - - :worker_2_port
+-- Function that parses explain output as JSON
+CREATE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that parses explain output as XML
+CREATE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Distributed Query into pg_merge_job_68720796736
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+Master Query
+ -> Sort
+ Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(intermediate_column_68720796736_1))::bigint, '0'::bigint))))::bigint, '0'::bigint), intermediate_column_68720796736_0
+ -> HashAggregate
+ Group Key: intermediate_column_68720796736_0
+ -> Seq Scan on pg_merge_job_68720796736
+-- Test JSON format
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+[
+ {
+ "Executor": "Real-Time",
+ "Job": {
+ "Task Count": 16,
+ "Tasks Shown": "One of 16",
+ "Tasks": [
+ {
+ "Node": "host=localhost port=57637 dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Group Key": ["l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Relation Name": "lineitem_mx_1220052",
+ "Alias": "lineitem_mx"
+ }
+ ]
+ }
+ }
+ ]
+
+ ]
+ }
+ ]
+ },
+ "Master Query": [
+ {
+ "Plan": {
+ "Node Type": "Sort",
+ "Parallel Aware": false,
+ "Sort Key": ["COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(intermediate_column_68720796737_1))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "intermediate_column_68720796737_0"],
+ "Plans": [
+ {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Partial Mode": "Simple",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Group Key": ["intermediate_column_68720796737_0"],
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Relation Name": "pg_merge_job_68720796737",
+ "Alias": "pg_merge_job_68720796737"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ }
+]
+-- Validate JSON format
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+\c - - - :worker_1_port
+-- Test XML format
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+
+
+ Real-Time
+
+ 16
+ One of 16
+
+
+ host=localhost port=57637 dbname=regression
+
+
+
+
+ Aggregate
+ Hashed
+ Simple
+ false
+
+ - l_quantity
+
+
+
+ Seq Scan
+ Outer
+ false
+ lineitem_mx_1220052
+ lineitem_mx
+
+
+
+
+
+
+
+
+
+
+
+
+ Sort
+ false
+
+ - COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(intermediate_column_60130862144_1))::bigint, '0'::bigint))))::bigint, '0'::bigint)
+ - intermediate_column_60130862144_0
+
+
+
+ Aggregate
+ Hashed
+ Simple
+ Outer
+ false
+
+ - intermediate_column_60130862144_0
+
+
+
+ Seq Scan
+ Outer
+ false
+ pg_merge_job_60130862144
+ pg_merge_job_60130862144
+
+
+
+
+
+
+
+
+
+-- Validate XML format
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+-- Test YAML format
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+- Executor: "Real-Time"
+ Job:
+ Task Count: 16
+ Tasks Shown: "One of 16"
+ Tasks:
+ - Node: "host=localhost port=57637 dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Group Key:
+ - "l_quantity"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Relation Name: "lineitem_mx_1220052"
+ Alias: "lineitem_mx"
+
+ Master Query:
+ - Plan:
+ Node Type: "Sort"
+ Parallel Aware: false
+ Sort Key:
+ - "COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(intermediate_column_60130862146_1))::bigint, '0'::bigint))))::bigint, '0'::bigint)"
+ - "intermediate_column_60130862146_0"
+ Plans:
+ - Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Partial Mode: "Simple"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Group Key:
+ - "intermediate_column_60130862146_0"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Relation Name: "pg_merge_job_60130862146"
+ Alias: "pg_merge_job_60130862146"
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Distributed Query into pg_merge_job_60130862147
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+Master Query
+ -> Sort
+ Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(intermediate_column_60130862147_1))::bigint, '0'::bigint))))::bigint, '0'::bigint), intermediate_column_60130862147_0
+ -> HashAggregate
+ Group Key: intermediate_column_60130862147_0
+ -> Seq Scan on pg_merge_job_60130862147
+\c - - - :worker_2_port
+-- Test verbose
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;
+Distributed Query into pg_merge_job_68720796739
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+ -> Seq Scan on public.lineitem_mx_1220052 lineitem_mx
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Master Query
+ -> Aggregate
+ Output: (sum(intermediate_column_68720796739_0) / (sum(intermediate_column_68720796739_1) / pg_catalog.sum(intermediate_column_68720796739_2)))
+ -> Seq Scan on pg_temp_2.pg_merge_job_68720796739
+ Output: intermediate_column_68720796739_0, intermediate_column_68720796739_1, intermediate_column_68720796739_2
+-- Test join
+EXPLAIN (COSTS FALSE)
+ SELECT * FROM lineitem_mx
+ JOIN orders_mx ON l_orderkey = o_orderkey AND l_quantity < 5.0
+ ORDER BY l_quantity LIMIT 10;
+Distributed Query into pg_merge_job_68720796740
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Limit
+ -> Sort
+ Sort Key: lineitem_mx.l_quantity
+ -> Hash Join
+ Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey)
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+ Filter: (l_quantity < 5.0)
+ -> Hash
+ -> Seq Scan on orders_mx_1220068 orders_mx
+Master Query
+ -> Limit
+ -> Sort
+ Sort Key: intermediate_column_68720796740_4
+ -> Seq Scan on pg_merge_job_68720796740
+-- Test insert
+EXPLAIN (COSTS FALSE)
+ INSERT INTO lineitem_mx VALUES(1,0);
+Distributed Query
+ Executor: Router
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Insert on lineitem_mx_1220052
+ -> Result
+-- Test update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_mx
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Distributed Query
+ Executor: Router
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Update on lineitem_mx_1220052
+ -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem_mx
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Distributed Query
+ Executor: Router
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Delete on lineitem_mx_1220052
+ -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test single-shard SELECT
+EXPLAIN (COSTS FALSE)
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5;
+Distributed Query
+ Executor: Router
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Bitmap Heap Scan on lineitem_mx_1220055 lineitem_mx
+ Recheck Cond: (l_orderkey = 5)
+ -> Bitmap Index Scan on lineitem_mx_pkey_1220055
+ Index Cond: (l_orderkey = 5)
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
+t
+-- Test CREATE TABLE ... AS
+EXPLAIN (COSTS FALSE)
+ CREATE TABLE explain_result AS
+ SELECT * FROM lineitem_mx;
+Distributed Query into pg_merge_job_68720796741
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+Master Query
+ -> Seq Scan on pg_merge_job_68720796741
+-- Test all tasks output
+SET citus.explain_all_tasks TO on;
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
+Distributed Query into pg_merge_job_68720796742
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220053 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220054 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220055 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220056 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220057 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220058 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220059 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220060 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220061 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220062 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220063 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220064 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220065 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220066 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220067 lineitem_mx
+ Filter: (l_orderkey > 9030)
+Master Query
+ -> Aggregate
+ -> Seq Scan on pg_merge_job_68720796742
+SELECT true AS valid FROM explain_xml($$
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
+t
+-- Test task tracker
+SET citus.task_executor_type TO 'task-tracker';
+SET citus.explain_all_tasks TO off;
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
+Distributed Query into pg_merge_job_68720796745
+ Executor: Task-Tracker
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+ Filter: (l_orderkey > 9030)
+Master Query
+ -> Aggregate
+ -> Seq Scan on pg_merge_job_68720796745
+-- Test re-partition join
+SET citus.large_table_shard_count TO 1;
+EXPLAIN (COSTS FALSE)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+Distributed Query into pg_merge_job_68720796750
+ Executor: Task-Tracker
+ Task Count: 4
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 4
+ Merge Task Count: 4
+ -> MapMergeJob
+ Map Task Count: 16
+ Merge Task Count: 4
+ -> MapMergeJob
+ Map Task Count: 1
+ Merge Task Count: 4
+ -> MapMergeJob
+ Map Task Count: 1
+ Merge Task Count: 4
+Master Query
+ -> Aggregate
+ -> Seq Scan on pg_merge_job_68720796750
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+[
+ {
+ "Executor": "Task-Tracker",
+ "Job": {
+ "Task Count": 4,
+ "Tasks Shown": "None, not supported for re-partition queries",
+ "Depended Jobs": [
+ {
+ "Map Task Count": 4,
+ "Merge Task Count": 4,
+ "Depended Jobs": [
+ {
+ "Map Task Count": 16,
+ "Merge Task Count": 4
+ },
+ {
+ "Map Task Count": 1,
+ "Merge Task Count": 4
+ }
+ ]
+ },
+ {
+ "Map Task Count": 1,
+ "Merge Task Count": 4
+ }
+ ]
+ },
+ "Master Query": [
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Plain",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Relation Name": "pg_merge_job_68720796755",
+ "Alias": "pg_merge_job_68720796755"
+ }
+ ]
+ }
+ }
+ ]
+ }
+]
+SELECT true AS valid FROM explain_json($$
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+
+
+ Task-Tracker
+
+ 4
+ None, not supported for re-partition queries
+
+
+ 4
+ 4
+
+
+ 16
+ 4
+
+
+ 1
+ 4
+
+
+
+
+ 1
+ 4
+
+
+
+
+
+
+ Aggregate
+ Plain
+ Simple
+ false
+
+
+ Seq Scan
+ Outer
+ false
+ pg_merge_job_68720796765
+ pg_merge_job_68720796765
+
+
+
+
+
+
+
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+- Executor: "Task-Tracker"
+ Job:
+ Task Count: 4
+ Tasks Shown: "None, not supported for re-partition queries"
+ Depended Jobs:
+ - Map Task Count: 4
+ Merge Task Count: 4
+ Depended Jobs:
+ - Map Task Count: 16
+ Merge Task Count: 4
+ - Map Task Count: 1
+ Merge Task Count: 4
+ - Map Task Count: 1
+ Merge Task Count: 4
+ Master Query:
+ - Plan:
+ Node Type: "Aggregate"
+ Strategy: "Plain"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Relation Name: "pg_merge_job_68720796775"
+ Alias: "pg_merge_job_68720796775"
diff --git a/src/test/regress/expected/multi_mx_explain_0.out b/src/test/regress/expected/multi_mx_explain_0.out
new file mode 100644
index 000000000..489e4c5ca
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_explain_0.out
@@ -0,0 +1,679 @@
+--
+-- MULTI_MX_EXPLAIN
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :master_port
+\a\t
+SET citus.task_executor_type TO 'real-time';
+SET citus.explain_distributed_queries TO on;
+\c - - - :worker_1_port
+-- Function that parses explain output as JSON
+CREATE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that parses explain output as XML
+CREATE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+\c - - - :worker_2_port
+-- Function that parses explain output as JSON
+CREATE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that parses explain output as XML
+CREATE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Distributed Query into pg_merge_job_68720796736
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+Master Query
+ -> Sort
+ Sort Key: COALESCE((sum((COALESCE((sum(intermediate_column_68720796736_1))::bigint, '0'::bigint))))::bigint, '0'::bigint), intermediate_column_68720796736_0
+ -> HashAggregate
+ Group Key: intermediate_column_68720796736_0
+ -> Seq Scan on pg_merge_job_68720796736
+-- Test JSON format
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+[
+ {
+ "Executor": "Real-Time",
+ "Job": {
+ "Task Count": 16,
+ "Tasks Shown": "One of 16",
+ "Tasks": [
+ {
+ "Node": "host=localhost port=57637 dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Group Key": ["l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Relation Name": "lineitem_mx_1220052",
+ "Alias": "lineitem_mx"
+ }
+ ]
+ }
+ }
+ ]
+
+ ]
+ }
+ ]
+ },
+ "Master Query": [
+ {
+ "Plan": {
+ "Node Type": "Sort",
+ "Sort Key": ["COALESCE((sum((COALESCE((sum(intermediate_column_68720796737_1))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "intermediate_column_68720796737_0"],
+ "Plans": [
+ {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Parent Relationship": "Outer",
+ "Group Key": ["intermediate_column_68720796737_0"],
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Relation Name": "pg_merge_job_68720796737",
+ "Alias": "pg_merge_job_68720796737"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ }
+]
+-- Validate JSON format
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+\c - - - :worker_1_port
+-- Test XML format
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+
+
+ Real-Time
+
+ 16
+ One of 16
+
+
+ host=localhost port=57637 dbname=regression
+
+
+
+
+ Aggregate
+ Hashed
+
+ - l_quantity
+
+
+
+ Seq Scan
+ Outer
+ lineitem_mx_1220052
+ lineitem_mx
+
+
+
+
+
+
+
+
+
+
+
+
+ Sort
+
+ - COALESCE((sum((COALESCE((sum(intermediate_column_60130862144_1))::bigint, '0'::bigint))))::bigint, '0'::bigint)
+ - intermediate_column_60130862144_0
+
+
+
+ Aggregate
+ Hashed
+ Outer
+
+ - intermediate_column_60130862144_0
+
+
+
+ Seq Scan
+ Outer
+ pg_merge_job_60130862144
+ pg_merge_job_60130862144
+
+
+
+
+
+
+
+
+
+-- Validate XML format
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+-- Test YAML format
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+- Executor: "Real-Time"
+ Job:
+ Task Count: 16
+ Tasks Shown: "One of 16"
+ Tasks:
+ - Node: "host=localhost port=57637 dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Group Key:
+ - "l_quantity"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Relation Name: "lineitem_mx_1220052"
+ Alias: "lineitem_mx"
+
+ Master Query:
+ - Plan:
+ Node Type: "Sort"
+ Sort Key:
+ - "COALESCE((sum((COALESCE((sum(intermediate_column_60130862146_1))::bigint, '0'::bigint))))::bigint, '0'::bigint)"
+ - "intermediate_column_60130862146_0"
+ Plans:
+ - Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Parent Relationship: "Outer"
+ Group Key:
+ - "intermediate_column_60130862146_0"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Relation Name: "pg_merge_job_60130862146"
+ Alias: "pg_merge_job_60130862146"
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Distributed Query into pg_merge_job_60130862147
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+Master Query
+ -> Sort
+ Sort Key: COALESCE((sum((COALESCE((sum(intermediate_column_60130862147_1))::bigint, '0'::bigint))))::bigint, '0'::bigint), intermediate_column_60130862147_0
+ -> HashAggregate
+ Group Key: intermediate_column_60130862147_0
+ -> Seq Scan on pg_merge_job_60130862147
+\c - - - :worker_2_port
+-- Test verbose
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;
+Distributed Query into pg_merge_job_68720796739
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+ -> Seq Scan on public.lineitem_mx_1220052 lineitem_mx
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+Master Query
+ -> Aggregate
+ Output: (sum(intermediate_column_68720796739_0) / (sum(intermediate_column_68720796739_1) / sum(intermediate_column_68720796739_2)))
+ -> Seq Scan on pg_temp_2.pg_merge_job_68720796739
+ Output: intermediate_column_68720796739_0, intermediate_column_68720796739_1, intermediate_column_68720796739_2
+-- Test join
+EXPLAIN (COSTS FALSE)
+ SELECT * FROM lineitem_mx
+ JOIN orders_mx ON l_orderkey = o_orderkey AND l_quantity < 5.0
+ ORDER BY l_quantity LIMIT 10;
+Distributed Query into pg_merge_job_68720796740
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Limit
+ -> Sort
+ Sort Key: lineitem_mx.l_quantity
+ -> Hash Join
+ Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey)
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+ Filter: (l_quantity < 5.0)
+ -> Hash
+ -> Seq Scan on orders_mx_1220068 orders_mx
+Master Query
+ -> Limit
+ -> Sort
+ Sort Key: intermediate_column_68720796740_4
+ -> Seq Scan on pg_merge_job_68720796740
+-- Test insert
+EXPLAIN (COSTS FALSE)
+ INSERT INTO lineitem_mx VALUES(1,0);
+Distributed Query
+ Executor: Router
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Insert on lineitem_mx_1220052
+ -> Result
+-- Test update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_mx
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Distributed Query
+ Executor: Router
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Update on lineitem_mx_1220052
+ -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem_mx
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Distributed Query
+ Executor: Router
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Delete on lineitem_mx_1220052
+ -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test single-shard SELECT
+EXPLAIN (COSTS FALSE)
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5;
+Distributed Query
+ Executor: Router
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Bitmap Heap Scan on lineitem_mx_1220055 lineitem_mx
+ Recheck Cond: (l_orderkey = 5)
+ -> Bitmap Index Scan on lineitem_mx_pkey_1220055
+ Index Cond: (l_orderkey = 5)
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
+t
+-- Test CREATE TABLE ... AS
+EXPLAIN (COSTS FALSE)
+ CREATE TABLE explain_result AS
+ SELECT * FROM lineitem_mx;
+Distributed Query into pg_merge_job_68720796741
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+Master Query
+ -> Seq Scan on pg_merge_job_68720796741
+-- Test all tasks output
+SET citus.explain_all_tasks TO on;
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
+Distributed Query into pg_merge_job_68720796742
+ Executor: Real-Time
+ Task Count: 16
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220053 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220054 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220055 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220056 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220057 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220058 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220059 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220060 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220061 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220062 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220063 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220064 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220065 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220066 lineitem_mx
+ Filter: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=57638 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220067 lineitem_mx
+ Filter: (l_orderkey > 9030)
+Master Query
+ -> Aggregate
+ -> Seq Scan on pg_merge_job_68720796742
+SELECT true AS valid FROM explain_xml($$
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
+t
+-- Test task tracker
+SET citus.task_executor_type TO 'task-tracker';
+SET citus.explain_all_tasks TO off;
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
+Distributed Query into pg_merge_job_68720796745
+ Executor: Task-Tracker
+ Task Count: 16
+ Tasks Shown: One of 16
+ -> Task
+ Node: host=localhost port=57637 dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_mx_1220052 lineitem_mx
+ Filter: (l_orderkey > 9030)
+Master Query
+ -> Aggregate
+ -> Seq Scan on pg_merge_job_68720796745
+-- Test re-partition join
+SET citus.large_table_shard_count TO 1;
+EXPLAIN (COSTS FALSE)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+Distributed Query into pg_merge_job_68720796750
+ Executor: Task-Tracker
+ Task Count: 4
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 4
+ Merge Task Count: 4
+ -> MapMergeJob
+ Map Task Count: 16
+ Merge Task Count: 4
+ -> MapMergeJob
+ Map Task Count: 1
+ Merge Task Count: 4
+ -> MapMergeJob
+ Map Task Count: 1
+ Merge Task Count: 4
+Master Query
+ -> Aggregate
+ -> Seq Scan on pg_merge_job_68720796750
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+[
+ {
+ "Executor": "Task-Tracker",
+ "Job": {
+ "Task Count": 4,
+ "Tasks Shown": "None, not supported for re-partition queries",
+ "Depended Jobs": [
+ {
+ "Map Task Count": 4,
+ "Merge Task Count": 4,
+ "Depended Jobs": [
+ {
+ "Map Task Count": 16,
+ "Merge Task Count": 4
+ },
+ {
+ "Map Task Count": 1,
+ "Merge Task Count": 4
+ }
+ ]
+ },
+ {
+ "Map Task Count": 1,
+ "Merge Task Count": 4
+ }
+ ]
+ },
+ "Master Query": [
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Plain",
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Relation Name": "pg_merge_job_68720796755",
+ "Alias": "pg_merge_job_68720796755"
+ }
+ ]
+ }
+ }
+ ]
+ }
+]
+SELECT true AS valid FROM explain_json($$
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Distributed-Query>
+    <Executor>Task-Tracker</Executor>
+    <Job>
+      <Task-Count>4</Task-Count>
+      <Tasks-Shown>None, not supported for re-partition queries</Tasks-Shown>
+      <Depended-Jobs>
+        <MapMergeJob>
+          <Map-Task-Count>4</Map-Task-Count>
+          <Merge-Task-Count>4</Merge-Task-Count>
+          <Depended-Jobs>
+            <MapMergeJob>
+              <Map-Task-Count>16</Map-Task-Count>
+              <Merge-Task-Count>4</Merge-Task-Count>
+            </MapMergeJob>
+            <MapMergeJob>
+              <Map-Task-Count>1</Map-Task-Count>
+              <Merge-Task-Count>4</Merge-Task-Count>
+            </MapMergeJob>
+          </Depended-Jobs>
+        </MapMergeJob>
+        <MapMergeJob>
+          <Map-Task-Count>1</Map-Task-Count>
+          <Merge-Task-Count>4</Merge-Task-Count>
+        </MapMergeJob>
+      </Depended-Jobs>
+    </Job>
+    <Master-Query>
+      <Query>
+        <Plan>
+          <Node-Type>Aggregate</Node-Type>
+          <Strategy>Plain</Strategy>
+          <Plans>
+            <Plan>
+              <Node-Type>Seq Scan</Node-Type>
+              <Parent-Relationship>Outer</Parent-Relationship>
+              <Relation-Name>pg_merge_job_68720796765</Relation-Name>
+              <Alias>pg_merge_job_68720796765</Alias>
+            </Plan>
+          </Plans>
+        </Plan>
+      </Query>
+    </Master-Query>
+  </Distributed-Query>
+</explain>
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+- Executor: "Task-Tracker"
+ Job:
+ Task Count: 4
+ Tasks Shown: "None, not supported for re-partition queries"
+ Depended Jobs:
+ - Map Task Count: 4
+ Merge Task Count: 4
+ Depended Jobs:
+ - Map Task Count: 16
+ Merge Task Count: 4
+ - Map Task Count: 1
+ Merge Task Count: 4
+ - Map Task Count: 1
+ Merge Task Count: 4
+ Master Query:
+ - Plan:
+ Node Type: "Aggregate"
+ Strategy: "Plain"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Relation Name: "pg_merge_job_68720796775"
+ Alias: "pg_merge_job_68720796775"
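
Note: the explain_json/explain_xml calls above only check that the distributed EXPLAIN output parses in the requested format. A minimal sketch of such a validator is shown below; it is hypothetical and not necessarily how the test suite's helper is actually defined.

    -- Hypothetical validator: collect EXPLAIN (FORMAT JSON) output and let the
    -- ::jsonb cast raise an error if the output is not well-formed JSON.
    CREATE OR REPLACE FUNCTION explain_json(query text)
    RETURNS jsonb AS $$
    DECLARE
        explain_line text;
        full_output text := '';
    BEGIN
        FOR explain_line IN EXECUTE 'EXPLAIN (COSTS FALSE, FORMAT JSON) ' || query LOOP
            full_output := full_output || explain_line || E'\n';
        END LOOP;
        RETURN full_output::jsonb;
    END;
    $$ LANGUAGE plpgsql;

    -- usage mirrors the tests above: any returned row means the output parsed
    SELECT true AS valid FROM explain_json($$SELECT count(*) FROM lineitem_mx$$);
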
diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out
new file mode 100644
index 000000000..9386a9642
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_metadata.out
@@ -0,0 +1,269 @@
+-- Test creation of mx tables and metadata syncing
+-- get rid of the previously created entries in pg_dist_transaction
+-- for the sake of getting consistent results in this test file
+SELECT recover_prepared_transactions();
+ recover_prepared_transactions
+-------------------------------
+ 0
+(1 row)
+
+CREATE TABLE distributed_mx_table (
+ key text primary key,
+ value jsonb
+);
+CREATE INDEX ON distributed_mx_table USING GIN (value);
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+SET citus.shard_count TO 4;
+SELECT create_distributed_table('distributed_mx_table', 'key');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+-- Verify that we've logged commit records
+SELECT count(*) FROM pg_dist_transaction;
+ count
+-------
+ 5
+(1 row)
+
+-- Confirm that the metadata transactions have been committed
+SELECT recover_prepared_transactions();
+ recover_prepared_transactions
+-------------------------------
+ 0
+(1 row)
+
+-- Verify that the commit records have been removed
+SELECT count(*) FROM pg_dist_transaction;
+ count
+-------
+ 3
+(1 row)
+
+\c - - - :worker_1_port
+\d distributed_mx_table
+Table "public.distributed_mx_table"
+ Column | Type | Modifiers
+--------+-------+-----------
+ key | text | not null
+ value | jsonb |
+Indexes:
+ "distributed_mx_table_pkey" PRIMARY KEY, btree (key)
+ "distributed_mx_table_value_idx" gin (value)
+
+SELECT repmodel FROM pg_dist_partition
+WHERE logicalrelid = 'distributed_mx_table'::regclass;
+ repmodel
+----------
+ s
+(1 row)
+
+SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+WHERE logicalrelid = 'distributed_mx_table'::regclass;
+ count
+-------
+ 4
+(1 row)
+
+\c - - - :worker_2_port
+\d distributed_mx_table
+Table "public.distributed_mx_table"
+ Column | Type | Modifiers
+--------+-------+-----------
+ key | text | not null
+ value | jsonb |
+Indexes:
+ "distributed_mx_table_pkey" PRIMARY KEY, btree (key)
+ "distributed_mx_table_value_idx" gin (value)
+
+SELECT repmodel FROM pg_dist_partition
+WHERE logicalrelid = 'distributed_mx_table'::regclass;
+ repmodel
+----------
+ s
+(1 row)
+
+SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+WHERE logicalrelid = 'distributed_mx_table'::regclass;
+ count
+-------
+ 4
+(1 row)
+
+-- Create a table and then roll back the transaction
+\c - - - :master_port
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+BEGIN;
+CREATE TABLE should_not_exist (
+ key text primary key,
+ value jsonb
+);
+SELECT create_distributed_table('should_not_exist', 'key');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ABORT;
+-- Verify that the table does not exist on the worker
+\c - - - :worker_1_port
+SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist';
+ count
+-------
+ 0
+(1 row)
+
+-- Ensure that we don't allow prepare on a metadata transaction
+\c - - - :master_port
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+BEGIN;
+CREATE TABLE should_not_exist (
+ key text primary key,
+ value jsonb
+);
+SELECT create_distributed_table('should_not_exist', 'key');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+PREPARE TRANSACTION 'this_should_fail';
+ERROR: cannot use 2PC in transactions involving multiple servers
+-- now show that we can create tables and schemas within a single transaction
+BEGIN;
+CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts;
+SET search_path TO citus_mx_schema_for_xacts;
+SET citus.shard_replication_factor TO 1;
+SET citus.shard_count TO 1;
+CREATE TABLE objects_for_xacts (
+ id bigint PRIMARY KEY,
+ name text NOT NULL
+);
+SELECT create_distributed_table('objects_for_xacts', 'id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+COMMIT;
+-- see that the table is actually created and distributed
+\c - - - :worker_1_port
+SELECT repmodel FROM pg_dist_partition
+WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass;
+ repmodel
+----------
+ s
+(1 row)
+
+SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass;
+ count
+-------
+ 1
+(1 row)
+
+\c - - - :master_port
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+-- now show that we can roll back creating an mx table, but the shards remain....
+BEGIN;
+CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts;
+NOTICE: schema "citus_mx_schema_for_xacts" already exists, skipping
+SET search_path TO citus_mx_schema_for_xacts;
+SET citus.shard_replication_factor TO 1;
+SET citus.shard_count TO 2;
+CREATE TABLE objects_for_xacts2 (
+ id bigint PRIMARY KEY,
+ name text NOT NULL
+);
+SELECT create_distributed_table('objects_for_xacts2', 'id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ROLLBACK;
+-- show that the table does not exist on the schema node
+SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts';
+ count
+-------
+ 0
+(1 row)
+
+\c - - - :worker_1_port
+-- the distributed table does not exist on the worker node
+SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts';
+ count
+-------
+ 0
+(1 row)
+
+-- but the shard exists since we do not create shards in a transaction
+SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts';
+ count
+-------
+ 1
+(1 row)
+
+-- make sure that master_drop_all_shards does not work from the worker nodes
+SELECT master_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts');
+ERROR: operation is not allowed on this node
+HINT: Connect to the schema node and run it again.
+-- Ensure pg_dist_transaction is empty for test
+SELECT recover_prepared_transactions();
+ recover_prepared_transactions
+-------------------------------
+ 0
+(1 row)
+
+-- Create some "fake" prepared transactions to recover
+\c - - - :worker_1_port
+BEGIN;
+CREATE TABLE should_abort (value int);
+PREPARE TRANSACTION 'citus_0_should_abort';
+BEGIN;
+CREATE TABLE should_commit (value int);
+PREPARE TRANSACTION 'citus_0_should_commit';
+BEGIN;
+CREATE TABLE should_be_sorted_into_middle (value int);
+PREPARE TRANSACTION 'citus_0_should_be_sorted_into_middle';
+\c - - - :master_port
+-- Add "fake" pg_dist_transaction records and run recovery
+INSERT INTO pg_dist_transaction VALUES (14, 'citus_0_should_commit');
+INSERT INTO pg_dist_transaction VALUES (14, 'citus_0_should_be_forgotten');
+SELECT recover_prepared_transactions();
+NOTICE: recovered a prepared transaction on localhost:57637
+CONTEXT: ROLLBACK PREPARED 'citus_0_should_abort'
+NOTICE: recovered a prepared transaction on localhost:57637
+CONTEXT: ROLLBACK PREPARED 'citus_0_should_be_sorted_into_middle'
+NOTICE: recovered a prepared transaction on localhost:57637
+CONTEXT: COMMIT PREPARED 'citus_0_should_commit'
+ recover_prepared_transactions
+-------------------------------
+ 3
+(1 row)
+
+SELECT count(*) FROM pg_dist_transaction;
+ count
+-------
+ 3
+(1 row)
+
+-- Confirm that transactions were correctly rolled forward
+\c - - - :worker_1_port
+SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort';
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit';
+ count
+-------
+ 1
+(1 row)
+
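
Note: for context on the metadata tests above, the sketch below condenses the MX setup they exercise. The table name is illustrative; the GUCs, UDFs, and worker port are the ones used throughout these tests.

    -- Streaming-replicated ("MX") tables use a single replica per shard and have
    -- their metadata synced to the workers, so workers can plan queries locally.
    SET citus.shard_replication_factor TO 1;
    SET citus.replication_model TO streaming;
    CREATE TABLE events (key text PRIMARY KEY, value jsonb);
    SELECT create_distributed_table('events', 'key');
    -- push pg_dist_partition / pg_dist_shard / placement metadata to a worker
    SELECT start_metadata_sync_to_node('localhost', 57637);
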
diff --git a/src/test/regress/expected/multi_mx_modifications.out b/src/test/regress/expected/multi_mx_modifications.out
new file mode 100644
index 000000000..12360d823
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_modifications.out
@@ -0,0 +1,461 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1330000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1330000;
+-- ===================================================================
+-- test end-to-end modification functionality for mx tables
+-- ===================================================================
+-- basic single-row INSERT
+INSERT INTO limit_orders_mx VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy',
+ 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743;
+ count
+-------
+ 1
+(1 row)
+
+-- now single-row INSERT from a worker
+\c - - - :worker_1_port
+INSERT INTO limit_orders_mx VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy',
+ 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744;
+ count
+-------
+ 1
+(1 row)
+
+-- now single-row INSERT to the other worker
+\c - - - :worker_2_port
+INSERT INTO limit_orders_mx VALUES (32745, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy',
+ 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32745;
+ count
+-------
+ 1
+(1 row)
+
+-- and see all the inserted rows
+SELECT * FROM limit_orders_mx;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+-------+--------+-----------+--------------------------+------+-------------
+ 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69
+ 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69
+ 32745 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69
+(3 rows)
+
+-- basic single-row INSERT with RETURNING
+INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+-------+--------+-----------+--------------------------+------+-------------
+ 32746 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69
+(1 row)
+
+-- INSERT with DEFAULT in the target list
+INSERT INTO limit_orders_mx VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell',
+ DEFAULT);
+SELECT * FROM limit_orders_mx WHERE id = 12756;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+-------+--------+-----------+--------------------------+------+-------------
+ 12756 | MSFT | 10959 | Wed May 08 07:29:23 2013 | sell | 0.00
+(1 row)
+
+-- INSERT with expressions in target list
+INSERT INTO limit_orders_mx VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' +
+ interval '5 hours', 'buy', sqrt(2));
+SELECT * FROM limit_orders_mx WHERE id = 430;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+-----+--------+-----------+--------------------------+------+-----------------
+ 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731
+(1 row)
+
+-- INSERT without partition key
+INSERT INTO limit_orders_mx DEFAULT VALUES;
+ERROR: cannot plan INSERT using row with NULL value in partition column
+-- squelch WARNINGs that contain worker_port
+SET client_min_messages TO ERROR;
+-- INSERT violating NOT NULL constraint
+INSERT INTO limit_orders_mx VALUES (NULL, 'T', 975234, DEFAULT);
+ERROR: cannot plan INSERT using row with NULL value in partition column
+-- INSERT violating column constraint
+INSERT INTO limit_orders_mx VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'sell',
+ -5.00);
+ERROR: new row for relation "limit_orders_mx_1220092" violates check constraint "limit_orders_mx_limit_price_check"
+DETAIL: Failing row contains (18811, BUD, 14962, 2014-04-05 08:32:16, sell, -5.00).
+CONTEXT: while executing command on localhost:57637
+-- INSERT violating primary key constraint
+INSERT INTO limit_orders_mx VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58);
+ERROR: duplicate key value violates unique constraint "limit_orders_mx_pkey_1220093"
+DETAIL: Key (id)=(32743) already exists.
+CONTEXT: while executing command on localhost:57638
+-- INSERT violating primary key constraint, with RETURNING specified.
+INSERT INTO limit_orders_mx VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *;
+ERROR: duplicate key value violates unique constraint "limit_orders_mx_pkey_1220093"
+DETAIL: Key (id)=(32743) already exists.
+CONTEXT: while executing command on localhost:57638
+-- INSERT, with RETURNING specified, failing with a non-constraint error
+INSERT INTO limit_orders_mx VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0;
+ERROR: could not modify any active placements
+SET client_min_messages TO DEFAULT;
+-- commands with non-constant partition values are unsupported
+INSERT INTO limit_orders_mx VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45',
+ 'sell', 0.58);
+ERROR: values given for the partition column must be constants or constant expressions
+-- values for other columns are totally fine
+INSERT INTO limit_orders_mx VALUES (2036, 'GOOG', 5634, now(), 'buy', random());
+-- commands with mutable functions in their quals
+DELETE FROM limit_orders_mx WHERE id = 246 AND bidder_id = (random() * 1000);
+ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE
+-- commands with mutable but non-volatile functions (i.e., STABLE functions) in their quals
+-- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable)
+DELETE FROM limit_orders_mx WHERE id = 246 AND placed_at = current_timestamp::timestamp;
+-- commands with multiple rows are unsupported
+INSERT INTO limit_orders_mx VALUES (DEFAULT), (DEFAULT);
+ERROR: cannot perform distributed planning for the given modification
+DETAIL: Multi-row INSERTs to distributed tables are not supported.
+-- INSERT ... SELECT ... FROM commands are unsupported from workers
+INSERT INTO limit_orders_mx SELECT * FROM limit_orders_mx;
+ERROR: operation is not allowed on this node
+HINT: Connect to the schema node and run it again.
+-- connect back to the other node
+\c - - - :worker_1_port
+-- commands containing a CTE are unsupported
+WITH deleted_orders AS (DELETE FROM limit_orders_mx RETURNING *)
+INSERT INTO limit_orders_mx DEFAULT VALUES;
+ERROR: common table expressions are not supported in distributed modifications
+-- test simple DELETE
+INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246;
+ count
+-------
+ 1
+(1 row)
+
+DELETE FROM limit_orders_mx WHERE id = 246;
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246;
+ count
+-------
+ 0
+(1 row)
+
+-- test simple DELETE with RETURNING
+DELETE FROM limit_orders_mx WHERE id = 430 RETURNING *;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+-----+--------+-----------+--------------------------+------+-----------------
+ 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731
+(1 row)
+
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430;
+ count
+-------
+ 0
+(1 row)
+
+-- DELETE with expression in WHERE clause
+INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246;
+ count
+-------
+ 1
+(1 row)
+
+DELETE FROM limit_orders_mx WHERE id = (2 * 123);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246;
+ count
+-------
+ 0
+(1 row)
+
+-- commands with no constraints on the partition key are not supported
+DELETE FROM limit_orders_mx WHERE bidder_id = 162;
+ERROR: distributed modifications must target exactly one shard
+DETAIL: This command modifies all shards.
+HINT: Consider using an equality filter on partition column "id". You can use master_modify_multiple_shards() to perform multi-shard delete or update operations.
+-- commands with a USING clause are unsupported
+CREATE TABLE bidders ( name text, id bigint );
+DELETE FROM limit_orders_mx USING bidders WHERE limit_orders_mx.id = 246 AND
+ limit_orders_mx.bidder_id = bidders.id AND
+ bidders.name = 'Bernie Madoff';
+ERROR: cannot plan queries that include both regular and partitioned relations
+-- commands containing a CTE are unsupported
+WITH deleted_orders AS (INSERT INTO limit_orders_mx DEFAULT VALUES RETURNING *)
+DELETE FROM limit_orders_mx;
+ERROR: common table expressions are not supported in distributed modifications
+-- cursors are not supported
+DELETE FROM limit_orders_mx WHERE CURRENT OF cursor_name;
+ERROR: distributed modifications must target exactly one shard
+DETAIL: This command modifies all shards.
+HINT: Consider using an equality filter on partition column "id". You can use master_modify_multiple_shards() to perform multi-shard delete or update operations.
+INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+-- simple UPDATE
+UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246;
+SELECT symbol FROM limit_orders_mx WHERE id = 246;
+ symbol
+--------
+ GM
+(1 row)
+
+-- simple UPDATE with RETURNING
+UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+-----+--------+-----------+--------------------------+------+-------------
+ 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69
+(1 row)
+
+-- expression UPDATE
+UPDATE limit_orders_mx SET bidder_id = 6 * 3 WHERE id = 246;
+SELECT bidder_id FROM limit_orders_mx WHERE id = 246;
+ bidder_id
+-----------
+ 18
+(1 row)
+
+-- expression UPDATE with RETURNING
+UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+-----+--------+-----------+--------------------------+------+-------------
+ 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69
+(1 row)
+
+-- multi-column UPDATE
+UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246;
+SELECT kind, limit_price FROM limit_orders_mx WHERE id = 246;
+ kind | limit_price
+------+-------------
+ buy | 0.00
+(1 row)
+
+-- multi-column UPDATE with RETURNING
+UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *;
+ id | symbol | bidder_id | placed_at | kind | limit_price
+-----+--------+-----------+--------------------------+------+-------------
+ 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999
+(1 row)
+
+-- Test that on unique constraint violations, we fail fast
+INSERT INTO limit_orders_mx VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
+INSERT INTO limit_orders_mx VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
+ERROR: duplicate key value violates unique constraint "limit_orders_mx_pkey_1220093"
+DETAIL: Key (id)=(275) already exists.
+CONTEXT: while executing command on localhost:57638
+-- commands with no constraints on the partition key are not supported
+UPDATE limit_orders_mx SET limit_price = 0.00;
+ERROR: distributed modifications must target exactly one shard
+DETAIL: This command modifies all shards.
+HINT: Consider using an equality filter on partition column "id". You can use master_modify_multiple_shards() to perform multi-shard delete or update operations.
+-- attempting to change the partition key is unsupported
+UPDATE limit_orders_mx SET id = 0 WHERE id = 246;
+ERROR: modifying the partition value of rows is not allowed
+-- UPDATEs with a FROM clause are unsupported
+UPDATE limit_orders_mx SET limit_price = 0.00 FROM bidders
+ WHERE limit_orders_mx.id = 246 AND
+ limit_orders_mx.bidder_id = bidders.id AND
+ bidders.name = 'Bernie Madoff';
+ERROR: cannot plan queries that include both regular and partitioned relations
+-- commands containing a CTE are unsupported
+WITH deleted_orders AS (INSERT INTO limit_orders_mx DEFAULT VALUES RETURNING *)
+UPDATE limit_orders_mx SET symbol = 'GM';
+ERROR: common table expressions are not supported in distributed modifications
+SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246;
+ symbol | bidder_id
+--------+-----------
+ GM | 30
+(1 row)
+
+-- updates referencing just a var are supported
+UPDATE limit_orders_mx SET bidder_id = id WHERE id = 246;
+-- updates referencing a column are supported
+UPDATE limit_orders_mx SET bidder_id = bidder_id + 1 WHERE id = 246;
+-- IMMUTABLE functions are allowed
+UPDATE limit_orders_mx SET symbol = LOWER(symbol) WHERE id = 246;
+SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246;
+ symbol | bidder_id
+--------+-----------
+ gm | 247
+(1 row)
+
+-- IMMUTABLE functions are allowed -- even in returning
+UPDATE limit_orders_mx SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol;
+ id | lower | symbol
+-----+-------+--------
+ 246 | gm | GM
+(1 row)
+
+-- connect to the schema node to run the DDL
+\c - - - :master_port
+ALTER TABLE limit_orders_mx ADD COLUMN array_of_values integer[];
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+-- connect back to the other node
+\c - - - :worker_2_port
+-- updates referencing STABLE functions are allowed
+UPDATE limit_orders_mx SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id = 246;
+-- so are binary operators
+UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246;
+-- connect back to the other node
+\c - - - :worker_2_port
+-- immutable function calls with vars are also allowed
+UPDATE limit_orders_mx
+SET array_of_values = immutable_append_mx(array_of_values, 2) WHERE id = 246;
+CREATE FUNCTION stable_append_mx(old_values int[], new_value int)
+RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$
+LANGUAGE plpgsql STABLE;
+-- but STABLE function calls with vars are not allowed
+UPDATE limit_orders_mx
+SET array_of_values = stable_append_mx(array_of_values, 3) WHERE id = 246;
+ERROR: STABLE functions used in UPDATE queries cannot be called with column references
+SELECT array_of_values FROM limit_orders_mx WHERE id = 246;
+ array_of_values
+-----------------
+ {1,2}
+(1 row)
+
+-- STRICT functions work as expected
+CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS
+'SELECT COALESCE($1, 2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT;
+UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246;
+ERROR: null value in column "bidder_id" violates not-null constraint
+DETAIL: Failing row contains (246, GM, null, 2007-07-02 16:32:15, buy, 999, {1,2}).
+CONTEXT: while executing command on localhost:57637
+SELECT array_of_values FROM limit_orders_mx WHERE id = 246;
+ array_of_values
+-----------------
+ {1,2}
+(1 row)
+
+-- connect to the schema node to run the DDL
+\c - - - :master_port
+ALTER TABLE limit_orders_mx DROP array_of_values;
+NOTICE: using one-phase commit for distributed DDL commands
+HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc'
+-- connect back to the other node
+\c - - - :worker_2_port
+-- even in RETURNING
+UPDATE limit_orders_mx SET placed_at = placed_at WHERE id = 246 RETURNING NOW();
+ERROR: non-IMMUTABLE functions are not allowed in the RETURNING clause
+-- cursors are not supported
+UPDATE limit_orders_mx SET symbol = 'GM' WHERE CURRENT OF cursor_name;
+ERROR: distributed modifications must target exactly one shard
+DETAIL: This command modifies all shards.
+HINT: Consider using an equality filter on partition column "id". You can use master_modify_multiple_shards() to perform multi-shard delete or update operations.
+-- check that multi-row UPDATE/DELETEs with RETURNING work
+INSERT INTO multiple_hash_mx VALUES ('0', '1');
+INSERT INTO multiple_hash_mx VALUES ('0', '2');
+INSERT INTO multiple_hash_mx VALUES ('0', '3');
+INSERT INTO multiple_hash_mx VALUES ('0', '4');
+INSERT INTO multiple_hash_mx VALUES ('0', '5');
+INSERT INTO multiple_hash_mx VALUES ('0', '6');
+UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *;
+ category | data
+----------+------
+ 0 | 1-1
+ 0 | 2-1
+ 0 | 3-1
+ 0 | 4-1
+ 0 | 5-1
+ 0 | 6-1
+(6 rows)
+
+DELETE FROM multiple_hash_mx WHERE category = '0' RETURNING *;
+ category | data
+----------+------
+ 0 | 1-1
+ 0 | 2-1
+ 0 | 3-1
+ 0 | 4-1
+ 0 | 5-1
+ 0 | 6-1
+(6 rows)
+
+-- ensure returned row counters are correct
+\set QUIET off
+INSERT INTO multiple_hash_mx VALUES ('1', '1');
+INSERT 0 1
+INSERT INTO multiple_hash_mx VALUES ('1', '2');
+INSERT 0 1
+INSERT INTO multiple_hash_mx VALUES ('1', '3');
+INSERT 0 1
+INSERT INTO multiple_hash_mx VALUES ('2', '1');
+INSERT 0 1
+INSERT INTO multiple_hash_mx VALUES ('2', '2');
+INSERT 0 1
+INSERT INTO multiple_hash_mx VALUES ('2', '3');
+INSERT 0 1
+INSERT INTO multiple_hash_mx VALUES ('2', '3') RETURNING *;
+ category | data
+----------+------
+ 2 | 3
+(1 row)
+
+INSERT 0 1
+-- check that updates return the right number of rows
+-- one row
+UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '1' AND data = '1';
+UPDATE 1
+-- three rows
+UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1';
+UPDATE 3
+-- three rows, with RETURNING
+UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING category;
+ category
+----------
+ 1
+ 1
+ 1
+(3 rows)
+
+UPDATE 3
+-- check
+SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data;
+ category | data
+----------+---------
+ 1 | 1-1-2-2
+ 1 | 2-2-2
+ 1 | 3-2-2
+(3 rows)
+
+-- check that deletes return the right number of rows
+-- one row
+DELETE FROM multiple_hash_mx WHERE category = '2' AND data = '1';
+DELETE 1
+-- three rows
+DELETE FROM multiple_hash_mx WHERE category = '2';
+DELETE 3
+-- three rows, with RETURNING
+DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category;
+ category
+----------
+ 1
+ 1
+ 1
+(3 rows)
+
+DELETE 3
+-- check
+SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data;
+ category | data
+----------+------
+(0 rows)
+
+SELECT * FROM multiple_hash_mx WHERE category = '2' ORDER BY category, data;
+ category | data
+----------+------
+(0 rows)
+
+-- verify interaction of default values, SERIAL, and RETURNING
+\set QUIET on
+INSERT INTO app_analytics_events_mx VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id;
+ id
+------------------
+ 4503599627370497
+(1 row)
+
+INSERT INTO app_analytics_events_mx (app_id, name) VALUES (102, 'Wayz') RETURNING id;
+ id
+------------------
+ 4503599627370498
+(1 row)
+
+INSERT INTO app_analytics_events_mx (app_id, name) VALUES (103, 'Mynt') RETURNING *;
+ id | app_id | name
+------------------+--------+------
+ 4503599627370499 | 103 | Mynt
+(1 row)
+
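
Note: as a recap of the routing rule behind the error hints above, the sketch below contrasts a router-plannable modification with the multi-shard alternative the hints point to.

    -- An equality filter on the partition column resolves to exactly one shard,
    -- so the modification is router-planned and can run from any MX node.
    UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246;
    -- Without such a filter the command would touch every shard; the hints above
    -- suggest master_modify_multiple_shards (run from the schema node) instead.
    SELECT master_modify_multiple_shards(
        'UPDATE limit_orders_mx SET limit_price = 0.00');
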
diff --git a/src/test/regress/expected/multi_mx_modifying_xacts.out b/src/test/regress/expected/multi_mx_modifying_xacts.out
new file mode 100644
index 000000000..f702f2c53
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_modifying_xacts.out
@@ -0,0 +1,421 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1340000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1340000;
+-- ===================================================================
+-- test end-to-end modification functionality for mx tables in transactions
+-- ===================================================================
+-- add some data
+INSERT INTO researchers_mx VALUES (1, 1, 'Donald Knuth');
+INSERT INTO researchers_mx VALUES (2, 1, 'Niklaus Wirth');
+INSERT INTO researchers_mx VALUES (3, 2, 'Tony Hoare');
+INSERT INTO researchers_mx VALUES (4, 2, 'Kenneth Iverson');
+-- replace a researcher, reusing their id on the schema node
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+INSERT INTO researchers_mx VALUES (2, 1, 'John Backus');
+COMMIT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+ name
+-------------
+ John Backus
+(1 row)
+
+-- do it on the worker node as well
+\c - - - :worker_1_port
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 1');
+COMMIT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+ name
+----------------------
+ John Backus Worker 1
+(1 row)
+
+-- do it on the other worker node as well
+\c - - - :worker_2_port
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 2');
+COMMIT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+ name
+----------------------
+ John Backus Worker 2
+(1 row)
+
+\c - - - :master_port
+-- abort a modification
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ABORT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ name
+--------------
+ Donald Knuth
+(1 row)
+
+\c - - - :worker_1_port
+-- abort a modification on the worker node
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ABORT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ name
+--------------
+ Donald Knuth
+(1 row)
+
+\c - - - :worker_2_port
+-- abort a modification on the other worker node
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ABORT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ name
+--------------
+ Donald Knuth
+(1 row)
+
+-- switch back to the first worker node
+\c - - - :worker_1_port
+-- creating savepoints should work...
+BEGIN;
+INSERT INTO researchers_mx VALUES (5, 3, 'Dennis Ritchie');
+SAVEPOINT hire_thompson;
+INSERT INTO researchers_mx VALUES (6, 3, 'Ken Thompson');
+COMMIT;
+SELECT name FROM researchers_mx WHERE lab_id = 3 AND id = 6;
+ name
+--------------
+ Ken Thompson
+(1 row)
+
+-- even if created by PL/pgSQL...
+\set VERBOSITY terse
+BEGIN;
+DO $$
+BEGIN
+ INSERT INTO researchers_mx VALUES (10, 10, 'Edsger Dijkstra');
+EXCEPTION
+ WHEN not_null_violation THEN
+ RAISE NOTICE 'caught not_null_violation';
+END $$;
+COMMIT;
+-- but ROLLBACK TO SAVEPOINT should not
+BEGIN;
+INSERT INTO researchers_mx VALUES (7, 4, 'Jim Gray');
+SAVEPOINT hire_engelbart;
+INSERT INTO researchers_mx VALUES (8, 4, 'Douglas Engelbart');
+ROLLBACK TO hire_engelbart;
+COMMIT;
+ERROR: cannot ROLLBACK TO SAVEPOINT in transactions which modify distributed tables
+SELECT name FROM researchers_mx WHERE lab_id = 4;
+ name
+------
+(0 rows)
+
+BEGIN;
+DO $$
+BEGIN
+ INSERT INTO researchers_mx VALUES (NULL, 10, 'Edsger Dijkstra');
+EXCEPTION
+ WHEN not_null_violation THEN
+ RAISE NOTICE 'caught not_null_violation';
+END $$;
+NOTICE: caught not_null_violation
+COMMIT;
+ERROR: could not commit transaction for shard 1220100 on any active node
+\set VERBOSITY default
+-- should be valid to edit labs_mx after researchers_mx...
+BEGIN;
+INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');
+INSERT INTO labs_mx VALUES (5, 'Los Alamos');
+COMMIT;
+SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id;
+ id | lab_id | name | id | name
+----+--------+-------------------+----+------------
+ 8 | 5 | Douglas Engelbart | 5 | Los Alamos
+(1 row)
+
+-- but not the other way around (would require expanding xact participants)...
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport');
+ERROR: no transaction participant matches localhost:57638
+DETAIL: Transactions which modify distributed tables may only target nodes affected by the modification command which began the transaction.
+COMMIT;
+-- have the same test on the other worker node
+\c - - - :worker_2_port
+-- should be valid to edit labs_mx after researchers_mx...
+BEGIN;
+INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');
+INSERT INTO labs_mx VALUES (5, 'Los Alamos');
+COMMIT;
+SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id;
+ id | lab_id | name | id | name
+----+--------+-------------------+----+------------
+ 8 | 5 | Douglas Engelbart | 5 | Los Alamos
+ 8 | 5 | Douglas Engelbart | 5 | Los Alamos
+ 8 | 5 | Douglas Engelbart | 5 | Los Alamos
+ 8 | 5 | Douglas Engelbart | 5 | Los Alamos
+(4 rows)
+
+-- but not the other way around (would require expanding xact participants)...
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport');
+ERROR: no transaction participant matches localhost:57638
+DETAIL: Transactions which modify distributed tables may only target nodes affected by the modification command which began the transaction.
+COMMIT;
+-- switch back to the worker node
+\c - - - :worker_1_port
+-- this logic doesn't apply to router SELECTs occurring after a modification:
+-- selecting from the modified node is fine...
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+SELECT count(*) FROM researchers_mx WHERE lab_id = 6;
+ count
+-------
+ 0
+(1 row)
+
+ABORT;
+-- the restriction does apply to DDL, though
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+ALTER TABLE labs_mx ADD COLUMN text motto;
+ERROR: distributed DDL commands must not appear within transaction blocks containing single-shard data modifications
+COMMIT;
+-- doesn't apply to COPY after modifications
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+\copy labs_mx from stdin delimiter ','
+COMMIT;
+-- copy will also work if it comes before any modifications
+BEGIN;
+\copy labs_mx from stdin delimiter ','
+SELECT name FROM labs_mx WHERE id = 10;
+ name
+------------------
+ Weyland-Yutani-1
+ Weyland-Yutani-2
+(2 rows)
+
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+COMMIT;
+\c - - - :worker_1_port
+-- test primary key violations
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (1, 'orange');
+ERROR: duplicate key value violates unique constraint "objects_mx_pkey_1220103"
+DETAIL: Key (id)=(1) already exists.
+CONTEXT: while executing command on localhost:57637
+COMMIT;
+-- data shouldn't have persisted...
+SELECT * FROM objects_mx WHERE id = 1;
+ id | name
+----+------
+(0 rows)
+
+-- same test on the second worker node
+\c - - - :worker_2_port
+-- test primary key violations
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (1, 'orange');
+ERROR: duplicate key value violates unique constraint "objects_mx_pkey_1220103"
+DETAIL: Key (id)=(1) already exists.
+CONTEXT: while executing command on localhost:57637
+COMMIT;
+-- data shouldn't have persisted...
+SELECT * FROM objects_mx WHERE id = 1;
+ id | name
+----+------
+(0 rows)
+
+-- create trigger on one worker to reject certain values
+\c - - - :worker_1_port
+CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$
+ BEGIN
+ IF (NEW.name = 'BAD') THEN
+ RAISE 'illegal value';
+ END IF;
+
+ RETURN NEW;
+ END;
+$rb$ LANGUAGE plpgsql;
+CREATE CONSTRAINT TRIGGER reject_bad_mx
+AFTER INSERT ON objects_mx_1220103
+DEFERRABLE INITIALLY IMMEDIATE
+FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
+-- test partial failure; statement 1 succeeds, statement 2 fails
+\set VERBOSITY terse
+BEGIN;
+INSERT INTO labs_mx VALUES (7, 'E Corp');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+WARNING: illegal value
+ERROR: could not modify any active placements
+COMMIT;
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 2;
+ id | name
+----+------
+(0 rows)
+
+SELECT * FROM labs_mx WHERE id = 7;
+ id | name
+----+------
+(0 rows)
+
+-- same failure test from worker 2
+\c - - - :worker_2_port
+-- test partial failure; statement 1 succeeds, statement 2 fails
+BEGIN;
+INSERT INTO labs_mx VALUES (7, 'E Corp');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+WARNING: illegal value
+ERROR: could not modify any active placements
+COMMIT;
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 2;
+ id | name
+----+------
+(0 rows)
+
+SELECT * FROM labs_mx WHERE id = 7;
+ id | name
+----+------
+(0 rows)
+
+\c - - - :worker_1_port
+-- what if there are errors on different shards at different times?
+\c - - - :worker_1_port
+CREATE CONSTRAINT TRIGGER reject_bad_mx
+AFTER INSERT ON labs_mx_1220102
+DEFERRABLE INITIALLY IMMEDIATE
+FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+WARNING: illegal value
+ERROR: could not modify any active placements
+INSERT INTO labs_mx VALUES (8, 'Aperture Science');
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+INSERT INTO labs_mx VALUES (9, 'BAD');
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+COMMIT;
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 1;
+ id | name
+----+------
+(0 rows)
+
+SELECT * FROM labs_mx WHERE id = 8;
+ id | name
+----+------
+(0 rows)
+
+-- same test from the other worker
+\c - - - :worker_2_port
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+WARNING: illegal value
+ERROR: could not modify any active placements
+INSERT INTO labs_mx VALUES (8, 'Aperture Science');
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+INSERT INTO labs_mx VALUES (9, 'BAD');
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+COMMIT;
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 1;
+ id | name
+----+------
+(0 rows)
+
+SELECT * FROM labs_mx WHERE id = 8;
+ id | name
+----+------
+(0 rows)
+
+-- what if the failures happen at COMMIT time?
+\c - - - :worker_1_port
+DROP TRIGGER reject_bad_mx ON objects_mx_1220103;
+CREATE CONSTRAINT TRIGGER reject_bad_mx
+AFTER INSERT ON objects_mx_1220103
+DEFERRABLE INITIALLY DEFERRED
+FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
+-- should be the same story as before, just at COMMIT time
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+INSERT INTO labs_mx VALUES (9, 'Umbrella Corporation');
+COMMIT;
+WARNING: illegal value
+WARNING: failed to commit transaction on localhost:57637
+WARNING: could not commit transaction for shard 1220103 on any active node
+WARNING: could not commit transaction for shard 1220102 on any active node
+ERROR: could not commit transaction on any active node
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 2;
+ id | name
+----+------
+(0 rows)
+
+SELECT * FROM labs_mx WHERE id = 7;
+ id | name
+----+------
+(0 rows)
+
+DROP TRIGGER reject_bad_mx ON labs_mx_1220102;
+CREATE CONSTRAINT TRIGGER reject_bad_mx
+AFTER INSERT ON labs_mx_1220102
+DEFERRABLE INITIALLY DEFERRED
+FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+INSERT INTO labs_mx VALUES (8, 'Aperture Science');
+INSERT INTO labs_mx VALUES (9, 'BAD');
+COMMIT;
+WARNING: illegal value
+WARNING: failed to commit transaction on localhost:57637
+WARNING: could not commit transaction for shard 1220103 on any active node
+WARNING: could not commit transaction for shard 1220102 on any active node
+ERROR: could not commit transaction on any active node
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 1;
+ id | name
+----+------
+(0 rows)
+
+SELECT * FROM labs_mx WHERE id = 8;
+ id | name
+----+------
+(0 rows)
+
+-- what if one shard (objects_mx) succeeds but another (labs_mx) completely fails?
+\c - - - :worker_1_port
+DROP TRIGGER reject_bad_mx ON objects_mx_1220103;
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO labs_mx VALUES (8, 'Aperture Science');
+INSERT INTO labs_mx VALUES (9, 'BAD');
+COMMIT;
+WARNING: illegal value
+WARNING: failed to commit transaction on localhost:57637
+WARNING: could not commit transaction for shard 1220103 on any active node
+WARNING: could not commit transaction for shard 1220102 on any active node
+ERROR: could not commit transaction on any active node
+-- no data should persist
+SELECT * FROM objects_mx WHERE id = 1;
+ id | name
+----+------
+(0 rows)
+
+SELECT * FROM labs_mx WHERE id = 8;
+ id | name
+----+------
+(0 rows)
+
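
Note: the COMMIT-time failures above are produced by deferring the constraint trigger, so the per-statement INSERTs succeed and the rejection only surfaces when the shard placement tries to commit. The pattern, condensed from the test itself:

    -- reject_bad_mx() raises for the value 'BAD'; deferring the trigger moves
    -- that check to commit time, which is what yields the "could not commit
    -- transaction ..." warnings instead of an immediate statement error.
    CREATE CONSTRAINT TRIGGER reject_bad_mx
    AFTER INSERT ON objects_mx_1220103
    DEFERRABLE INITIALLY DEFERRED
    FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
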
diff --git a/src/test/regress/expected/multi_mx_reference_table.out b/src/test/regress/expected/multi_mx_reference_table.out
new file mode 100644
index 000000000..34c39f1b0
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_reference_table.out
@@ -0,0 +1,840 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
+\c - - - :master_port
+CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_reference_table('reference_table_test');
+ create_reference_table
+------------------------
+
+(1 row)
+
+INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02');
+INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03');
+INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04');
+INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05');
+\c - - - :worker_1_port
+-- run some queries on top of the data
+SELECT
+ *
+FROM
+ reference_table_test;
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+ 2 | 2 | 2 | Fri Dec 02 00:00:00 2016
+ 3 | 3 | 3 | Sat Dec 03 00:00:00 2016
+ 4 | 4 | 4 | Sun Dec 04 00:00:00 2016
+ 5 | 5 | 5 | Mon Dec 05 00:00:00 2016
+(5 rows)
+
+SELECT
+ *
+FROM
+ reference_table_test
+WHERE
+ value_1 = 1;
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+(1 row)
+
+SELECT
+ value_1,
+ value_2
+FROM
+ reference_table_test
+ORDER BY
+ 2 ASC LIMIT 3;
+ value_1 | value_2
+---------+---------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(3 rows)
+
+SELECT
+ value_1, value_3
+FROM
+ reference_table_test
+WHERE
+ value_2 >= 4
+ORDER BY
+ 2 LIMIT 3;
+ value_1 | value_3
+---------+---------
+ 4 | 4
+ 5 | 5
+(2 rows)
+
+SELECT
+ value_1, 15 * value_2
+FROM
+ reference_table_test
+ORDER BY
+ 2 ASC
+LIMIT 2;
+ value_1 | ?column?
+---------+----------
+ 1 | 15
+ 2 | 30
+(2 rows)
+
+SELECT
+ value_1, 15 * value_2
+FROM
+ reference_table_test
+ORDER BY
+ 2 ASC LIMIT 2 OFFSET 2;
+ value_1 | ?column?
+---------+----------
+ 3 | 45
+ 4 | 60
+(2 rows)
+
+SELECT
+ value_2, value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 = 2 OR value_2 = 3;
+ value_2 | value_4
+---------+--------------------------
+ 2 | Fri Dec 02 00:00:00 2016
+ 3 | Sat Dec 03 00:00:00 2016
+(2 rows)
+
+SELECT
+ value_2, value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 = 2 AND value_2 = 3;
+ value_2 | value_4
+---------+---------
+(0 rows)
+
+SELECT
+ value_2, value_4
+FROM
+ reference_table_test
+WHERE
+ value_3 = '2' OR value_1 = 3;
+ value_2 | value_4
+---------+--------------------------
+ 2 | Fri Dec 02 00:00:00 2016
+ 3 | Sat Dec 03 00:00:00 2016
+(2 rows)
+
+SELECT
+ value_2, value_4
+FROM
+ reference_table_test
+WHERE
+ (
+ value_3 = '2' OR value_1 = 3
+ )
+ AND FALSE;
+ value_2 | value_4
+---------+---------
+(0 rows)
+
+SELECT
+ *
+FROM
+ reference_table_test
+WHERE
+ value_2 IN
+ (
+ SELECT
+ value_3::FLOAT
+ FROM
+ reference_table_test
+ )
+ AND value_1 < 3;
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+ 2 | 2 | 2 | Fri Dec 02 00:00:00 2016
+(2 rows)
+
+SELECT
+ value_4
+FROM
+ reference_table_test
+WHERE
+ value_3 IN
+ (
+ '1', '2'
+ );
+ value_4
+--------------------------
+ Thu Dec 01 00:00:00 2016
+ Fri Dec 02 00:00:00 2016
+(2 rows)
+
+SELECT
+ date_part('day', value_4)
+FROM
+ reference_table_test
+WHERE
+ value_3 IN
+ (
+ '5', '2'
+ );
+ date_part
+-----------
+ 2
+ 5
+(2 rows)
+
+SELECT
+ value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 <= 2 AND value_2 >= 4;
+ value_4
+---------
+(0 rows)
+
+SELECT
+ value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 <= 20 AND value_2 >= 4;
+ value_4
+--------------------------
+ Sun Dec 04 00:00:00 2016
+ Mon Dec 05 00:00:00 2016
+(2 rows)
+
+SELECT
+ value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 >= 5 AND value_2 <= random();
+ value_4
+---------
+(0 rows)
+
+SELECT
+ value_1
+FROM
+ reference_table_test
+WHERE
+ value_4 BETWEEN '2016-12-01' AND '2016-12-03';
+ value_1
+---------
+ 1
+ 2
+ 3
+(3 rows)
+
+SELECT
+ value_1
+FROM
+ reference_table_test
+WHERE
+ FALSE;
+ value_1
+---------
+(0 rows)
+
+SELECT
+ value_1
+FROM
+ reference_table_test
+WHERE
+ int4eq(1, 2);
+ value_1
+---------
+(0 rows)
+
+-- rename output columns and do some operations
+SELECT
+ value_1 as id, value_2 * 15 as age
+FROM
+ reference_table_test;
+ id | age
+----+-----
+ 1 | 15
+ 2 | 30
+ 3 | 45
+ 4 | 60
+ 5 | 75
+(5 rows)
+
+-- queries with CTEs are supported
+WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3)
+SELECT
+ *
+FROM
+ some_data;
+ value_2 | value_4
+---------+--------------------------
+ 3 | Sat Dec 03 00:00:00 2016
+ 4 | Sun Dec 04 00:00:00 2016
+ 5 | Mon Dec 05 00:00:00 2016
+(3 rows)
+
+-- queries with CTEs are supported even if CTE is not referenced inside query
+WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3)
+SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1;
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+(1 row)
+
+-- queries that involve functions in the FROM clause are supported if they go to a single worker.
+SELECT
+ *
+FROM
+ reference_table_test, position('om' in 'Thomas')
+WHERE
+ value_1 = 1;
+ value_1 | value_2 | value_3 | value_4 | position
+---------+---------+---------+--------------------------+----------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3
+(1 row)
+
+SELECT
+ *
+FROM
+ reference_table_test, position('om' in 'Thomas')
+WHERE
+ value_1 = 1 OR value_1 = 2;
+ value_1 | value_2 | value_3 | value_4 | position
+---------+---------+---------+--------------------------+----------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3
+ 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3
+(2 rows)
+
+-- set operations are supported
+(SELECT * FROM reference_table_test WHERE value_1 = 1)
+UNION
+(SELECT * FROM reference_table_test WHERE value_1 = 3);
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+ 3 | 3 | 3 | Sat Dec 03 00:00:00 2016
+(2 rows)
+
+(SELECT * FROM reference_table_test WHERE value_1 = 1)
+EXCEPT
+(SELECT * FROM reference_table_test WHERE value_1 = 3);
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+(1 row)
+
+(SELECT * FROM reference_table_test WHERE value_1 = 1)
+INTERSECT
+(SELECT * FROM reference_table_test WHERE value_1 = 3);
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+---------
+(0 rows)
+
+-- to make the aggregation tests more interesting, ingest some more data
+\c - - - :master_port
+INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02');
+INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03');
+\c - - - :worker_1_port
+-- some aggregations
+SELECT
+ value_4, SUM(value_2)
+FROM
+ reference_table_test
+GROUP BY
+ value_4
+HAVING
+ SUM(value_2) > 3
+ORDER BY
+ 1;
+ value_4 | sum
+--------------------------+-----
+ Fri Dec 02 00:00:00 2016 | 4
+ Sat Dec 03 00:00:00 2016 | 6
+ Sun Dec 04 00:00:00 2016 | 4
+ Mon Dec 05 00:00:00 2016 | 5
+(4 rows)
+
+SELECT
+ value_4,
+ value_3,
+ SUM(value_2)
+FROM
+ reference_table_test
+GROUP BY
+ GROUPING sets ((value_4), (value_3))
+ORDER BY 1, 2, 3;
+ value_4 | value_3 | sum
+--------------------------+---------+-----
+ Thu Dec 01 00:00:00 2016 | | 2
+ Fri Dec 02 00:00:00 2016 | | 4
+ Sat Dec 03 00:00:00 2016 | | 6
+ Sun Dec 04 00:00:00 2016 | | 4
+ Mon Dec 05 00:00:00 2016 | | 5
+ | 1 | 2
+ | 2 | 4
+ | 3 | 6
+ | 4 | 4
+ | 5 | 5
+(10 rows)
+
+-- distinct clauses also work fine
+SELECT DISTINCT
+ value_4
+FROM
+ reference_table_test
+ORDER BY
+ 1;
+ value_4
+--------------------------
+ Thu Dec 01 00:00:00 2016
+ Fri Dec 02 00:00:00 2016
+ Sat Dec 03 00:00:00 2016
+ Sun Dec 04 00:00:00 2016
+ Mon Dec 05 00:00:00 2016
+(5 rows)
+
+-- window functions are also supported
+SELECT
+ value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4)
+FROM
+ reference_table_test;
+ value_4 | rank
+--------------------------+------
+ Thu Dec 01 00:00:00 2016 | 1
+ Thu Dec 01 00:00:00 2016 | 1
+ Fri Dec 02 00:00:00 2016 | 1
+ Fri Dec 02 00:00:00 2016 | 1
+ Sat Dec 03 00:00:00 2016 | 1
+ Sat Dec 03 00:00:00 2016 | 1
+ Sun Dec 04 00:00:00 2016 | 1
+ Mon Dec 05 00:00:00 2016 | 1
+(8 rows)
+
+-- window functions are also supported
+SELECT
+ value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4)
+FROM
+ reference_table_test;
+ value_4 | avg
+--------------------------+------------------------
+ Thu Dec 01 00:00:00 2016 | 1.00000000000000000000
+ Thu Dec 01 00:00:00 2016 | 1.00000000000000000000
+ Fri Dec 02 00:00:00 2016 | 2.0000000000000000
+ Fri Dec 02 00:00:00 2016 | 2.0000000000000000
+ Sat Dec 03 00:00:00 2016 | 3.0000000000000000
+ Sat Dec 03 00:00:00 2016 | 3.0000000000000000
+ Sun Dec 04 00:00:00 2016 | 4.0000000000000000
+ Mon Dec 05 00:00:00 2016 | 5.0000000000000000
+(8 rows)
+
+SELECT
+ count(DISTINCT CASE
+ WHEN
+ value_2 >= 3
+ THEN
+ value_2
+ ELSE
+ NULL
+ END) as c
+ FROM
+ reference_table_test;
+ c
+---
+ 3
+(1 row)
+
+SELECT
+ value_1,
+ count(DISTINCT CASE
+ WHEN
+ value_2 >= 3
+ THEN
+ value_2
+ ELSE
+ NULL
+ END) as c
+ FROM
+ reference_table_test
+ GROUP BY
+ value_1
+ ORDER BY
+ 1;
+ value_1 | c
+---------+---
+ 1 | 0
+ 2 | 0
+ 3 | 1
+ 4 | 1
+ 5 | 1
+(5 rows)
+
+-- selects inside a transaction works fine as well
+BEGIN;
+SELECT * FROM reference_table_test;
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+ 2 | 2 | 2 | Fri Dec 02 00:00:00 2016
+ 3 | 3 | 3 | Sat Dec 03 00:00:00 2016
+ 4 | 4 | 4 | Sun Dec 04 00:00:00 2016
+ 5 | 5 | 5 | Mon Dec 05 00:00:00 2016
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+ 2 | 2 | 2 | Fri Dec 02 00:00:00 2016
+ 3 | 3 | 3 | Sat Dec 03 00:00:00 2016
+(8 rows)
+
+SELECT * FROM reference_table_test WHERE value_1 = 1;
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+(2 rows)
+
+END;
+-- cursor queries also work fine
+BEGIN;
+DECLARE test_cursor CURSOR FOR
+ SELECT *
+ FROM reference_table_test
+ WHERE value_1 = 1 OR value_1 = 2
+ ORDER BY value_1;
+FETCH test_cursor;
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+(1 row)
+
+FETCH ALL test_cursor;
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+--------------------------
+ 1 | 1 | 1 | Thu Dec 01 00:00:00 2016
+ 2 | 2 | 2 | Fri Dec 02 00:00:00 2016
+ 2 | 2 | 2 | Fri Dec 02 00:00:00 2016
+(3 rows)
+
+FETCH test_cursor; -- fetch one row after the last
+ value_1 | value_2 | value_3 | value_4
+---------+---------+---------+---------
+(0 rows)
+
+END;
+-- the SELECT inside a CREATE TABLE ... AS query can be router plannable
+CREATE TEMP TABLE temp_reference_test as
+ SELECT *
+ FROM reference_table_test
+ WHERE value_1 = 1;
+\c - - - :master_port
+-- all kinds of joins are supported among reference tables
+-- first create two more tables
+CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_reference_table('reference_table_test_second');
+ create_reference_table
+------------------------
+
+(1 row)
+
+CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_reference_table('reference_table_test_third');
+ create_reference_table
+------------------------
+
+(1 row)
+
+-- ingest some data to both tables
+INSERT INTO reference_table_test_second VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO reference_table_test_second VALUES (2, 2.0, '2', '2016-12-02');
+INSERT INTO reference_table_test_second VALUES (3, 3.0, '3', '2016-12-03');
+INSERT INTO reference_table_test_third VALUES (4, 4.0, '4', '2016-12-04');
+INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05');
+\c - - - :worker_2_port
+-- some very basic tests
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2
+WHERE
+ t1.value_2 = t2.value_2
+ORDER BY
+ 1;
+ value_1
+---------
+ 1
+ 2
+ 3
+(3 rows)
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_third t3
+WHERE
+ t1.value_2 = t3.value_2
+ORDER BY
+ 1;
+ value_1
+---------
+ 4
+ 5
+(2 rows)
+
+SELECT
+ DISTINCT t2.value_1
+FROM
+ reference_table_test_second t2, reference_table_test_third t3
+WHERE
+ t2.value_2 = t3.value_2
+ORDER BY
+ 1;
+ value_1
+---------
+(0 rows)
+
+-- join on different columns and different data types via casts
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2
+WHERE
+ t1.value_2 = t2.value_1
+ORDER BY
+ 1;
+ value_1
+---------
+ 1
+ 2
+ 3
+(3 rows)
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2
+WHERE
+ t1.value_2 = t2.value_3::int
+ORDER BY
+ 1;
+ value_1
+---------
+ 1
+ 2
+ 3
+(3 rows)
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2
+WHERE
+ t1.value_2 = date_part('day', t2.value_4)
+ORDER BY
+ 1;
+ value_1
+---------
+ 1
+ 2
+ 3
+(3 rows)
+
+-- ingest a common row to see more meaningful results with joins involving 3 tables
+\c - - - :master_port
+INSERT INTO reference_table_test_third VALUES (3, 3.0, '3', '2016-12-03');
+\c - - - :worker_1_port
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3
+WHERE
+ t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2
+ORDER BY
+ 1;
+ value_1
+---------
+ 3
+(1 row)
+
+-- same query on different columns
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3
+WHERE
+ t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1
+ORDER BY
+ 1;
+ value_1
+---------
+ 3
+(1 row)
+
+-- with the JOIN syntax
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1 JOIN reference_table_test_second t2 USING (value_1)
+ JOIN reference_table_test_third t3 USING (value_1)
+ORDER BY
+ 1;
+ value_1
+---------
+ 3
+(1 row)
+
+-- and left/right joins
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1 LEFT JOIN reference_table_test_second t2 USING (value_1)
+ LEFT JOIN reference_table_test_third t3 USING (value_1)
+ORDER BY
+ 1;
+ value_1
+---------
+ 1
+ 2
+ 3
+ 4
+ 5
+(5 rows)
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1 RIGHT JOIN reference_table_test_second t2 USING (value_1)
+ RIGHT JOIN reference_table_test_third t3 USING (value_1)
+ORDER BY
+ 1;
+ value_1
+---------
+ 3
+
+(2 rows)
+
+\c - - - :master_port
+SET citus.shard_count TO 6;
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_distributed_table('colocated_table_test', 'value_1');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_distributed_table('colocated_table_test_2', 'value_1');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+DELETE FROM reference_table_test;
+INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02');
+INSERT INTO colocated_table_test VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO colocated_table_test VALUES (2, 2.0, '2', '2016-12-02');
+INSERT INTO colocated_table_test_2 VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO colocated_table_test_2 VALUES (2, 2.0, '2', '2016-12-02');
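+-- Both tables above use the same shard count, replication factor and
+-- distribution column type, so they should end up in one colocation group.
+-- A minimal sanity-check sketch (not part of the original test run); it
+-- assumes pg_dist_partition exposes a colocationid column, as recent Citus
+-- versions do:
+--   SELECT logicalrelid, colocationid
+--   FROM pg_catalog.pg_dist_partition
+--   WHERE logicalrelid::text LIKE 'colocated_table_test%';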
+\c - - - :worker_1_port
+SET client_min_messages TO DEBUG1;
+SET citus.log_multi_join_order TO TRUE;
+SELECT
+ reference_table_test.value_1
+FROM
+ reference_table_test, colocated_table_test
+WHERE
+ colocated_table_test.value_1 = reference_table_test.value_1;
+LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ]
+ value_1
+---------
+ 1
+ 2
+(2 rows)
+
+SELECT
+ colocated_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test
+WHERE
+ colocated_table_test.value_2 = reference_table_test.value_2;
+LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ]
+ value_2
+---------
+ 1
+ 2
+(2 rows)
+
+SELECT
+ colocated_table_test.value_2
+FROM
+ colocated_table_test, reference_table_test
+WHERE
+ reference_table_test.value_1 = colocated_table_test.value_1;
+LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ]
+ value_2
+---------
+ 1
+ 2
+(2 rows)
+
+SELECT
+ colocated_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test, colocated_table_test_2
+WHERE
+ colocated_table_test.value_2 = reference_table_test.value_2;
+LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ cartesian product "colocated_table_test_2" ]
+ERROR: cannot perform distributed planning on this query
+DETAIL: Cartesian products are currently unsupported
+SELECT
+ colocated_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test, colocated_table_test_2
+WHERE
+ colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2;
+LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ local partition join "colocated_table_test_2" ]
+ value_2
+---------
+ 1
+ 2
+(2 rows)
+
+SET citus.task_executor_type to "task-tracker";
+SELECT
+ colocated_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test, colocated_table_test_2
+WHERE
+ colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2;
+LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ dual partition join "colocated_table_test_2" ]
+ value_2
+---------
+ 1
+ 2
+(2 rows)
+
+SELECT
+ reference_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test, colocated_table_test_2
+WHERE
+ colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1;
+LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ dual partition join "colocated_table_test_2" ]
+ value_2
+---------
+ 1
+ 2
+(2 rows)
+
+SET client_min_messages TO NOTICE;
+SET citus.log_multi_join_order TO FALSE;
+-- clean up tables
+\c - - - :master_port
+DROP TABLE reference_table_test, reference_table_test_second, reference_table_test_third;
diff --git a/src/test/regress/expected/multi_mx_repartition_join_w1.out b/src/test/regress/expected/multi_mx_repartition_join_w1.out
new file mode 100644
index 000000000..f0057e5fc
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_repartition_join_w1.out
@@ -0,0 +1,13 @@
+-- Test two concurrent repartition joins from two different workers
+-- This test runs the below query from the :worker_1_port and the
+-- concurrent test runs the same query on :worker_2_port. Note that, both
+-- tests use the same sequence ids but the queries should not fail.
+\c - - - :worker_1_port
+SET citus.task_executor_type TO "task-tracker";
+CREATE TEMP TABLE t1 AS
+SELECT
+ l1.l_comment
+FROM
+ lineitem_mx l1, orders_mx l2
+WHERE
+ l1.l_comment = l2.o_comment;
diff --git a/src/test/regress/expected/multi_mx_repartition_join_w2.out b/src/test/regress/expected/multi_mx_repartition_join_w2.out
new file mode 100644
index 000000000..4913108fa
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_repartition_join_w2.out
@@ -0,0 +1,13 @@
+-- Test two concurrent repartition joins from two different workers
+-- This test runs the below query from the :worker_2_port and the
+-- concurrent test runs the same query on :worker_1_port. Note that, both
+-- tests use the same sequence ids but the queries should not fail.
+\c - - - :worker_2_port
+SET citus.task_executor_type TO "task-tracker";
+CREATE TEMP TABLE t1 AS
+SELECT
+ l1.l_comment
+FROM
+ lineitem_mx l1, orders_mx l2
+WHERE
+ l1.l_comment = l2.o_comment;
diff --git a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out
new file mode 100644
index 000000000..c0bf72d1b
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out
@@ -0,0 +1,210 @@
+--
+-- MULTI_MX_REPARTITION_UDT_PREPARE
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
+-- START type creation
+CREATE TYPE test_udt AS (i integer, i2 integer);
+-- ... as well as a function to use as its comparator...
+CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
+AS 'select $1.i = $2.i AND $1.i2 = $2.i2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+-- ... use that function to create a custom equality operator...
+CREATE OPERATOR = (
+ LEFTARG = test_udt,
+ RIGHTARG = test_udt,
+ PROCEDURE = equal_test_udt_function,
+ COMMUTATOR = =,
+ HASHES
+);
+-- ... and create a custom operator family for hash indexes...
+CREATE OPERATOR FAMILY tudt_op_fam USING hash;
+-- ... create a test HASH function. Though it is a poor hash function,
+-- it is acceptable for our tests
+CREATE FUNCTION test_udt_hash(test_udt) RETURNS int
+AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+-- We need to define two different operator classes for the composite types
+-- One uses BTREE the other uses HASH
+CREATE OPERATOR CLASS tudt_op_fam_clas3
+DEFAULT FOR TYPE test_udt USING BTREE AS
+OPERATOR 3 = (test_udt, test_udt);
+CREATE OPERATOR CLASS tudt_op_fam_class
+DEFAULT FOR TYPE test_udt USING HASH AS
+OPERATOR 1 = (test_udt, test_udt),
+FUNCTION 1 test_udt_hash(test_udt);
+-- END type creation
+CREATE TABLE repartition_udt (
+ pk integer not null,
+ udtcol test_udt,
+ txtcol text
+);
+CREATE TABLE repartition_udt_other (
+ pk integer not null,
+ udtcol test_udt,
+ txtcol text
+);
+-- Connect directly to a worker, create and drop the type, then proceed
+-- with the same type creation as above; this ensures the type's OID on
+-- the worker differs from the one on the master.
+\c - - - :worker_1_port
+CREATE TYPE test_udt AS (i integer, i2 integer);
+DROP TYPE test_udt CASCADE;
+-- START type creation
+CREATE TYPE test_udt AS (i integer, i2 integer);
+-- ... as well as a function to use as its comparator...
+CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
+AS 'select $1.i = $2.i AND $1.i2 = $2.i2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+-- ... use that function to create a custom equality operator...
+CREATE OPERATOR = (
+ LEFTARG = test_udt,
+ RIGHTARG = test_udt,
+ PROCEDURE = equal_test_udt_function,
+ COMMUTATOR = =,
+ HASHES
+);
+-- ... and create a custom operator family for hash indexes...
+CREATE OPERATOR FAMILY tudt_op_fam USING hash;
+-- ... create a test HASH function. Though it is a poor hash function,
+-- it is acceptable for our tests
+CREATE FUNCTION test_udt_hash(test_udt) RETURNS int
+AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+-- We need to define two different operator classes for the composite types
+-- One uses BTREE the other uses HASH
+CREATE OPERATOR CLASS tudt_op_fam_clas3
+DEFAULT FOR TYPE test_udt USING BTREE AS
+OPERATOR 3 = (test_udt, test_udt);
+CREATE OPERATOR CLASS tudt_op_fam_class
+DEFAULT FOR TYPE test_udt USING HASH AS
+OPERATOR 1 = (test_udt, test_udt),
+FUNCTION 1 test_udt_hash(test_udt);
+-- END type creation
+\c - - - :worker_2_port
+-- START type creation
+CREATE TYPE test_udt AS (i integer, i2 integer);
+-- ... as well as a function to use as its comparator...
+CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
+AS 'select $1.i = $2.i AND $1.i2 = $2.i2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+-- ... use that function to create a custom equality operator...
+CREATE OPERATOR = (
+ LEFTARG = test_udt,
+ RIGHTARG = test_udt,
+ PROCEDURE = equal_test_udt_function,
+ COMMUTATOR = =,
+ HASHES
+);
+-- ... and create a custom operator family for hash indexes...
+CREATE OPERATOR FAMILY tudt_op_fam USING hash;
+-- ... create a test HASH function. Though it is a poor hash function,
+-- it is acceptable for our tests
+CREATE FUNCTION test_udt_hash(test_udt) RETURNS int
+AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+-- We need to define two different operator classes for the composite types
+-- One uses BTREE the other uses HASH
+CREATE OPERATOR CLASS tudt_op_fam_clas3
+DEFAULT FOR TYPE test_udt USING BTREE AS
+OPERATOR 3 = (test_udt, test_udt);
+CREATE OPERATOR CLASS tudt_op_fam_class
+DEFAULT FOR TYPE test_udt USING HASH AS
+OPERATOR 1 = (test_udt, test_udt),
+FUNCTION 1 test_udt_hash(test_udt);
+-- END type creation
+-- Connect to master
+\c - - - :master_port
+-- Distribute and populate the two tables.
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+SET citus.shard_count TO 3;
+SELECT create_distributed_table('repartition_udt', 'pk');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SET citus.shard_count TO 5;
+SELECT create_distributed_table('repartition_udt_other', 'pk');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (2, '(1,2)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (3, '(1,3)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (4, '(2,1)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (5, '(2,2)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (6, '(2,3)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (7, '(1,1)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (8, '(1,2)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (9, '(1,3)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo');
+SET client_min_messages = LOG;
+-- Query that should result in a repartition join on int column, and be empty.
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.pk = repartition_udt_other.pk
+ WHERE repartition_udt.pk > 1;
+ pk | udtcol | txtcol | pk | udtcol | txtcol
+----+--------+--------+----+--------+--------
+(0 rows)
+
+-- Query that should result in a repartition join on UDT column.
+SET citus.large_table_shard_count = 1;
+SET citus.task_executor_type = 'task-tracker';
+SET citus.log_multi_join_order = true;
+EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.udtcol = repartition_udt_other.udtcol
+ WHERE repartition_udt.pk > 1;
+LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ]
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Distributed Query into pg_merge_job_535003
+ Executor: Task-Tracker
+ Task Count: 4
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 4
+ -> MapMergeJob
+ Map Task Count: 5
+ Merge Task Count: 4
+ Master Query
+ -> Seq Scan on pg_merge_job_535003 (cost=0.00..0.00 rows=0 width=0)
+(12 rows)
+
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.udtcol = repartition_udt_other.udtcol
+ WHERE repartition_udt.pk > 1
+ ORDER BY repartition_udt.pk;
+LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ]
+ pk | udtcol | txtcol | pk | udtcol | txtcol
+----+--------+--------+----+--------+--------
+ 2 | (1,2) | foo | 8 | (1,2) | foo
+ 3 | (1,3) | foo | 9 | (1,3) | foo
+ 4 | (2,1) | foo | 10 | (2,1) | foo
+ 5 | (2,2) | foo | 11 | (2,2) | foo
+ 6 | (2,3) | foo | 12 | (2,3) | foo
+(5 rows)
+
+
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
diff --git a/src/test/regress/expected/multi_mx_repartition_udt_w1.out b/src/test/regress/expected/multi_mx_repartition_udt_w1.out
new file mode 100644
index 000000000..7675fc6e9
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_repartition_udt_w1.out
@@ -0,0 +1,32 @@
+--
+-- MULTI_MX_REPARTITION_W1_UDT
+--
+\c - - - :worker_1_port
+SET client_min_messages = LOG;
+-- Query that should result in a repartition join on UDT column.
+SET citus.large_table_shard_count = 1;
+SET citus.task_executor_type = 'task-tracker';
+SET citus.log_multi_join_order = true;
+-- Query that should result in a repartition join on int column, and be empty.
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.pk = repartition_udt_other.pk
+ WHERE repartition_udt.pk > 1;
+LOG: join order: [ "repartition_udt" ][ local partition join "repartition_udt_other" ]
+ pk | udtcol | txtcol | pk | udtcol | txtcol
+----+--------+--------+----+--------+--------
+(0 rows)
+
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.udtcol = repartition_udt_other.udtcol
+ WHERE repartition_udt.pk > 1
+ ORDER BY repartition_udt.pk;
+LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ]
+ pk | udtcol | txtcol | pk | udtcol | txtcol
+----+--------+--------+----+--------+--------
+ 2 | (1,2) | foo | 8 | (1,2) | foo
+ 3 | (1,3) | foo | 9 | (1,3) | foo
+ 4 | (2,1) | foo | 10 | (2,1) | foo
+ 5 | (2,2) | foo | 11 | (2,2) | foo
+ 6 | (2,3) | foo | 12 | (2,3) | foo
+(5 rows)
+
diff --git a/src/test/regress/expected/multi_mx_repartition_udt_w2.out b/src/test/regress/expected/multi_mx_repartition_udt_w2.out
new file mode 100644
index 000000000..362e591fb
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_repartition_udt_w2.out
@@ -0,0 +1,32 @@
+--
+-- MULTI_MX_REPARTITION_W2_UDT
+--
+\c - - - :worker_2_port
+SET client_min_messages = LOG;
+-- Query that should result in a repartition join on UDT column.
+SET citus.large_table_shard_count = 1;
+SET citus.task_executor_type = 'task-tracker';
+SET citus.log_multi_join_order = true;
+-- Query that should result in a repartition join on int column, and be empty.
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.pk = repartition_udt_other.pk
+ WHERE repartition_udt.pk > 1;
+LOG: join order: [ "repartition_udt" ][ local partition join "repartition_udt_other" ]
+ pk | udtcol | txtcol | pk | udtcol | txtcol
+----+--------+--------+----+--------+--------
+(0 rows)
+
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.udtcol = repartition_udt_other.udtcol
+ WHERE repartition_udt.pk > 1
+ ORDER BY repartition_udt.pk;
+LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ]
+ pk | udtcol | txtcol | pk | udtcol | txtcol
+----+--------+--------+----+--------+--------
+ 2 | (1,2) | foo | 8 | (1,2) | foo
+ 3 | (1,3) | foo | 9 | (1,3) | foo
+ 4 | (2,1) | foo | 10 | (2,1) | foo
+ 5 | (2,2) | foo | 11 | (2,2) | foo
+ 6 | (2,3) | foo | 12 | (2,3) | foo
+(5 rows)
+
diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out
new file mode 100644
index 000000000..95a851afe
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_router_planner.out
@@ -0,0 +1,1521 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000;
+-- ===================================================================
+-- test router planner functionality for single shard select queries
+-- ===================================================================
+-- run all the router queries from one of the workers
+\c - - - :worker_1_port
+-- this table is used in a CTE test
+CREATE TABLE authors_hash_mx ( name text, id bigint );
+-- create a bunch of test data
+INSERT INTO articles_hash_mx VALUES ( 1, 1, 'arsenous', 9572);
+INSERT INTO articles_hash_mx VALUES ( 2, 2, 'abducing', 13642);
+INSERT INTO articles_hash_mx VALUES ( 3, 3, 'asternal', 10480);
+INSERT INTO articles_hash_mx VALUES ( 4, 4, 'altdorfer', 14551);
+INSERT INTO articles_hash_mx VALUES ( 5, 5, 'aruru', 11389);
+INSERT INTO articles_hash_mx VALUES ( 6, 6, 'atlases', 15459);
+INSERT INTO articles_hash_mx VALUES ( 7, 7, 'aseptic', 12298);
+INSERT INTO articles_hash_mx VALUES ( 8, 8, 'agatized', 16368);
+INSERT INTO articles_hash_mx VALUES ( 9, 9, 'alligate', 438);
+INSERT INTO articles_hash_mx VALUES (10, 10, 'aggrandize', 17277);
+INSERT INTO articles_hash_mx VALUES (11, 1, 'alamo', 1347);
+INSERT INTO articles_hash_mx VALUES (12, 2, 'archiblast', 18185);
+INSERT INTO articles_hash_mx VALUES (13, 3, 'aseyev', 2255);
+INSERT INTO articles_hash_mx VALUES (14, 4, 'andesite', 19094);
+INSERT INTO articles_hash_mx VALUES (15, 5, 'adversa', 3164);
+INSERT INTO articles_hash_mx VALUES (16, 6, 'allonym', 2);
+INSERT INTO articles_hash_mx VALUES (17, 7, 'auriga', 4073);
+INSERT INTO articles_hash_mx VALUES (18, 8, 'assembly', 911);
+INSERT INTO articles_hash_mx VALUES (19, 9, 'aubergiste', 4981);
+INSERT INTO articles_hash_mx VALUES (20, 10, 'absentness', 1820);
+INSERT INTO articles_hash_mx VALUES (21, 1, 'arcading', 5890);
+INSERT INTO articles_hash_mx VALUES (22, 2, 'antipope', 2728);
+INSERT INTO articles_hash_mx VALUES (23, 3, 'abhorring', 6799);
+INSERT INTO articles_hash_mx VALUES (24, 4, 'audacious', 3637);
+INSERT INTO articles_hash_mx VALUES (25, 5, 'antehall', 7707);
+INSERT INTO articles_hash_mx VALUES (26, 6, 'abington', 4545);
+INSERT INTO articles_hash_mx VALUES (27, 7, 'arsenous', 8616);
+INSERT INTO articles_hash_mx VALUES (28, 8, 'aerophyte', 5454);
+INSERT INTO articles_hash_mx VALUES (29, 9, 'amateur', 9524);
+INSERT INTO articles_hash_mx VALUES (30, 10, 'andelee', 6363);
+INSERT INTO articles_hash_mx VALUES (31, 1, 'athwartships', 7271);
+INSERT INTO articles_hash_mx VALUES (32, 2, 'amazon', 11342);
+INSERT INTO articles_hash_mx VALUES (33, 3, 'autochrome', 8180);
+INSERT INTO articles_hash_mx VALUES (34, 4, 'amnestied', 12250);
+INSERT INTO articles_hash_mx VALUES (35, 5, 'aminate', 9089);
+INSERT INTO articles_hash_mx VALUES (36, 6, 'ablation', 13159);
+INSERT INTO articles_hash_mx VALUES (37, 7, 'archduchies', 9997);
+INSERT INTO articles_hash_mx VALUES (38, 8, 'anatine', 14067);
+INSERT INTO articles_hash_mx VALUES (39, 9, 'anchises', 10906);
+INSERT INTO articles_hash_mx VALUES (40, 10, 'attemper', 14976);
+INSERT INTO articles_hash_mx VALUES (41, 1, 'aznavour', 11814);
+INSERT INTO articles_hash_mx VALUES (42, 2, 'ausable', 15885);
+INSERT INTO articles_hash_mx VALUES (43, 3, 'affixal', 12723);
+INSERT INTO articles_hash_mx VALUES (44, 4, 'anteport', 16793);
+INSERT INTO articles_hash_mx VALUES (45, 5, 'afrasia', 864);
+INSERT INTO articles_hash_mx VALUES (46, 6, 'atlanta', 17702);
+INSERT INTO articles_hash_mx VALUES (47, 7, 'abeyance', 1772);
+INSERT INTO articles_hash_mx VALUES (48, 8, 'alkylic', 18610);
+INSERT INTO articles_hash_mx VALUES (49, 9, 'anyone', 2681);
+INSERT INTO articles_hash_mx VALUES (50, 10, 'anjanette', 19519);
+SET citus.task_executor_type TO 'real-time';
+SET citus.large_table_shard_count TO 2;
+SET client_min_messages TO 'DEBUG2';
+-- insert a single row for the test
+INSERT INTO articles_single_shard_hash_mx VALUES (50, 10, 'anjanette', 19519);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+-- single-shard tests
+-- test simple select for a single row
+SELECT * FROM articles_hash_mx WHERE author_id = 10 AND id = 50;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+-----------+------------
+ 50 | 10 | anjanette | 19519
+(1 row)
+
+-- get all titles by a single author
+SELECT title FROM articles_hash_mx WHERE author_id = 10;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ title
+------------
+ aggrandize
+ absentness
+ andelee
+ attemper
+ anjanette
+(5 rows)
+
+-- try ordering them by word count
+SELECT title, word_count FROM articles_hash_mx
+ WHERE author_id = 10
+ ORDER BY word_count DESC NULLS LAST;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ title | word_count
+------------+------------
+ anjanette | 19519
+ aggrandize | 17277
+ attemper | 14976
+ andelee | 6363
+ absentness | 1820
+(5 rows)
+
+-- look at last two articles by an author
+SELECT title, id FROM articles_hash_mx
+ WHERE author_id = 5
+ ORDER BY id
+ LIMIT 2;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ title | id
+---------+----
+ aruru | 5
+ adversa | 15
+(2 rows)
+
+-- find all articles by two authors in the same shard
+-- (the ORDER BY does not prevent the plan from being router executable here)
+SELECT title, author_id FROM articles_hash_mx
+ WHERE author_id = 7 OR author_id = 8
+ ORDER BY author_id ASC, id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ title | author_id
+-------------+-----------
+ aseptic | 7
+ auriga | 7
+ arsenous | 7
+ archduchies | 7
+ abeyance | 7
+ agatized | 8
+ assembly | 8
+ aerophyte | 8
+ anatine | 8
+ alkylic | 8
+(10 rows)
+
+-- same query is router executable with no order by
+SELECT title, author_id FROM articles_hash_mx
+ WHERE author_id = 7 OR author_id = 8;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ title | author_id
+-------------+-----------
+ aseptic | 7
+ agatized | 8
+ auriga | 7
+ assembly | 8
+ arsenous | 7
+ aerophyte | 8
+ archduchies | 7
+ anatine | 8
+ abeyance | 7
+ alkylic | 8
+(10 rows)
+
+-- add in some grouping expressions, still on the same shard;
+-- HAVING is generally unsupported by Citus for multi-shard queries
+SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx
+ WHERE author_id = 1 OR author_id = 7 OR author_id = 8 OR author_id = 10
+ GROUP BY author_id
+ HAVING sum(word_count) > 1000
+ ORDER BY sum(word_count) DESC;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ author_id | corpus_size
+-----------+-------------
+ 10 | 59955
+ 8 | 55410
+ 7 | 36756
+ 1 | 35894
+(4 rows)
+
+-- however, the HAVING clause is supported if the query goes to a single shard
+SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx
+ WHERE author_id = 1
+ GROUP BY author_id
+ HAVING sum(word_count) > 1000
+ ORDER BY sum(word_count) DESC;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ author_id | corpus_size
+-----------+-------------
+ 1 | 35894
+(1 row)
+
+-- query is a single shard query but can't do shard pruning,
+-- not router-plannable due to <= and IN
+SELECT * FROM articles_hash_mx WHERE author_id <= 1;
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3);
+NOTICE: cannot use shard pruning with ANY/ALL (array expression)
+HINT: Consider rewriting the expression with OR/AND clauses.
+NOTICE: cannot use shard pruning with ANY/ALL (array expression)
+HINT: Consider rewriting the expression with OR/AND clauses.
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 3 | 3 | asternal | 10480
+ 11 | 1 | alamo | 1347
+ 13 | 3 | aseyev | 2255
+ 21 | 1 | arcading | 5890
+ 23 | 3 | abhorring | 6799
+ 31 | 1 | athwartships | 7271
+ 33 | 3 | autochrome | 8180
+ 41 | 1 | aznavour | 11814
+ 43 | 3 | affixal | 12723
+(10 rows)
+
+-- queries with CTEs are supported
+WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1)
+SELECT * FROM first_author;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id
+----
+ 1
+ 11
+ 21
+ 31
+ 41
+(5 rows)
+
+-- queries with CTEs are supported even if the CTE is not referenced in the query
+WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1)
+SELECT title FROM articles_hash_mx WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ title
+--------------
+ arsenous
+ alamo
+ arcading
+ athwartships
+ aznavour
+(5 rows)
+
+-- two CTE joins are supported if they go to the same worker
+WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1),
+id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 1)
+SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | id | title
+----+-----------+----+--------------
+ 1 | 1 | 1 | arsenous
+ 11 | 1 | 11 | alamo
+ 21 | 1 | 21 | arcading
+ 31 | 1 | 31 | athwartships
+ 41 | 1 | 41 | aznavour
+(5 rows)
+
+WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1),
+id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 3)
+SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | id | title
+----+-----------+----+-------
+(0 rows)
+
+-- CTE joins are not supported if the table shards are on different workers
+WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1),
+id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2)
+SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220104
+ERROR: could not run distributed query with complex table expressions
+HINT: Consider using an equality filter on the distributed table's partition column.
+-- recursive CTEs are supported when filtered on partition column
+INSERT INTO company_employees_mx values(1, 1, 0);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+INSERT INTO company_employees_mx values(1, 2, 1);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+INSERT INTO company_employees_mx values(1, 3, 1);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+INSERT INTO company_employees_mx values(1, 4, 2);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+INSERT INTO company_employees_mx values(1, 5, 4);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+INSERT INTO company_employees_mx values(3, 1, 0);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+INSERT INTO company_employees_mx values(3, 15, 1);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+INSERT INTO company_employees_mx values(3, 3, 1);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+-- find employees within the top 2 levels of the company hierarchy
+WITH RECURSIVE hierarchy as (
+ SELECT *, 1 AS level
+ FROM company_employees_mx
+ WHERE company_id = 1 and manager_id = 0
+ UNION
+ SELECT ce.*, (h.level+1)
+ FROM hierarchy h JOIN company_employees_mx ce
+ ON (h.employee_id = ce.manager_id AND
+ h.company_id = ce.company_id AND
+ ce.company_id = 1))
+SELECT * FROM hierarchy WHERE LEVEL <= 2;
+DEBUG: predicate pruning for shardId 1220108
+DEBUG: predicate pruning for shardId 1220109
+DEBUG: predicate pruning for shardId 1220110
+DEBUG: predicate pruning for shardId 1220108
+DEBUG: predicate pruning for shardId 1220109
+DEBUG: predicate pruning for shardId 1220110
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ company_id | employee_id | manager_id | level
+------------+-------------+------------+-------
+ 1 | 1 | 0 | 1
+ 1 | 2 | 1 | 2
+ 1 | 3 | 1 | 2
+(3 rows)
+
+-- the query is no longer router plannable and gets rejected
+-- if the filter on the company is dropped
+WITH RECURSIVE hierarchy as (
+ SELECT *, 1 AS level
+ FROM company_employees_mx
+ WHERE company_id = 1 and manager_id = 0
+ UNION
+ SELECT ce.*, (h.level+1)
+ FROM hierarchy h JOIN company_employees_mx ce
+ ON (h.employee_id = ce.manager_id AND
+ h.company_id = ce.company_id))
+SELECT * FROM hierarchy WHERE LEVEL <= 2;
+DEBUG: predicate pruning for shardId 1220108
+DEBUG: predicate pruning for shardId 1220109
+DEBUG: predicate pruning for shardId 1220110
+ERROR: could not run distributed query with complex table expressions
+HINT: Consider using an equality filter on the distributed table's partition column.
+-- logically wrong query: it involves different shards of the same table
+-- and is only router plannable when those shards happen to be placed on
+-- the same worker
+WITH RECURSIVE hierarchy as (
+ SELECT *, 1 AS level
+ FROM company_employees_mx
+ WHERE company_id = 3 and manager_id = 0
+ UNION
+ SELECT ce.*, (h.level+1)
+ FROM hierarchy h JOIN company_employees_mx ce
+ ON (h.employee_id = ce.manager_id AND
+ h.company_id = ce.company_id AND
+ ce.company_id = 2))
+SELECT * FROM hierarchy WHERE LEVEL <= 2;
+DEBUG: predicate pruning for shardId 1220107
+DEBUG: predicate pruning for shardId 1220109
+DEBUG: predicate pruning for shardId 1220110
+DEBUG: predicate pruning for shardId 1220107
+DEBUG: predicate pruning for shardId 1220108
+DEBUG: predicate pruning for shardId 1220109
+ERROR: could not run distributed query with complex table expressions
+HINT: Consider using an equality filter on the distributed table's partition column.
+-- grouping sets are supported on single shard
+SELECT
+ id, substring(title, 2, 1) AS subtitle, count(*)
+ FROM articles_hash_mx
+ WHERE author_id = 1 or author_id = 3
+ GROUP BY GROUPING SETS ((id),(subtitle));
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | subtitle | count
+----+----------+-------
+ 1 | | 1
+ 3 | | 1
+ 11 | | 1
+ 13 | | 1
+ 21 | | 1
+ 23 | | 1
+ 31 | | 1
+ 33 | | 1
+ 41 | | 1
+ 43 | | 1
+ | b | 1
+ | f | 1
+ | l | 1
+ | r | 2
+ | s | 2
+ | t | 1
+ | u | 1
+ | z | 1
+(18 rows)
+
+-- grouping sets are not supported on multiple shards
+SELECT
+ id, substring(title, 2, 1) AS subtitle, count(*)
+ FROM articles_hash_mx
+ WHERE author_id = 1 or author_id = 2
+ GROUP BY GROUPING SETS ((id),(subtitle));
+ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP
+HINT: Consider using an equality filter on the distributed table's partition column.
+-- queries which involve functions in the FROM clause are supported if they go to a single worker.
+SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count | position
+----+-----------+--------------+------------+----------
+ 1 | 1 | arsenous | 9572 | 3
+ 11 | 1 | alamo | 1347 | 3
+ 21 | 1 | arcading | 5890 | 3
+ 31 | 1 | athwartships | 7271 | 3
+ 41 | 1 | aznavour | 11814 | 3
+(5 rows)
+
+SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count | position
+----+-----------+--------------+------------+----------
+ 1 | 1 | arsenous | 9572 | 3
+ 3 | 3 | asternal | 10480 | 3
+ 11 | 1 | alamo | 1347 | 3
+ 13 | 3 | aseyev | 2255 | 3
+ 21 | 1 | arcading | 5890 | 3
+ 23 | 3 | abhorring | 6799 | 3
+ 31 | 1 | athwartships | 7271 | 3
+ 33 | 3 | autochrome | 8180 | 3
+ 41 | 1 | aznavour | 11814 | 3
+ 43 | 3 | affixal | 12723 | 3
+(10 rows)
+
+-- they are not supported if multiple workers are involved
+SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2;
+ERROR: could not run distributed query with complex table expressions
+HINT: Consider using an equality filter on the distributed table's partition column.
+-- subqueries are not supported in WHERE clause in Citus
+SELECT * FROM articles_hash_mx WHERE author_id IN (SELECT id FROM authors_hash_mx WHERE name LIKE '%a');
+ERROR: cannot plan queries that include both regular and partitioned relations
+SELECT * FROM articles_hash_mx WHERE author_id IN (SELECT author_id FROM articles_hash_mx WHERE author_id = 1 or author_id = 3);
+ERROR: could not run distributed query with join types other than INNER or OUTER JOINS
+HINT: Consider joining tables on partition column and have equal filter on joining columns.
+SELECT * FROM articles_hash_mx WHERE author_id = (SELECT 1);
+ERROR: could not run distributed query with subquery outside the FROM clause
+HINT: Consider using an equality filter on the distributed table's partition column.
+-- subqueries are supported in FROM clause but they are not router plannable
+SELECT articles_hash_mx.id,test.word_count
+FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id
+ORDER BY articles_hash_mx.id;
+DEBUG: join prunable for task partitionId 0 and 1
+DEBUG: join prunable for task partitionId 0 and 2
+DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 1 and 0
+DEBUG: join prunable for task partitionId 1 and 2
+DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 2 and 0
+DEBUG: join prunable for task partitionId 2 and 1
+DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 3 and 0
+DEBUG: join prunable for task partitionId 3 and 1
+DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 5
+DEBUG: pruning merge fetch taskId 2
+DETAIL: Creating dependency on merge taskId 5
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 5
+DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 11
+DEBUG: pruning merge fetch taskId 8
+DETAIL: Creating dependency on merge taskId 11
+DEBUG: pruning merge fetch taskId 10
+DETAIL: Creating dependency on merge taskId 14
+DEBUG: pruning merge fetch taskId 11
+DETAIL: Creating dependency on merge taskId 14
+ERROR: cannot use real time executor with repartition jobs
+HINT: Set citus.task_executor_type to "task-tracker".
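+-- As the HINT above suggests, the same subquery join could be retried under
+-- the task-tracker executor; a minimal sketch, not executed as part of this
+-- expected output:
+--   SET citus.task_executor_type TO 'task-tracker';
+--   SELECT articles_hash_mx.id, test.word_count
+--   FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test
+--   WHERE test.id = articles_hash_mx.id
+--   ORDER BY articles_hash_mx.id;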
+SELECT articles_hash_mx.id,test.word_count
+FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test
+WHERE test.id = articles_hash_mx.id and articles_hash_mx.author_id = 1
+ORDER BY articles_hash_mx.id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: join prunable for task partitionId 0 and 1
+DEBUG: join prunable for task partitionId 0 and 2
+DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 1 and 0
+DEBUG: join prunable for task partitionId 1 and 2
+DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 2 and 0
+DEBUG: join prunable for task partitionId 2 and 1
+DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 3 and 0
+DEBUG: join prunable for task partitionId 3 and 1
+DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 3
+DEBUG: pruning merge fetch taskId 2
+DETAIL: Creating dependency on merge taskId 5
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 5
+DEBUG: pruning merge fetch taskId 5
+DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 7
+DEBUG: pruning merge fetch taskId 8
+DETAIL: Creating dependency on merge taskId 11
+DEBUG: pruning merge fetch taskId 10
+DETAIL: Creating dependency on merge taskId 9
+DEBUG: pruning merge fetch taskId 11
+DETAIL: Creating dependency on merge taskId 14
+ERROR: cannot use real time executor with repartition jobs
+HINT: Set citus.task_executor_type to "task-tracker".
+-- subqueries are not supported in SELECT clause
+SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash_mx a2 WHERE a.id = a2.id LIMIT 1)
+ AS special_price FROM articles_hash_mx a;
+ERROR: could not run distributed query with subquery outside the FROM clause
+HINT: Consider using an equality filter on the distributed table's partition column.
+-- simple lookup query
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- below query hits a single shard, router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 OR author_id = 17;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- below query hits two shards, not router plannable + not router executable
+-- handled by real-time executor
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 OR author_id = 18;
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- rename the output columns
+SELECT id as article_id, word_count * id as random_value
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ article_id | random_value
+------------+--------------
+ 1 | 9572
+ 11 | 14817
+ 21 | 123690
+ 31 | 225401
+ 41 | 484374
+(5 rows)
+
+-- we can push down co-located joins to a single worker
+SELECT a.author_id as first_author, b.word_count as second_word_count
+ FROM articles_hash_mx a, articles_hash_mx b
+ WHERE a.author_id = 10 and a.author_id = b.author_id
+ LIMIT 3;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ first_author | second_word_count
+--------------+-------------------
+ 10 | 17277
+ 10 | 1820
+ 10 | 6363
+(3 rows)
+
+-- following join is router plannable since the same worker
+-- has both shards
+SELECT a.author_id as first_author, b.word_count as second_word_count
+ FROM articles_hash_mx a, articles_single_shard_hash_mx b
+ WHERE a.author_id = 10 and a.author_id = b.author_id
+ LIMIT 3;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ first_author | second_word_count
+--------------+-------------------
+ 10 | 19519
+ 10 | 19519
+ 10 | 19519
+(3 rows)
+
+
+-- the following join is not router plannable since no worker
+-- contains both shards; a CTE is added to make this fail at the
+-- logical planner
+WITH single_shard as (SELECT * FROM articles_single_shard_hash_mx)
+SELECT a.author_id as first_author, b.word_count as second_word_count
+ FROM articles_hash_mx a, single_shard b
+ WHERE a.author_id = 2 and a.author_id = b.author_id
+ LIMIT 3;
+DEBUG: predicate pruning for shardId 1220104
+DEBUG: Found no worker with all shard placements
+ERROR: could not run distributed query with complex table expressions
+-- single shard select with limit is router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ LIMIT 3;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+----------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+(3 rows)
+
+-- single shard select with limit + offset is router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ LIMIT 2
+ OFFSET 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+----------+------------
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+(2 rows)
+
+-- single shard select with limit + offset + order by is router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id desc
+ LIMIT 2
+ OFFSET 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 31 | 1 | athwartships | 7271
+ 21 | 1 | arcading | 5890
+(2 rows)
+
+
+-- single shard select with group by on non-partition column is router plannable
+SELECT id
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ GROUP BY id
+ ORDER BY id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id
+----
+ 1
+ 11
+ 21
+ 31
+ 41
+(5 rows)
+
+-- single shard select with distinct is router plannable
+SELECT distinct id
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id
+----
+ 1
+ 11
+ 21
+ 31
+ 41
+(5 rows)
+
+-- single shard aggregate is router plannable
+SELECT avg(word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 2;
+DEBUG: predicate pruning for shardId 1220104
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ avg
+--------------------
+ 12356.400000000000
+(1 row)
+
+-- max, min, sum, count are router plannable on single shard
+SELECT max(word_count) as max, min(word_count) as min,
+ sum(word_count) as sum, count(word_count) as cnt
+ FROM articles_hash_mx
+ WHERE author_id = 2;
+DEBUG: predicate pruning for shardId 1220104
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ max | min | sum | cnt
+-------+------+-------+-----
+ 18185 | 2728 | 61782 | 5
+(1 row)
+
+-- queries with aggregates and group by supported on single shard
+SELECT max(word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ GROUP BY author_id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ max
+-------
+ 11814
+(1 row)
+
+
+-- router plannable union queries are supported
+(SELECT * FROM articles_hash_mx WHERE author_id = 1)
+UNION
+(SELECT * FROM articles_hash_mx WHERE author_id = 3);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 3 | 3 | asternal | 10480
+ 43 | 3 | affixal | 12723
+ 23 | 3 | abhorring | 6799
+ 13 | 3 | aseyev | 2255
+ 11 | 1 | alamo | 1347
+ 41 | 1 | aznavour | 11814
+ 1 | 1 | arsenous | 9572
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 33 | 3 | autochrome | 8180
+(10 rows)
+
+SELECT * FROM (
+ (SELECT * FROM articles_hash_mx WHERE author_id = 1)
+ UNION
+ (SELECT * FROM articles_hash_mx WHERE author_id = 3)) uu;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 3 | 3 | asternal | 10480
+ 43 | 3 | affixal | 12723
+ 23 | 3 | abhorring | 6799
+ 13 | 3 | aseyev | 2255
+ 11 | 1 | alamo | 1347
+ 41 | 1 | aznavour | 11814
+ 1 | 1 | arsenous | 9572
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 33 | 3 | autochrome | 8180
+(10 rows)
+
+(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1)
+UNION
+(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ left
+------
+ a
+(1 row)
+
+(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1)
+INTERSECT
+(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ left
+------
+ a
+(1 row)
+
+(SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1)
+EXCEPT
+(SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ left
+------
+ at
+ az
+ ar
+ al
+(4 rows)
+
+-- union queries are not supported if they are not router plannable;
+-- shard pruning debug output differs between Ubuntu and macOS, so log
+-- messages are disabled for these queries only
+SET client_min_messages to 'NOTICE';
+(SELECT * FROM articles_hash_mx WHERE author_id = 1)
+UNION
+(SELECT * FROM articles_hash_mx WHERE author_id = 2);
+ERROR: could not run distributed query with UNION, INTERSECT, or EXCEPT
+HINT: Consider using an equality filter on the distributed table's partition column.
+SELECT * FROM (
+ (SELECT * FROM articles_hash_mx WHERE author_id = 1)
+ UNION
+ (SELECT * FROM articles_hash_mx WHERE author_id = 2)) uu;
+ERROR: cannot perform distributed planning on this query
+DETAIL: Subqueries without group by clause are not supported yet
+-- error out for queries with repartition jobs
+SELECT *
+ FROM articles_hash_mx a, articles_hash_mx b
+ WHERE a.id = b.id AND a.author_id = 1;
+ERROR: cannot use real time executor with repartition jobs
+HINT: Set citus.task_executor_type to "task-tracker".
+-- queries which hit more than 1 shard are not router plannable or executable
+-- handled by real-time executor
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id >= 1 AND author_id <= 3;
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 3 | 3 | asternal | 10480
+ 11 | 1 | alamo | 1347
+ 13 | 3 | aseyev | 2255
+ 21 | 1 | arcading | 5890
+ 23 | 3 | abhorring | 6799
+ 31 | 1 | athwartships | 7271
+ 33 | 3 | autochrome | 8180
+ 41 | 1 | aznavour | 11814
+ 43 | 3 | affixal | 12723
+ 2 | 2 | abducing | 13642
+ 12 | 2 | archiblast | 18185
+ 22 | 2 | antipope | 2728
+ 32 | 2 | amazon | 11342
+ 42 | 2 | ausable | 15885
+(15 rows)
+
+SET citus.task_executor_type TO 'real-time';
+-- Test various filtering options for router plannable check
+SET client_min_messages to 'DEBUG2';
+-- this is definitely single shard
+-- and router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 and author_id >= 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- not router plannable due to or
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 or id = 1;
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+
+-- router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 and (id = 1 or id = 41);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+----------+------------
+ 1 | 1 | arsenous | 9572
+ 41 | 1 | aznavour | 11814
+(2 rows)
+
+-- router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 and (id = random()::int * 0);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+-------+------------
+(0 rows)
+
+-- not router plannable due to function call on the right side
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = (random()::int * 0 + 1);
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- not router plannable due to or
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 or id = 1;
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+
+-- router plannable due to abs(-1) being constant-folded to 1 by PostgreSQL
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = abs(-1);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- not router plannable due to abs() function
+SELECT *
+ FROM articles_hash_mx
+ WHERE 1 = abs(author_id);
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- not router plannable due to abs() function
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = abs(author_id - 2);
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- router plannable, function on different field
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 and (id = abs(id - 2));
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+----------+------------
+ 1 | 1 | arsenous | 9572
+(1 row)
+
+-- not router plannable due to is true
+SELECT *
+ FROM articles_hash_mx
+ WHERE (author_id = 1) is true;
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- router plannable, (boolean expression) = true is collapsed to (boolean expression)
+SELECT *
+ FROM articles_hash_mx
+ WHERE (author_id = 1) = true;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- router plannable, between operator is on another column
+SELECT *
+ FROM articles_hash_mx
+ WHERE (author_id = 1) and id between 0 and 20;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+----------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+(2 rows)
+
+-- router plannable, partition column expression is and'ed to rest
+SELECT *
+ FROM articles_hash_mx
+ WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s';
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 31 | 1 | athwartships | 7271
+(2 rows)
+
+-- router plannable, order is changed
+SELECT *
+ FROM articles_hash_mx
+ WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 31 | 1 | athwartships | 7271
+(2 rows)
+
+-- router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE (title like '%s' or title like 'a%') and (author_id = 1);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+----------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 41 | 1 | aznavour | 11814
+(3 rows)
+
+-- window functions are supported if query is router plannable
+SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
+ FROM articles_hash_mx
+ WHERE author_id = 5;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ prev | title | word_count
+----------+----------+------------
+ | afrasia | 864
+ afrasia | adversa | 3164
+ adversa | antehall | 7707
+ antehall | aminate | 9089
+ aminate | aruru | 11389
+(5 rows)
+
+SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
+ FROM articles_hash_mx
+ WHERE author_id = 5
+ ORDER BY word_count DESC;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ prev | title | word_count
+----------+----------+------------
+ aminate | aruru | 11389
+ antehall | aminate | 9089
+ adversa | antehall | 7707
+ afrasia | adversa | 3164
+ | afrasia | 864
+(5 rows)
+
+SELECT id, MIN(id) over (order by word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | min
+----+-----
+ 11 | 11
+ 21 | 11
+ 31 | 11
+ 1 | 1
+ 41 | 1
+(5 rows)
+
+SELECT id, word_count, AVG(word_count) over (order by word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | word_count | avg
+----+------------+-----------------------
+ 11 | 1347 | 1347.0000000000000000
+ 21 | 5890 | 3618.5000000000000000
+ 31 | 7271 | 4836.0000000000000000
+ 1 | 9572 | 6020.0000000000000000
+ 41 | 11814 | 7178.8000000000000000
+(5 rows)
+
+SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ word_count | rank
+------------+------
+ 1347 | 1
+ 5890 | 2
+ 7271 | 3
+ 9572 | 4
+ 11814 | 5
+(5 rows)
+
+-- window functions are not supported for non-router plannable queries
+SELECT id, MIN(id) over (order by word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1 or author_id = 2;
+ERROR: could not run distributed query with window functions
+HINT: Consider using an equality filter on the distributed table's partition column.
+SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
+ FROM articles_hash_mx
+ WHERE author_id = 5 or author_id = 2;
+ERROR: could not run distributed query with window functions
+HINT: Consider using an equality filter on the distributed table's partition column.
+-- complex query hitting a single shard
+SELECT
+ count(DISTINCT CASE
+ WHEN
+ word_count > 100
+ THEN
+ id
+ ELSE
+ NULL
+ END) as c
+ FROM
+ articles_hash_mx
+ WHERE
+ author_id = 5;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ c
+---
+ 5
+(1 row)
+
+-- the same query is not router plannable if it hits multiple shards
+SELECT
+ count(DISTINCT CASE
+ WHEN
+ word_count > 100
+ THEN
+ id
+ ELSE
+ NULL
+ END) as c
+ FROM
+ articles_hash_mx
+ GROUP BY
+ author_id;
+ c
+---
+ 4
+ 5
+ 5
+ 5
+ 5
+ 5
+ 5
+ 5
+ 5
+ 5
+(10 rows)
+
+-- queries inside transactions can be router plannable
+BEGIN;
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+END;
+-- cursor queries are router plannable
+BEGIN;
+DECLARE test_cursor CURSOR FOR
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+FETCH test_cursor;
+ id | author_id | title | word_count
+----+-----------+----------+------------
+ 1 | 1 | arsenous | 9572
+(1 row)
+
+FETCH test_cursor;
+ id | author_id | title | word_count
+----+-----------+-------+------------
+ 11 | 1 | alamo | 1347
+(1 row)
+
+END;
+-- queries inside COPY can be router plannable
+COPY (
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id) TO STDOUT;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+1 1 arsenous 9572
+11 1 alamo 1347
+21 1 arcading 5890
+31 1 athwartships 7271
+41 1 aznavour 11814
+
+-- queries inside CREATE TABLE ... AS statements can be router plannable
+CREATE TEMP TABLE temp_articles_hash_mx as
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+-- router plannable queries may include FILTER clauses on aggregates
+SELECT count(*), count(*) FILTER (WHERE id < 3)
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ count | count
+-------+-------
+ 5 | 1
+(1 row)
+
+-- non-router plannable queries support filters as well
+SELECT count(*), count(*) FILTER (WHERE id < 3)
+ FROM articles_hash_mx
+ WHERE author_id = 1 or author_id = 2;
+ count | count
+-------+-------
+ 10 | 2
+(1 row)
+
+-- prepared statements can be router plannable
+PREPARE author_1_articles as
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+EXECUTE author_1_articles;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- parameterized prepared statements can be router plannable
+PREPARE author_articles(int) as
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = $1;
+EXECUTE author_articles(1);
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+-- queries inside plpgsql functions could be router plannable
+CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$
+DECLARE
+ max_id integer;
+BEGIN
+ SELECT MAX(id) FROM articles_hash_mx ah
+ WHERE author_id = 1
+ into max_id;
+ return max_id;
+END;
+$$ LANGUAGE plpgsql;
+SELECT author_articles_max_id();
+DEBUG: predicate pruning for shardId 1220105
+CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah
+ WHERE author_id = 1"
+PL/pgSQL function author_articles_max_id() line 5 at SQL statement
+DEBUG: predicate pruning for shardId 1220105
+CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah
+ WHERE author_id = 1"
+PL/pgSQL function author_articles_max_id() line 5 at SQL statement
+DEBUG: predicate pruning for shardId 1220105
+CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah
+ WHERE author_id = 1"
+PL/pgSQL function author_articles_max_id() line 5 at SQL statement
+DEBUG: Creating router plan
+CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah
+ WHERE author_id = 1"
+PL/pgSQL function author_articles_max_id() line 5 at SQL statement
+DEBUG: Plan is router executable
+CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah
+ WHERE author_id = 1"
+PL/pgSQL function author_articles_max_id() line 5 at SQL statement
+ author_articles_max_id
+------------------------
+ 41
+(1 row)
+
+-- a plpgsql function that returns query results is not itself router plannable, but the query inside it can be
+CREATE OR REPLACE FUNCTION author_articles_id_word_count() RETURNS TABLE(id bigint, word_count int) AS $$
+DECLARE
+BEGIN
+ RETURN QUERY
+ SELECT ah.id, ah.word_count
+ FROM articles_hash_mx ah
+ WHERE author_id = 1;
+
+END;
+$$ LANGUAGE plpgsql;
+SELECT * FROM author_articles_id_word_count();
+DEBUG: predicate pruning for shardId 1220105
+CONTEXT: SQL statement "SELECT ah.id, ah.word_count
+ FROM articles_hash_mx ah
+ WHERE author_id = 1"
+PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY
+DEBUG: Creating router plan
+CONTEXT: SQL statement "SELECT ah.id, ah.word_count
+ FROM articles_hash_mx ah
+ WHERE author_id = 1"
+PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY
+DEBUG: Plan is router executable
+CONTEXT: PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY
+ id | word_count
+----+------------
+ 1 | 9572
+ 11 | 1347
+ 21 | 5890
+ 31 | 7271
+ 41 | 11814
+(5 rows)
+
+-- materialized views can be created for router plannable queries
+CREATE MATERIALIZED VIEW mv_articles_hash_mx AS
+ SELECT * FROM articles_hash_mx WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+SELECT * FROM mv_articles_hash_mx;
+ id | author_id | title | word_count
+----+-----------+--------------+------------
+ 1 | 1 | arsenous | 9572
+ 11 | 1 | alamo | 1347
+ 21 | 1 | arcading | 5890
+ 31 | 1 | athwartships | 7271
+ 41 | 1 | aznavour | 11814
+(5 rows)
+
+SET client_min_messages to 'INFO';
+DROP MATERIALIZED VIEW mv_articles_hash_mx;
+SET client_min_messages to 'DEBUG2';
+CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS
+ SELECT * FROM articles_hash_mx WHERE author_id in (1,2);
+NOTICE: cannot use shard pruning with ANY/ALL (array expression)
+HINT: Consider rewriting the expression with OR/AND clauses.
+NOTICE: cannot use shard pruning with ANY/ALL (array expression)
+HINT: Consider rewriting the expression with OR/AND clauses.
+ERROR: cannot create temporary table within security-restricted operation
+
+-- router planner/executor is disabled for task-tracker executor
+-- the following query is router plannable, but the router planner is disabled
+-- TODO: Uncomment once we fix task-tracker issue
+--SET citus.task_executor_type to 'task-tracker';
+--SELECT id
+-- FROM articles_hash_mx
+-- WHERE author_id = 1;
+-- insert query is router plannable even under task-tracker
+INSERT INTO articles_hash_mx VALUES (51, 1, 'amateus', 1814);
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+-- verify the insert was successful (the query is router plannable and executable)
+SELECT id
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+DEBUG: predicate pruning for shardId 1220105
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+ id
+----
+ 1
+ 11
+ 21
+ 31
+ 41
+ 51
+(6 rows)
+
diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out
new file mode 100644
index 000000000..ed171c8c7
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_schema_support.out
@@ -0,0 +1,355 @@
+--
+-- MULTI_MX_SCHEMA_SUPPORT
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1210000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1210000;
+-- connect to a worker node and run some queries
+\c - - - :worker_1_port
+-- test very basic queries
+SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+-------------------------------------------------------------------------------------------------------------
+ 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai
+ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon
+ 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
+ 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold
+(4 rows)
+
+SELECT * FROM citus_mx_test_schema.nation_hash ORDER BY n_nationkey LIMIT 4;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+-------------------------------------------------------------------------------------------------------------
+ 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai
+ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon
+ 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
+ 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold
+(4 rows)
+
+-- test cursors
+SET search_path TO public;
+BEGIN;
+DECLARE test_cursor CURSOR FOR
+ SELECT *
+ FROM nation_hash
+ WHERE n_nationkey = 1;
+FETCH test_cursor;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+------------------------------------------------------------------------------
+ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon
+(1 row)
+
+END;
+-- test when search_path is set
+SET search_path TO citus_mx_test_schema;
+BEGIN;
+DECLARE test_cursor CURSOR FOR
+ SELECT *
+ FROM nation_hash
+ WHERE n_nationkey = 1;
+FETCH test_cursor;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+------------------------------------------------------------------------------
+ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon
+(1 row)
+
+END;
+-- test inserting into a table in a different schema
+SET search_path TO public;
+INSERT INTO citus_mx_test_schema.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (100, 'TURKEY', 3);
+-- verify insertion
+SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey = 100;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+-----------
+ 100 | TURKEY | 3 |
+(1 row)
+
+-- test when search_path is set
+SET search_path TO citus_mx_test_schema;
+INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (101, 'GERMANY', 3);
+-- verify insertion
+SELECT * FROM nation_hash WHERE n_nationkey = 101;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+-----------
+ 101 | GERMANY | 3 |
+(1 row)
+
+-- TODO: add UPDATE/DELETE/UPSERT
+-- test UDFs with schemas
+SET search_path TO public;
+-- UDF in public, table in a schema other than public, search_path is not set
+SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
+ simpletestfunction
+--------------------
+ 152
+ 151
+ 37
+ 35
+ 34
+(5 rows)
+
+-- UDF in public, table in a schema other than public, search_path is set
+SET search_path TO citus_mx_test_schema;
+SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
+ simpletestfunction
+--------------------
+ 152
+ 151
+ 37
+ 35
+ 34
+(5 rows)
+
+-- UDF in schema, table in a schema other than public, search_path is not set
+SET search_path TO public;
+SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
+ simpletestfunction2
+---------------------
+ 152
+ 151
+ 37
+ 35
+ 34
+(5 rows)
+
+-- UDF in schema, table in a schema other than public, search_path is set
+SET search_path TO citus_mx_test_schema;
+SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
+ simpletestfunction2
+---------------------
+ 152
+ 151
+ 37
+ 35
+ 34
+(5 rows)
+
+-- test operators with schema
+SET search_path TO public;
+-- test when search_path is not set
+SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus_mx_test_schema.===) 1;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+------------------------------------------------------------------------------
+ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon
+(1 row)
+
+-- test when search_path is set
+SET search_path TO citus_mx_test_schema;
+SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+------------------------------------------------------------------------------
+ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon
+(1 row)
+
+SELECT * FROM citus_mx_test_schema.nation_hash_collation_search_path;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+-------------------------------------------------------------------------------------------------------------
+ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon
+ 5 | ETHIOPIA | 0 | ven packages wake quickly. regu
+ 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai
+ 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold
+ 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d
+ 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
+(6 rows)
+
+SELECT n_comment FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY n_comment COLLATE citus_mx_test_schema.english;
+ n_comment
+-------------------------------------------------------------------------------------------------------------
+ al foxes promise slyly according to the regular accounts. bold requests alon
+ eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold
+ haggle. carefully final deposits detect slyly agai
+ ven packages wake quickly. regu
+ y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d
+ y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
+(6 rows)
+
+SET search_path TO citus_mx_test_schema;
+SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC;
+ n_nationkey | n_name | n_regionkey | n_comment
+-------------+---------------------------+-------------+-------------------------------------------------------------------------------------------------------------
+ 5 | ETHIOPIA | 0 | ven packages wake quickly. regu
+ 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d
+ 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold
+ 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
+ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon
+ 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai
+(6 rows)
+
+SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english;
+ n_comment
+-------------------------------------------------------------------------------------------------------------
+ al foxes promise slyly according to the regular accounts. bold requests alon
+ eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold
+ haggle. carefully final deposits detect slyly agai
+ ven packages wake quickly. regu
+ y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d
+ y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
+(6 rows)
+
+SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = '(a,a)'::citus_mx_test_schema.new_composite_type ORDER BY 1::int DESC;
+ n_nationkey | n_name | n_regionkey | n_comment | test_col
+-------------+---------------------------+-------------+----------------------------------------------------+----------
+ 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a)
+(1 row)
+
+-- test when search_path is set
+SET search_path TO citus_mx_test_schema;
+SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type ORDER BY 1::int DESC;
+ n_nationkey | n_name | n_regionkey | n_comment | test_col
+-------------+---------------------------+-------------+----------------------------------------------------+----------
+ 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a)
+(1 row)
+
+-- check when search_path is public,
+-- join of two tables which are in different schemas,
+-- join on partition column
+SET search_path TO public;
+SELECT
+ count (*)
+FROM
+ citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_nationkey = n2.n_nationkey;
+ count
+-------
+ 25
+(1 row)
+
+-- check when search_path is different than public,
+-- join of two tables which are in different schemas,
+-- join on partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_nationkey = n2.n_nationkey;
+ count
+-------
+ 25
+(1 row)
+
+-- check when search_path is public,
+-- join of two tables which are in the same schema,
+-- join on partition column
+SET search_path TO public;
+SELECT
+ count (*)
+FROM
+ citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_1.nation_hash_2 n2
+WHERE
+ n1.n_nationkey = n2.n_nationkey;
+ count
+-------
+ 25
+(1 row)
+
+-- check when search_path is different than public,
+-- join of two tables which are in the same schema,
+-- join on partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, nation_hash_2 n2
+WHERE
+ n1.n_nationkey = n2.n_nationkey;
+ count
+-------
+ 25
+(1 row)
+
+-- single repartition joins
+SET citus.task_executor_type TO "task-tracker";
+-- check when search_path is public,
+-- join of two tables which are in different schemas,
+-- join on partition column and non-partition column
+--SET search_path TO public;
+SELECT
+ count (*)
+FROM
+ citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_nationkey = n2.n_regionkey;
+ count
+-------
+ 25
+(1 row)
+
+-- check when search_path is different than public,
+-- join of two tables which are in different schemas,
+-- join on partition column and non-partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_nationkey = n2.n_regionkey;
+ count
+-------
+ 25
+(1 row)
+
+-- check when search_path is different than public,
+-- join of two tables which are in the same schema,
+-- join on partition column and non-partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, nation_hash_2 n2
+WHERE
+ n1.n_nationkey = n2.n_regionkey;
+ count
+-------
+ 25
+(1 row)
+
+-- hash repartition joins
+-- check when search_path is public,
+-- join of two tables which are in different schemas,
+-- join on non-partition column
+SET search_path TO public;
+SELECT
+ count (*)
+FROM
+ citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_regionkey = n2.n_regionkey;
+ count
+-------
+ 125
+(1 row)
+
+-- check when search_path is different than public,
+-- join of two tables which are in different schemas,
+-- join on non-partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_regionkey = n2.n_regionkey;
+ count
+-------
+ 125
+(1 row)
+
+-- check when search_path is different than public,
+-- join of two tables which are in the same schema,
+-- join on non-partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, nation_hash_2 n2
+WHERE
+ n1.n_regionkey = n2.n_regionkey;
+ count
+-------
+ 125
+(1 row)
+
+-- set task_executor back to real-time
+SET citus.task_executor_type TO "real-time";
diff --git a/src/test/regress/expected/multi_mx_tpch_query1.out b/src/test/regress/expected/multi_mx_tpch_query1.out
new file mode 100644
index 000000000..2c361750a
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query1.out
@@ -0,0 +1,111 @@
+--
+-- MULTI_MX_TPCH_QUERY1
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
+-- connect to the schema node
+\c - - - :master_port
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #1 from the TPC-H decision support benchmark
+SELECT
+ l_returnflag,
+ l_linestatus,
+ sum(l_quantity) as sum_qty,
+ sum(l_extendedprice) as sum_base_price,
+ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+ avg(l_quantity) as avg_qty,
+ avg(l_extendedprice) as avg_price,
+ avg(l_discount) as avg_disc,
+ count(*) as count_order
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate <= date '1998-12-01' - interval '90 days'
+GROUP BY
+ l_returnflag,
+ l_linestatus
+ORDER BY
+ l_returnflag,
+ l_linestatus;
+ l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order
+--------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+-------------
+ A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944
+ N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76
+ N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883
+ R | F | 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901
+(4 rows)
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #1 from the TPC-H decision support benchmark
+SELECT
+ l_returnflag,
+ l_linestatus,
+ sum(l_quantity) as sum_qty,
+ sum(l_extendedprice) as sum_base_price,
+ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+ avg(l_quantity) as avg_qty,
+ avg(l_extendedprice) as avg_price,
+ avg(l_discount) as avg_disc,
+ count(*) as count_order
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate <= date '1998-12-01' - interval '90 days'
+GROUP BY
+ l_returnflag,
+ l_linestatus
+ORDER BY
+ l_returnflag,
+ l_linestatus;
+ l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order
+--------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+-------------
+ A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944
+ N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76
+ N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883
+ R | F | 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901
+(4 rows)
+
+-- connect to the other node
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #1 from the TPC-H decision support benchmark
+SELECT
+ l_returnflag,
+ l_linestatus,
+ sum(l_quantity) as sum_qty,
+ sum(l_extendedprice) as sum_base_price,
+ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+ avg(l_quantity) as avg_qty,
+ avg(l_extendedprice) as avg_price,
+ avg(l_discount) as avg_disc,
+ count(*) as count_order
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate <= date '1998-12-01' - interval '90 days'
+GROUP BY
+ l_returnflag,
+ l_linestatus
+ORDER BY
+ l_returnflag,
+ l_linestatus;
+ l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order
+--------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+-------------
+ A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944
+ N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76
+ N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883
+ R | F | 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901
+(4 rows)
+
diff --git a/src/test/regress/expected/multi_mx_tpch_query10.out b/src/test/regress/expected/multi_mx_tpch_query10.out
new file mode 100644
index 000000000..2a3bb08df
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query10.out
@@ -0,0 +1,186 @@
+--
+-- MULTI_MX_TPCH_QUERY10
+--
+-- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests,
+-- we don't set citus.large_table_shard_count here, and instead use the default value
+-- coming from postgresql.conf or multi_task_tracker_executor.conf.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
+-- connect to master
+\c - - - :master_port
+SELECT
+ c_custkey,
+ c_name,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ c_acctbal,
+ n_name,
+ c_address,
+ c_phone,
+ c_comment
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx,
+ nation_mx
+WHERE
+ c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate >= date '1993-10-01'
+ AND o_orderdate < date '1993-10-01' + interval '3' month
+ AND l_returnflag = 'R'
+ AND c_nationkey = n_nationkey
+GROUP BY
+ c_custkey,
+ c_name,
+ c_acctbal,
+ c_phone,
+ n_name,
+ c_address,
+ c_comment
+ORDER BY
+ revenue DESC
+LIMIT 20;
+ c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment
+-----------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+---------------------------------------------------------------------------------------------------------------------
+ 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi
+ 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto
+ 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole
+ 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious
+ 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl
+ 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft
+ 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru
+ 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl
+ 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu
+ 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily
+ 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully
+ 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious
+ 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote
+ 101 | Customer#000000101 | 124996.0120 | 7470.96 | BRAZIL | sMmL2rNeHDltovSm Y | 12-514-298-3699 | sleep. pending packages detect slyly ironic pack
+ 671 | Customer#000000671 | 124125.2191 | 3227.87 | VIETNAM | ic6qGrt0giB,HDEiBK,,FYGHXQpc | 31-593-213-9388 | bold ideas above the ironic packages affix blithely about the furiou
+ 526 | Customer#000000526 | 120324.0048 | 705.93 | ARGENTINA | 0oAVPhh1I4JdrDafVG2Z8 | 11-170-679-3115 | ctions cajole after the furiously unusual ideas. ironic packages among the instructions are carefully carefully iro
+ 367 | Customer#000000367 | 118572.6180 | 9108.65 | JORDAN | yZaDoEZCqt2VMTVKoZUkf6gJ4yj | 23-939-319-4691 | eodolites under the ironic, stealthy requests affix furiously among the unusual tit
+ 745 | Customer#000000745 | 113738.6908 | 7115.14 | CHINA | vjuHvDKdaomsivy l | 28-913-438-9403 | o beans. bold, regular theodolites haggle carefully about the quickl
+ 118 | Customer#000000118 | 113149.7832 | 3582.37 | CHINA | OVnFuHygK9wx3xpg8 | 28-639-943-7051 | uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep
+ 50 | Customer#000000050 | 111600.5870 | 4266.13 | FRANCE | 9SzDYlkzxByyJ1QeTI o | 16-658-112-3221 | ts. furiously ironic accounts cajole furiously slyly ironic dinos.
+(20 rows)
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
+SELECT
+ c_custkey,
+ c_name,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ c_acctbal,
+ n_name,
+ c_address,
+ c_phone,
+ c_comment
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx,
+ nation_mx
+WHERE
+ c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate >= date '1993-10-01'
+ AND o_orderdate < date '1993-10-01' + interval '3' month
+ AND l_returnflag = 'R'
+ AND c_nationkey = n_nationkey
+GROUP BY
+ c_custkey,
+ c_name,
+ c_acctbal,
+ c_phone,
+ n_name,
+ c_address,
+ c_comment
+ORDER BY
+ revenue DESC
+LIMIT 20;
+ c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment
+-----------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+---------------------------------------------------------------------------------------------------------------------
+ 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi
+ 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto
+ 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole
+ 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious
+ 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl
+ 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft
+ 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru
+ 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl
+ 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu
+ 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily
+ 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully
+ 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious
+ 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote
+ 101 | Customer#000000101 | 124996.0120 | 7470.96 | BRAZIL | sMmL2rNeHDltovSm Y | 12-514-298-3699 | sleep. pending packages detect slyly ironic pack
+ 671 | Customer#000000671 | 124125.2191 | 3227.87 | VIETNAM | ic6qGrt0giB,HDEiBK,,FYGHXQpc | 31-593-213-9388 | bold ideas above the ironic packages affix blithely about the furiou
+ 526 | Customer#000000526 | 120324.0048 | 705.93 | ARGENTINA | 0oAVPhh1I4JdrDafVG2Z8 | 11-170-679-3115 | ctions cajole after the furiously unusual ideas. ironic packages among the instructions are carefully carefully iro
+ 367 | Customer#000000367 | 118572.6180 | 9108.65 | JORDAN | yZaDoEZCqt2VMTVKoZUkf6gJ4yj | 23-939-319-4691 | eodolites under the ironic, stealthy requests affix furiously among the unusual tit
+ 745 | Customer#000000745 | 113738.6908 | 7115.14 | CHINA | vjuHvDKdaomsivy l | 28-913-438-9403 | o beans. bold, regular theodolites haggle carefully about the quickl
+ 118 | Customer#000000118 | 113149.7832 | 3582.37 | CHINA | OVnFuHygK9wx3xpg8 | 28-639-943-7051 | uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep
+ 50 | Customer#000000050 | 111600.5870 | 4266.13 | FRANCE | 9SzDYlkzxByyJ1QeTI o | 16-658-112-3221 | ts. furiously ironic accounts cajole furiously slyly ironic dinos.
+(20 rows)
+
+-- connect to the other worker
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
+SELECT
+ c_custkey,
+ c_name,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ c_acctbal,
+ n_name,
+ c_address,
+ c_phone,
+ c_comment
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx,
+ nation_mx
+WHERE
+ c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate >= date '1993-10-01'
+ AND o_orderdate < date '1993-10-01' + interval '3' month
+ AND l_returnflag = 'R'
+ AND c_nationkey = n_nationkey
+GROUP BY
+ c_custkey,
+ c_name,
+ c_acctbal,
+ c_phone,
+ n_name,
+ c_address,
+ c_comment
+ORDER BY
+ revenue DESC
+LIMIT 20;
+ c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment
+-----------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+---------------------------------------------------------------------------------------------------------------------
+ 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi
+ 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto
+ 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole
+ 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious
+ 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl
+ 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft
+ 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru
+ 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl
+ 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu
+ 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily
+ 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully
+ 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious
+ 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote
+ 101 | Customer#000000101 | 124996.0120 | 7470.96 | BRAZIL | sMmL2rNeHDltovSm Y | 12-514-298-3699 | sleep. pending packages detect slyly ironic pack
+ 671 | Customer#000000671 | 124125.2191 | 3227.87 | VIETNAM | ic6qGrt0giB,HDEiBK,,FYGHXQpc | 31-593-213-9388 | bold ideas above the ironic packages affix blithely about the furiou
+ 526 | Customer#000000526 | 120324.0048 | 705.93 | ARGENTINA | 0oAVPhh1I4JdrDafVG2Z8 | 11-170-679-3115 | ctions cajole after the furiously unusual ideas. ironic packages among the instructions are carefully carefully iro
+ 367 | Customer#000000367 | 118572.6180 | 9108.65 | JORDAN | yZaDoEZCqt2VMTVKoZUkf6gJ4yj | 23-939-319-4691 | eodolites under the ironic, stealthy requests affix furiously among the unusual tit
+ 745 | Customer#000000745 | 113738.6908 | 7115.14 | CHINA | vjuHvDKdaomsivy l | 28-913-438-9403 | o beans. bold, regular theodolites haggle carefully about the quickl
+ 118 | Customer#000000118 | 113149.7832 | 3582.37 | CHINA | OVnFuHygK9wx3xpg8 | 28-639-943-7051 | uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep
+ 50 | Customer#000000050 | 111600.5870 | 4266.13 | FRANCE | 9SzDYlkzxByyJ1QeTI o | 16-658-112-3221 | ts. furiously ironic accounts cajole furiously slyly ironic dinos.
+(20 rows)
+
diff --git a/src/test/regress/expected/multi_mx_tpch_query12.out b/src/test/regress/expected/multi_mx_tpch_query12.out
new file mode 100644
index 000000000..f9f91e370
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query12.out
@@ -0,0 +1,126 @@
+--
+-- MULTI_MX_TPCH_QUERY12
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
+-- connect to the schema node
+\c - - - :master_port
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #12 from the TPC-H decision support benchmark
+SELECT
+ l_shipmode,
+ sum(case
+ when o_orderpriority = '1-URGENT'
+ OR o_orderpriority = '2-HIGH'
+ then 1
+ else 0
+ end) as high_line_count,
+ sum(case
+ when o_orderpriority <> '1-URGENT'
+ AND o_orderpriority <> '2-HIGH'
+ then 1
+ else 0
+ end) AS low_line_count
+FROM
+ orders_mx,
+ lineitem_mx
+WHERE
+ o_orderkey = l_orderkey
+ AND l_shipmode in ('MAIL', 'SHIP')
+ AND l_commitdate < l_receiptdate
+ AND l_shipdate < l_commitdate
+ AND l_receiptdate >= date '1994-01-01'
+ AND l_receiptdate < date '1994-01-01' + interval '1' year
+GROUP BY
+ l_shipmode
+ORDER BY
+ l_shipmode;
+ l_shipmode | high_line_count | low_line_count
+------------+-----------------+----------------
+ MAIL | 11 | 15
+ SHIP | 11 | 19
+(2 rows)
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #12 from the TPC-H decision support benchmark
+SELECT
+ l_shipmode,
+ sum(case
+ when o_orderpriority = '1-URGENT'
+ OR o_orderpriority = '2-HIGH'
+ then 1
+ else 0
+ end) as high_line_count,
+ sum(case
+ when o_orderpriority <> '1-URGENT'
+ AND o_orderpriority <> '2-HIGH'
+ then 1
+ else 0
+ end) AS low_line_count
+FROM
+ orders_mx,
+ lineitem_mx
+WHERE
+ o_orderkey = l_orderkey
+ AND l_shipmode in ('MAIL', 'SHIP')
+ AND l_commitdate < l_receiptdate
+ AND l_shipdate < l_commitdate
+ AND l_receiptdate >= date '1994-01-01'
+ AND l_receiptdate < date '1994-01-01' + interval '1' year
+GROUP BY
+ l_shipmode
+ORDER BY
+ l_shipmode;
+ l_shipmode | high_line_count | low_line_count
+------------+-----------------+----------------
+ MAIL | 11 | 15
+ SHIP | 11 | 19
+(2 rows)
+
+-- connect to the other worker node
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #12 from the TPC-H decision support benchmark
+SELECT
+ l_shipmode,
+ sum(case
+ when o_orderpriority = '1-URGENT'
+ OR o_orderpriority = '2-HIGH'
+ then 1
+ else 0
+ end) as high_line_count,
+ sum(case
+ when o_orderpriority <> '1-URGENT'
+ AND o_orderpriority <> '2-HIGH'
+ then 1
+ else 0
+ end) AS low_line_count
+FROM
+ orders_mx,
+ lineitem_mx
+WHERE
+ o_orderkey = l_orderkey
+ AND l_shipmode in ('MAIL', 'SHIP')
+ AND l_commitdate < l_receiptdate
+ AND l_shipdate < l_commitdate
+ AND l_receiptdate >= date '1994-01-01'
+ AND l_receiptdate < date '1994-01-01' + interval '1' year
+GROUP BY
+ l_shipmode
+ORDER BY
+ l_shipmode;
+ l_shipmode | high_line_count | low_line_count
+------------+-----------------+----------------
+ MAIL | 11 | 15
+ SHIP | 11 | 19
+(2 rows)
+
diff --git a/src/test/regress/expected/multi_mx_tpch_query14.out b/src/test/regress/expected/multi_mx_tpch_query14.out
new file mode 100644
index 000000000..dc31b46f9
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query14.out
@@ -0,0 +1,78 @@
+--
+-- MULTI_MX_TPCH_QUERY14
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
+-- connect to the schema node
+\c - - - :master_port
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #14 from the TPC-H decision support benchmark
+SELECT
+ 100.00 * sum(case
+ when p_type like 'PROMO%'
+ then l_extendedprice * (1 - l_discount)
+ else 0
+ end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ l_partkey = p_partkey
+ AND l_shipdate >= date '1995-09-01'
+ AND l_shipdate < date '1995-09-01' + interval '1' year;
+ promo_revenue
+---------------------
+ 32.1126387112005225
+(1 row)
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #14 from the TPC-H decision support benchmark
+SELECT
+ 100.00 * sum(case
+ when p_type like 'PROMO%'
+ then l_extendedprice * (1 - l_discount)
+ else 0
+ end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ l_partkey = p_partkey
+ AND l_shipdate >= date '1995-09-01'
+ AND l_shipdate < date '1995-09-01' + interval '1' year;
+ promo_revenue
+---------------------
+ 32.1126387112005225
+(1 row)
+
+-- connect to the other node
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #14 from the TPC-H decision support benchmark
+SELECT
+ 100.00 * sum(case
+ when p_type like 'PROMO%'
+ then l_extendedprice * (1 - l_discount)
+ else 0
+ end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ l_partkey = p_partkey
+ AND l_shipdate >= date '1995-09-01'
+ AND l_shipdate < date '1995-09-01' + interval '1' year;
+ promo_revenue
+---------------------
+ 32.1126387112005225
+(1 row)
+
diff --git a/src/test/regress/expected/multi_mx_tpch_query19.out b/src/test/regress/expected/multi_mx_tpch_query19.out
new file mode 100644
index 000000000..aca98444c
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query19.out
@@ -0,0 +1,129 @@
+--
+-- MULTI_MX_TPCH_QUERY19
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
+-- connect to the schema node
+\c - - - :master_port
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #19 from the TPC-H decision support benchmark. Note that we modified
+-- the query from its original to make it work on smaller data sets.
+SELECT
+ sum(l_extendedprice* (1 - l_discount)) as revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15')
+ AND l_quantity >= 10
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#23' OR p_brand='Brand#24')
+ AND l_quantity >= 20
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35')
+ AND l_quantity >= 1
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ );
+ revenue
+-------------
+ 144747.0857
+(1 row)
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #19 from the TPC-H decision support benchmark. Note that we modified
+-- the query from its original to make it work on smaller data sets.
+SELECT
+ sum(l_extendedprice* (1 - l_discount)) as revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15')
+ AND l_quantity >= 10
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#23' OR p_brand='Brand#24')
+ AND l_quantity >= 20
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35')
+ AND l_quantity >= 1
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ );
+ revenue
+-------------
+ 144747.0857
+(1 row)
+
+-- connect to the other node
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #19 from the TPC-H decision support benchmark. Note that we modified
+-- the query from its original to make it work on smaller data sets.
+SELECT
+ sum(l_extendedprice* (1 - l_discount)) as revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15')
+ AND l_quantity >= 10
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#23' OR p_brand='Brand#24')
+ AND l_quantity >= 20
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35')
+ AND l_quantity >= 1
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ );
+ revenue
+-------------
+ 144747.0857
+(1 row)
+
diff --git a/src/test/regress/expected/multi_mx_tpch_query3.out b/src/test/regress/expected/multi_mx_tpch_query3.out
new file mode 100644
index 000000000..74da826ec
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query3.out
@@ -0,0 +1,144 @@
+--
+-- MULTI_MX_TPCH_QUERY3
+--
+-- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests,
+-- we don't set citus.large_table_shard_count here, and instead use the default value
+-- coming from postgresql.conf or multi_task_tracker_executor.conf.
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
+-- connect to the schema node
+\c - - - :master_port
+SELECT
+ l_orderkey,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ o_orderdate,
+ o_shippriority
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx
+WHERE
+ c_mktsegment = 'BUILDING'
+ AND c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate < date '1995-03-15'
+ AND l_shipdate > date '1995-03-15'
+GROUP BY
+ l_orderkey,
+ o_orderdate,
+ o_shippriority
+ORDER BY
+ revenue DESC,
+ o_orderdate;
+ l_orderkey | revenue | o_orderdate | o_shippriority
+------------+-------------+-------------+----------------
+ 1637 | 268170.6408 | 02-08-1995 | 0
+ 9696 | 252014.5497 | 02-20-1995 | 0
+ 10916 | 242749.1996 | 03-11-1995 | 0
+ 450 | 221012.3165 | 03-05-1995 | 0
+ 5347 | 198353.7942 | 02-22-1995 | 0
+ 10691 | 112800.1020 | 03-14-1995 | 0
+ 386 | 104975.2484 | 01-25-1995 | 0
+ 5765 | 88222.7556 | 12-15-1994 | 0
+ 4707 | 88143.7774 | 02-27-1995 | 0
+ 5312 | 83750.7028 | 02-24-1995 | 0
+ 5728 | 70101.6400 | 12-11-1994 | 0
+ 577 | 57986.6224 | 12-19-1994 | 0
+ 12706 | 16636.6368 | 11-21-1994 | 0
+ 3844 | 8851.3200 | 12-29-1994 | 0
+ 11073 | 7433.6295 | 12-02-1994 | 0
+ 13924 | 3111.4970 | 12-20-1994 | 0
+(16 rows)
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
+SELECT
+ l_orderkey,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ o_orderdate,
+ o_shippriority
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx
+WHERE
+ c_mktsegment = 'BUILDING'
+ AND c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate < date '1995-03-15'
+ AND l_shipdate > date '1995-03-15'
+GROUP BY
+ l_orderkey,
+ o_orderdate,
+ o_shippriority
+ORDER BY
+ revenue DESC,
+ o_orderdate;
+ l_orderkey | revenue | o_orderdate | o_shippriority
+------------+-------------+-------------+----------------
+ 1637 | 268170.6408 | 02-08-1995 | 0
+ 9696 | 252014.5497 | 02-20-1995 | 0
+ 10916 | 242749.1996 | 03-11-1995 | 0
+ 450 | 221012.3165 | 03-05-1995 | 0
+ 5347 | 198353.7942 | 02-22-1995 | 0
+ 10691 | 112800.1020 | 03-14-1995 | 0
+ 386 | 104975.2484 | 01-25-1995 | 0
+ 5765 | 88222.7556 | 12-15-1994 | 0
+ 4707 | 88143.7774 | 02-27-1995 | 0
+ 5312 | 83750.7028 | 02-24-1995 | 0
+ 5728 | 70101.6400 | 12-11-1994 | 0
+ 577 | 57986.6224 | 12-19-1994 | 0
+ 12706 | 16636.6368 | 11-21-1994 | 0
+ 3844 | 8851.3200 | 12-29-1994 | 0
+ 11073 | 7433.6295 | 12-02-1994 | 0
+ 13924 | 3111.4970 | 12-20-1994 | 0
+(16 rows)
+
+-- connect to the other node
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
+SELECT
+ l_orderkey,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ o_orderdate,
+ o_shippriority
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx
+WHERE
+ c_mktsegment = 'BUILDING'
+ AND c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate < date '1995-03-15'
+ AND l_shipdate > date '1995-03-15'
+GROUP BY
+ l_orderkey,
+ o_orderdate,
+ o_shippriority
+ORDER BY
+ revenue DESC,
+ o_orderdate;
+ l_orderkey | revenue | o_orderdate | o_shippriority
+------------+-------------+-------------+----------------
+ 1637 | 268170.6408 | 02-08-1995 | 0
+ 9696 | 252014.5497 | 02-20-1995 | 0
+ 10916 | 242749.1996 | 03-11-1995 | 0
+ 450 | 221012.3165 | 03-05-1995 | 0
+ 5347 | 198353.7942 | 02-22-1995 | 0
+ 10691 | 112800.1020 | 03-14-1995 | 0
+ 386 | 104975.2484 | 01-25-1995 | 0
+ 5765 | 88222.7556 | 12-15-1994 | 0
+ 4707 | 88143.7774 | 02-27-1995 | 0
+ 5312 | 83750.7028 | 02-24-1995 | 0
+ 5728 | 70101.6400 | 12-11-1994 | 0
+ 577 | 57986.6224 | 12-19-1994 | 0
+ 12706 | 16636.6368 | 11-21-1994 | 0
+ 3844 | 8851.3200 | 12-29-1994 | 0
+ 11073 | 7433.6295 | 12-02-1994 | 0
+ 13924 | 3111.4970 | 12-20-1994 | 0
+(16 rows)
+
diff --git a/src/test/regress/expected/multi_mx_tpch_query6.out b/src/test/regress/expected/multi_mx_tpch_query6.out
new file mode 100644
index 000000000..7c8aa8333
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query6.out
@@ -0,0 +1,66 @@
+--
+-- MULTI_MX_TPCH_QUERY6
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
+-- connect to the schema node
+\c - - - :master_port
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #6 from the TPC-H decision support benchmark
+SELECT
+ sum(l_extendedprice * l_discount) as revenue
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate >= date '1994-01-01'
+ and l_shipdate < date '1994-01-01' + interval '1 year'
+ and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+ and l_quantity < 24;
+ revenue
+-------------
+ 243277.7858
+(1 row)
+
+-- connect to one of the worker nodes
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #6 from the TPC-H decision support benchmark
+SELECT
+ sum(l_extendedprice * l_discount) as revenue
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate >= date '1994-01-01'
+ and l_shipdate < date '1994-01-01' + interval '1 year'
+ and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+ and l_quantity < 24;
+ revenue
+-------------
+ 243277.7858
+(1 row)
+
+-- connect to the other worker node
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
+-- Change configuration to treat lineitem and orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #6 from the TPC-H decision support benchmark
+SELECT
+ sum(l_extendedprice * l_discount) as revenue
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate >= date '1994-01-01'
+ and l_shipdate < date '1994-01-01' + interval '1 year'
+ and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+ and l_quantity < 24;
+ revenue
+-------------
+ 243277.7858
+(1 row)
+
diff --git a/src/test/regress/expected/multi_mx_tpch_query7.out b/src/test/regress/expected/multi_mx_tpch_query7.out
new file mode 100644
index 000000000..fb6984c60
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query7.out
@@ -0,0 +1,156 @@
+--
+-- MULTI_MX_TPCH_QUERY7
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
+-- connect to the schema node
+\c - - - :master_port
+-- Change configuration to treat lineitem AND orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #7 from the TPC-H decision support benchmark
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) as revenue
+FROM
+ (
+ SELECT
+ n1.n_name as supp_nation,
+ n2.n_name as cust_nation,
+ extract(year FROM l_shipdate) as l_year,
+ l_extendedprice * (1 - l_discount) as volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = n1.n_nationkey
+ AND c_nationkey = n2.n_nationkey
+ AND (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) as shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+ supp_nation | cust_nation | l_year | revenue
+---------------------------+---------------------------+--------+-----------
+ GERMANY | FRANCE | 1995 | 2399.2948
+(1 row)
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
+-- Change configuration to treat lineitem AND orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #7 from the TPC-H decision support benchmark
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) as revenue
+FROM
+ (
+ SELECT
+ n1.n_name as supp_nation,
+ n2.n_name as cust_nation,
+ extract(year FROM l_shipdate) as l_year,
+ l_extendedprice * (1 - l_discount) as volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = n1.n_nationkey
+ AND c_nationkey = n2.n_nationkey
+ AND (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) as shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+ supp_nation | cust_nation | l_year | revenue
+---------------------------+---------------------------+--------+-----------
+ GERMANY | FRANCE | 1995 | 2399.2948
+(1 row)
+
+-- connect to the other worker node
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
+-- Change configuration to treat lineitem AND orders tables as large
+SET citus.large_table_shard_count TO 2;
+-- Query #7 from the TPC-H decision support benchmark
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) as revenue
+FROM
+ (
+ SELECT
+ n1.n_name as supp_nation,
+ n2.n_name as cust_nation,
+ extract(year FROM l_shipdate) as l_year,
+ l_extendedprice * (1 - l_discount) as volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = n1.n_nationkey
+ AND c_nationkey = n2.n_nationkey
+ AND (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) as shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+ supp_nation | cust_nation | l_year | revenue
+---------------------------+---------------------------+--------+-----------
+ GERMANY | FRANCE | 1995 | 2399.2948
+(1 row)
+
diff --git a/src/test/regress/expected/multi_mx_tpch_query7_nested.out b/src/test/regress/expected/multi_mx_tpch_query7_nested.out
new file mode 100644
index 000000000..b5414f11d
--- /dev/null
+++ b/src/test/regress/expected/multi_mx_tpch_query7_nested.out
@@ -0,0 +1,183 @@
+--
+-- MULTI_MX_TPCH_QUERY7_NESTED
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
+-- connect to the schema node
+\c - - - :master_port
+-- Change configuration to treat lineitem AND orders tables AS large
+SET citus.large_table_shard_count TO 2;
+-- Query #7 from the TPC-H benchmark; modified to include sub-selects
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) AS revenue
+FROM
+ (
+ SELECT
+ supp_nation,
+ cust_nation,
+ extract(year FROM l_shipdate) AS l_year,
+ l_extendedprice * (1 - l_discount) AS volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ (
+ SELECT
+ n1.n_nationkey AS supp_nation_key,
+ n2.n_nationkey AS cust_nation_key,
+ n1.n_name AS supp_nation,
+ n2.n_name AS cust_nation
+ FROM
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ ) AS temp
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = supp_nation_key
+ AND c_nationkey = cust_nation_key
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) AS shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+ supp_nation | cust_nation | l_year | revenue
+---------------------------+---------------------------+--------+-----------
+ GERMANY | FRANCE | 1995 | 2399.2948
+(1 row)
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
+-- Change configuration to treat lineitem AND orders tables AS large
+SET citus.large_table_shard_count TO 2;
+-- Query #7 from the TPC-H benchmark; modified to include sub-selects
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) AS revenue
+FROM
+ (
+ SELECT
+ supp_nation,
+ cust_nation,
+ extract(year FROM l_shipdate) AS l_year,
+ l_extendedprice * (1 - l_discount) AS volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ (
+ SELECT
+ n1.n_nationkey AS supp_nation_key,
+ n2.n_nationkey AS cust_nation_key,
+ n1.n_name AS supp_nation,
+ n2.n_name AS cust_nation
+ FROM
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ ) AS temp
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = supp_nation_key
+ AND c_nationkey = cust_nation_key
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) AS shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+ supp_nation | cust_nation | l_year | revenue
+---------------------------+---------------------------+--------+-----------
+ GERMANY | FRANCE | 1995 | 2399.2948
+(1 row)
+
+-- connect to the other worker node
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
+-- Change configuration to treat lineitem AND orders tables AS large
+SET citus.large_table_shard_count TO 2;
+-- Query #7 from the TPC-H benchmark; modified to include sub-selects
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) AS revenue
+FROM
+ (
+ SELECT
+ supp_nation,
+ cust_nation,
+ extract(year FROM l_shipdate) AS l_year,
+ l_extendedprice * (1 - l_discount) AS volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ (
+ SELECT
+ n1.n_nationkey AS supp_nation_key,
+ n2.n_nationkey AS cust_nation_key,
+ n1.n_name AS supp_nation,
+ n2.n_name AS cust_nation
+ FROM
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ ) AS temp
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = supp_nation_key
+ AND c_nationkey = cust_nation_key
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) AS shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+ supp_nation | cust_nation | l_year | revenue
+---------------------------+---------------------------+--------+-----------
+ GERMANY | FRANCE | 1995 | 2399.2948
+(1 row)
+
diff --git a/src/test/regress/input/multi_mx_copy_data.source b/src/test/regress/input/multi_mx_copy_data.source
new file mode 100644
index 000000000..5c801f8eb
--- /dev/null
+++ b/src/test/regress/input/multi_mx_copy_data.source
@@ -0,0 +1,33 @@
+--
+-- MULTI_MX_COPY_DATA
+--
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
+
+\COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+
+SET search_path TO citus_mx_test_schema;
+\COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+\COPY citus_mx_test_schema_join_1.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+\COPY citus_mx_test_schema_join_1.nation_hash_2 FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+\COPY citus_mx_test_schema_join_2.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+
+-- now try loading data from a worker node
+\c - - - :worker_1_port
+SET search_path TO public;
+
+\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
+
+\COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
+\COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
+
+-- and use the second worker as well
+\c - - - :worker_2_port
+SET search_path TO public;
+
+\COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
+\COPY nation_mx FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
+\COPY part_mx FROM '@abs_srcdir@/data/part.data' with delimiter '|'
+\COPY supplier_mx FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
diff --git a/src/test/regress/multi_mx_schedule b/src/test/regress/multi_mx_schedule
new file mode 100644
index 000000000..e50f6a0d8
--- /dev/null
+++ b/src/test/regress/multi_mx_schedule
@@ -0,0 +1,29 @@
+# ----------
+# $Id$
+#
+# Regression tests for MX. This schedule runs tests for worker metadata
+# and MX tables. The tests mostly focus on running SQL queries from the worker
+# nodes and metadata operations from the schema node.
+#
+# Note that we use variant comparison files to test version-dependent regression
+# test results. For more information:
+# http://www.postgresql.org/docs/9.5/static/regress-variant.html
+# ----------
+
+# ---
+# Tests around schema changes; these run first so that there are no preexisting objects.
+# ---
+test: multi_extension
+test: multi_cluster_management
+
+test: multi_mx_create_table
+test: multi_mx_copy_data multi_mx_router_planner
+test: multi_mx_schema_support multi_mx_tpch_query1 multi_mx_tpch_query10
+test: multi_mx_tpch_query12 multi_mx_tpch_query14 multi_mx_tpch_query19
+test: multi_mx_tpch_query3 multi_mx_tpch_query6 multi_mx_tpch_query7
+test: multi_mx_tpch_query7_nested multi_mx_ddl
+test: multi_mx_repartition_udt_prepare
+test: multi_mx_repartition_join_w1 multi_mx_repartition_join_w2 multi_mx_repartition_udt_w1 multi_mx_repartition_udt_w2
+test: multi_mx_metadata multi_mx_modifications multi_mx_modifying_xacts
+test: multi_mx_explain
+test: multi_mx_reference_table
diff --git a/src/test/regress/output/multi_mx_copy_data.source b/src/test/regress/output/multi_mx_copy_data.source
new file mode 100644
index 000000000..ca5c291dc
--- /dev/null
+++ b/src/test/regress/output/multi_mx_copy_data.source
@@ -0,0 +1,25 @@
+--
+-- MULTI_MX_COPY_DATA
+--
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 290000;
+\COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+SET search_path TO citus_mx_test_schema;
+\COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+\COPY citus_mx_test_schema_join_1.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+\COPY citus_mx_test_schema_join_1.nation_hash_2 FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+\COPY citus_mx_test_schema_join_2.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
+-- now try loading data from a worker node
+\c - - - :worker_1_port
+SET search_path TO public;
+\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
+\COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
+\COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
+-- and use the second worker as well
+\c - - - :worker_2_port
+SET search_path TO public;
+\COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
+\COPY nation_mx FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
+\COPY part_mx FROM '@abs_srcdir@/data/part.data' with delimiter '|'
+\COPY supplier_mx FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
diff --git a/src/test/regress/sql/.gitignore b/src/test/regress/sql/.gitignore
index 73352d1a2..a41eca963 100644
--- a/src/test/regress/sql/.gitignore
+++ b/src/test/regress/sql/.gitignore
@@ -15,3 +15,4 @@
/multi_subquery_0.sql
/worker_copy.sql
/multi_complex_count_distinct.sql
+/multi_mx_copy_data.sql
diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql
new file mode 100644
index 000000000..989aaccd5
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_create_table.sql
@@ -0,0 +1,444 @@
+--
+-- MULTI_MX_CREATE_TABLE
+--
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1220000;
+
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+
+-- create schema to test schema support
+CREATE SCHEMA citus_mx_test_schema;
+CREATE SCHEMA citus_mx_test_schema_join_1;
+CREATE SCHEMA citus_mx_test_schema_join_2;
+
+-- create UDFs that we're going to use in our tests
+SET search_path TO public;
+CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+
+SET search_path TO citus_mx_test_schema;
+CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+
+CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+
+CREATE OPERATOR citus_mx_test_schema.=== (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===,
+ NEGATOR = !==,
+ HASHES, MERGES
+);
+
+SET search_path TO public;
+CREATE COLLATION citus_mx_test_schema.english FROM "en_US";
+
+CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
+CREATE TYPE order_side_mx AS ENUM ('buy', 'sell');
+
+-- now create required stuff in the worker 1
+\c - - - :worker_1_port
+
+-- create schema to test schema support
+CREATE SCHEMA citus_mx_test_schema;
+CREATE SCHEMA citus_mx_test_schema_join_1;
+CREATE SCHEMA citus_mx_test_schema_join_2;
+
+-- create UDFs in worker node
+CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+
+SET search_path TO citus_mx_test_schema;
+CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+
+CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+
+-- create operator
+CREATE OPERATOR citus_mx_test_schema.=== (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===,
+ NEGATOR = !==,
+ HASHES, MERGES
+);
+
+SET search_path TO public;
+CREATE COLLATION citus_mx_test_schema.english FROM "en_US";
+
+SET search_path TO public;
+CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
+CREATE TYPE order_side_mx AS ENUM ('buy', 'sell');
+
+-- now create required stuff in the worker 2
+\c - - - :worker_2_port
+
+-- create schema to test schema support
+CREATE SCHEMA citus_mx_test_schema;
+CREATE SCHEMA citus_mx_test_schema_join_1;
+CREATE SCHEMA citus_mx_test_schema_join_2;
+
+
+-- create UDF
+CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+
+SET search_path TO citus_mx_test_schema;
+CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer)
+ RETURNS text AS
+$$
+DECLARE
+ strresult text;
+BEGIN
+ RETURN theValue * 3 / 2 + 1;
+END;
+$$
+LANGUAGE 'plpgsql' IMMUTABLE;
+
+CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int)
+RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;
+
+-- create operator
+CREATE OPERATOR citus_mx_test_schema.=== (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===,
+ NEGATOR = !==,
+ HASHES, MERGES
+);
+
+
+SET search_path TO public;
+CREATE COLLATION citus_mx_test_schema.english FROM "en_US";
+
+SET search_path TO public;
+CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
+CREATE TYPE order_side_mx AS ENUM ('buy', 'sell');
+
+-- connect back to the master, and do some more tests
+\c - - - :master_port
+
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+SET search_path TO public;
+
+CREATE TABLE nation_hash(
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152)
+);
+
+SET citus.shard_count TO 16;
+SELECT create_distributed_table('nation_hash', 'n_nationkey');
+
+SET search_path TO citus_mx_test_schema;
+
+-- create mx tables that we're going to use for our tests
+CREATE TABLE citus_mx_test_schema.nation_hash(
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152)
+);
+
+SELECT create_distributed_table('nation_hash', 'n_nationkey');
+
+CREATE TABLE citus_mx_test_schema_join_1.nation_hash (
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152));
+
+SET citus.shard_count TO 4;
+SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash', 'n_nationkey');
+
+CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 (
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152));
+
+SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash_2', 'n_nationkey');
+
+SET search_path TO citus_mx_test_schema_join_2;
+CREATE TABLE nation_hash (
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152));
+
+SELECT create_distributed_table('nation_hash', 'n_nationkey');
+
+SET search_path TO citus_mx_test_schema;
+CREATE TABLE nation_hash_collation_search_path(
+ n_nationkey integer not null,
+ n_name char(25) not null COLLATE english,
+ n_regionkey integer not null,
+ n_comment varchar(152)
+);
+SELECT create_distributed_table('nation_hash_collation_search_path', 'n_nationkey');
+
+\COPY nation_hash_collation_search_path FROM STDIN with delimiter '|';
+0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
+1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
+2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
+3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold
+4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d
+5|ETHIOPIA|0|ven packages wake quickly. regu
+\.
+
+CREATE TABLE citus_mx_test_schema.nation_hash_composite_types(
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152),
+ test_col citus_mx_test_schema.new_composite_type
+);
+
+SELECT create_distributed_table('citus_mx_test_schema.nation_hash_composite_types', 'n_nationkey');
+
+-- insert some data to verify composite type queries
+\COPY citus_mx_test_schema.nation_hash_composite_types FROM STDIN with delimiter '|';
+0|ALGERIA|0|haggle. carefully final deposits detect slyly agai|(a,a)
+1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon|(a,b)
+2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special |(a,c)
+3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold|(a,d)
+4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d|(a,e)
+5|ETHIOPIA|0|ven packages wake quickly. regu|(a,f)
+\.
+
+-- now create tpch tables
+-- Create new table definitions for use in testing distributed planning and
+-- execution functionality. Also create indexes to boost performance.
+SET search_path TO public;
+
+CREATE TABLE lineitem_mx (
+ l_orderkey bigint not null,
+ l_partkey integer not null,
+ l_suppkey integer not null,
+ l_linenumber integer not null,
+ l_quantity decimal(15, 2) not null,
+ l_extendedprice decimal(15, 2) not null,
+ l_discount decimal(15, 2) not null,
+ l_tax decimal(15, 2) not null,
+ l_returnflag char(1) not null,
+ l_linestatus char(1) not null,
+ l_shipdate date not null,
+ l_commitdate date not null,
+ l_receiptdate date not null,
+ l_shipinstruct char(25) not null,
+ l_shipmode char(10) not null,
+ l_comment varchar(44) not null,
+ PRIMARY KEY(l_orderkey, l_linenumber) );
+
+SET citus.shard_count TO 16;
+SELECT create_distributed_table('lineitem_mx', 'l_orderkey');
+
+CREATE INDEX lineitem_mx_time_index ON lineitem_mx (l_shipdate);
+
+CREATE TABLE orders_mx (
+ o_orderkey bigint not null,
+ o_custkey integer not null,
+ o_orderstatus char(1) not null,
+ o_totalprice decimal(15,2) not null,
+ o_orderdate date not null,
+ o_orderpriority char(15) not null,
+ o_clerk char(15) not null,
+ o_shippriority integer not null,
+ o_comment varchar(79) not null,
+ PRIMARY KEY(o_orderkey) );
+SELECT create_distributed_table('orders_mx', 'o_orderkey');
+
+CREATE TABLE customer_mx (
+ c_custkey integer not null,
+ c_name varchar(25) not null,
+ c_address varchar(40) not null,
+ c_nationkey integer not null,
+ c_phone char(15) not null,
+ c_acctbal decimal(15,2) not null,
+ c_mktsegment char(10) not null,
+ c_comment varchar(117) not null);
+
+SET citus.shard_count TO 1;
+SELECT create_distributed_table('customer_mx', 'c_custkey');
+
+CREATE TABLE nation_mx (
+ n_nationkey integer not null,
+ n_name char(25) not null,
+ n_regionkey integer not null,
+ n_comment varchar(152));
+
+SELECT create_distributed_table('nation_mx', 'n_nationkey');
+
+CREATE TABLE part_mx (
+ p_partkey integer not null,
+ p_name varchar(55) not null,
+ p_mfgr char(25) not null,
+ p_brand char(10) not null,
+ p_type varchar(25) not null,
+ p_size integer not null,
+ p_container char(10) not null,
+ p_retailprice decimal(15,2) not null,
+ p_comment varchar(23) not null);
+
+SELECT create_distributed_table('part_mx', 'p_partkey');
+
+CREATE TABLE supplier_mx
+(
+ s_suppkey integer not null,
+ s_name char(25) not null,
+ s_address varchar(40) not null,
+ s_nationkey integer,
+ s_phone char(15) not null,
+ s_acctbal decimal(15,2) not null,
+ s_comment varchar(101) not null
+);
+
+SELECT create_distributed_table('supplier_mx', 's_suppkey');
+
+-- Create test table for ddl
+CREATE TABLE mx_ddl_table (
+ key int primary key,
+ value int
+);
+
+SET citus.shard_count TO 4;
+SELECT create_distributed_table('mx_ddl_table', 'key', 'hash');
+
+-- Load some test data
+COPY mx_ddl_table (key, value) FROM STDIN WITH (FORMAT 'csv');
+1,10
+2,11
+3,21
+4,37
+5,60
+6,100
+10,200
+11,230
+\.
+
+-- test table for modifications
+CREATE TABLE limit_orders_mx (
+ id bigint PRIMARY KEY,
+ symbol text NOT NULL,
+ bidder_id bigint NOT NULL,
+ placed_at timestamp NOT NULL,
+ kind order_side_mx NOT NULL,
+ limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00)
+);
+
+SET citus.shard_count TO 2;
+SELECT create_distributed_table('limit_orders_mx', 'id');
+
+-- test table for modifications
+CREATE TABLE multiple_hash_mx (
+ category text NOT NULL,
+ data text NOT NULL
+);
+
+SELECT create_distributed_table('multiple_hash_mx', 'category');
+
+SET citus.shard_count TO 4;
+CREATE TABLE app_analytics_events_mx (id bigserial, app_id integer, name text);
+SELECT create_distributed_table('app_analytics_events_mx', 'app_id');
+
+
+CREATE TABLE researchers_mx (
+ id bigint NOT NULL,
+ lab_id int NOT NULL,
+ name text NOT NULL
+);
+
+SET citus.shard_count TO 2;
+SELECT create_distributed_table('researchers_mx', 'lab_id');
+
+CREATE TABLE labs_mx (
+ id bigint NOT NULL,
+ name text NOT NULL
+);
+
+SET citus.shard_count TO 1;
+SELECT create_distributed_table('labs_mx', 'id');
+
+-- now, for some special failures...
+CREATE TABLE objects_mx (
+ id bigint PRIMARY KEY,
+ name text NOT NULL
+);
+
+SELECT create_distributed_table('objects_mx', 'id', 'hash');
+
+CREATE TABLE articles_hash_mx (
+ id bigint NOT NULL,
+ author_id bigint NOT NULL,
+ title varchar(20) NOT NULL,
+ word_count integer
+);
+
+-- this table is used in router executor tests
+CREATE TABLE articles_single_shard_hash_mx (LIKE articles_hash_mx);
+
+SET citus.shard_count TO 2;
+SELECT create_distributed_table('articles_hash_mx', 'author_id');
+
+SET citus.shard_count TO 1;
+SELECT create_distributed_table('articles_single_shard_hash_mx', 'author_id');
+
+SET citus.shard_count TO 4;
+CREATE TABLE company_employees_mx (company_id int, employee_id int, manager_id int);
+SELECT create_distributed_table('company_employees_mx', 'company_id');
+
+WITH shard_counts AS (
+ SELECT logicalrelid, count(*) AS shard_count FROM pg_dist_shard GROUP BY logicalrelid
+ )
+SELECT logicalrelid, colocationid, shard_count, partmethod, repmodel
+FROM pg_dist_partition NATURAL JOIN shard_counts
+ORDER BY colocationid, logicalrelid;
diff --git a/src/test/regress/sql/multi_mx_ddl.sql b/src/test/regress/sql/multi_mx_ddl.sql
new file mode 100644
index 000000000..d9778a365
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_ddl.sql
@@ -0,0 +1,117 @@
+-- Tests related to distributed DDL commands on an mx cluster
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1600000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1600000;
+
+SELECT * FROM mx_ddl_table ORDER BY key;
+
+-- CREATE INDEX
+CREATE INDEX ddl_test_index ON mx_ddl_table(value);
+
+-- ADD COLUMN
+ALTER TABLE mx_ddl_table ADD COLUMN version INTEGER;
+
+-- SET DEFAULT
+ALTER TABLE mx_ddl_table ALTER COLUMN version SET DEFAULT 1;
+
+SELECT master_modify_multiple_shards('UPDATE mx_ddl_table SET version=0.1 WHERE version IS NULL');
+
+-- SET NOT NULL
+ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL;
+
+
+-- See that the changes are applied on the schema node, worker tables, and shards
+\d mx_ddl_table
+
+\c - - - :worker_1_port
+
+\d mx_ddl_table
+
+\d mx_ddl_table_1600000
+
+\c - - - :worker_2_port
+
+\d mx_ddl_table
+
+\d mx_ddl_table_1600001
+
+INSERT INTO mx_ddl_table VALUES (37, 78, 2);
+INSERT INTO mx_ddl_table VALUES (38, 78);
+
+-- Switch to the schema node
+\c - - - :master_port
+
+
+-- SET DATA TYPE
+ALTER TABLE mx_ddl_table ALTER COLUMN version SET DATA TYPE double precision;
+
+INSERT INTO mx_ddl_table VALUES (78, 83, 2.1);
+
+\c - - - :worker_1_port
+SELECT * FROM mx_ddl_table ORDER BY key;
+
+-- Switch to the schema node
+\c - - - :master_port
+
+-- DROP INDEX
+DROP INDEX ddl_test_index;
+
+-- DROP DEFAULT
+ALTER TABLE mx_ddl_table ALTER COLUMN version DROP DEFAULT;
+
+-- DROP NOT NULL
+ALTER TABLE mx_ddl_table ALTER COLUMN version DROP NOT NULL;
+
+-- DROP COLUMN
+ALTER TABLE mx_ddl_table DROP COLUMN version;
+
+
+-- See that the changes are applied on the schema node, worker tables, and shards
+\d mx_ddl_table
+
+\c - - - :worker_1_port
+
+\d mx_ddl_table
+
+\d mx_ddl_table_1600000
+
+\c - - - :worker_2_port
+
+\d mx_ddl_table
+
+\d mx_ddl_table_1600001
+
+-- Show that DDL commands are done within a two-phase commit transaction
+\c - - - :master_port
+
+SET client_min_messages TO debug2;
+
+CREATE INDEX ddl_test_index ON mx_ddl_table(value);
+
+RESET client_min_messages;
+
+DROP INDEX ddl_test_index;
+
+-- show that sequences owned by mx tables result in unique values
+SET citus.shard_replication_factor TO 1;
+SET citus.shard_count TO 4;
+SET citus.replication_model TO streaming;
+CREATE TABLE mx_sequence(key INT, value BIGSERIAL);
+SELECT create_distributed_table('mx_sequence', 'key');
+
+\c - - - :worker_1_port
+
+SELECT groupid FROM pg_dist_local_group;
+SELECT * FROM mx_sequence_value_seq;
+
+\c - - - :worker_2_port
+
+SELECT groupid FROM pg_dist_local_group;
+SELECT * FROM mx_sequence_value_seq;
+
+\c - - - :master_port
+
+-- the type of sequences can't be changed
+ALTER TABLE mx_sequence ALTER value TYPE BIGINT;
+ALTER TABLE mx_sequence ALTER value TYPE INT;
+
diff --git a/src/test/regress/sql/multi_mx_explain.sql b/src/test/regress/sql/multi_mx_explain.sql
new file mode 100644
index 000000000..ea47a3285
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_explain.sql
@@ -0,0 +1,207 @@
+--
+-- MULTI_MX_EXPLAIN
+--
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
+\c - - - :master_port
+
+\a\t
+
+SET citus.task_executor_type TO 'real-time';
+SET citus.explain_distributed_queries TO on;
+
+\c - - - :worker_1_port
+-- Function that parses explain output as JSON
+CREATE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+
+-- Function that parses explain output as XML
+CREATE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+
+\c - - - :worker_2_port
+-- Function that parses explain output as JSON
+CREATE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+
+-- Function that parses explain output as XML
+CREATE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+
+-- Test JSON format
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+
+-- Validate JSON format
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+
+\c - - - :worker_1_port
+-- Test XML format
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+
+-- Validate XML format
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+
+-- Test YAML format
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+
+\c - - - :worker_2_port
+-- Test verbose
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx;
+
+-- Test join
+EXPLAIN (COSTS FALSE)
+ SELECT * FROM lineitem_mx
+ JOIN orders_mx ON l_orderkey = o_orderkey AND l_quantity < 5.0
+ ORDER BY l_quantity LIMIT 10;
+
+-- Test insert
+EXPLAIN (COSTS FALSE)
+ INSERT INTO lineitem_mx VALUES(1,0);
+
+-- Test update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_mx
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+
+-- Test delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem_mx
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+
+-- Test single-shard SELECT
+EXPLAIN (COSTS FALSE)
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5;
+
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
+
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$);
+
+-- Test CREATE TABLE ... AS
+EXPLAIN (COSTS FALSE)
+ CREATE TABLE explain_result AS
+ SELECT * FROM lineitem_mx;
+
+-- Test all tasks output
+SET citus.explain_all_tasks TO on;
+
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
+
+SELECT true AS valid FROM explain_xml($$
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
+
+SELECT true AS valid FROM explain_json($$
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$);
+
+-- Test task tracker
+SET citus.task_executor_type TO 'task-tracker';
+SET citus.explain_all_tasks TO off;
+
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030;
+
+-- Test re-partition join
+SET citus.large_table_shard_count TO 1;
+
+EXPLAIN (COSTS FALSE)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+
+SELECT true AS valid FROM explain_json($$
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT count(*)
+ FROM lineitem_mx, orders_mx, customer_mx, supplier_mx
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
diff --git a/src/test/regress/sql/multi_mx_metadata.sql b/src/test/regress/sql/multi_mx_metadata.sql
new file mode 100644
index 000000000..c51260fbf
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_metadata.sql
@@ -0,0 +1,162 @@
+-- Test creation of mx tables and metadata syncing
+
+-- get rid of the previously created entries in pg_dist_transaction
+-- for the sake of getting consistent results in this test file
+SELECT recover_prepared_transactions();
+
+CREATE TABLE distributed_mx_table (
+ key text primary key,
+ value jsonb
+);
+CREATE INDEX ON distributed_mx_table USING GIN (value);
+
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+
+SET citus.shard_count TO 4;
+SELECT create_distributed_table('distributed_mx_table', 'key');
+
+-- Verify that we've logged commit records
+SELECT count(*) FROM pg_dist_transaction;
+
+-- Confirm that the metadata transactions have been committed
+SELECT recover_prepared_transactions();
+
+-- Verify that the commit records have been removed
+SELECT count(*) FROM pg_dist_transaction;
+
+\c - - - :worker_1_port
+
+\d distributed_mx_table
+
+SELECT repmodel FROM pg_dist_partition
+WHERE logicalrelid = 'distributed_mx_table'::regclass;
+
+SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+WHERE logicalrelid = 'distributed_mx_table'::regclass;
+
+\c - - - :worker_2_port
+
+\d distributed_mx_table
+
+SELECT repmodel FROM pg_dist_partition
+WHERE logicalrelid = 'distributed_mx_table'::regclass;
+
+SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+WHERE logicalrelid = 'distributed_mx_table'::regclass;
+
+-- Create a table and then roll back the transaction
+\c - - - :master_port
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+
+BEGIN;
+CREATE TABLE should_not_exist (
+ key text primary key,
+ value jsonb
+);
+SELECT create_distributed_table('should_not_exist', 'key');
+ABORT;
+
+-- Verify that the table does not exist on the worker
+\c - - - :worker_1_port
+SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist';
+
+-- Ensure that we don't allow prepare on a metadata transaction
+\c - - - :master_port
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+
+BEGIN;
+CREATE TABLE should_not_exist (
+ key text primary key,
+ value jsonb
+);
+SELECT create_distributed_table('should_not_exist', 'key');
+PREPARE TRANSACTION 'this_should_fail';
+
+-- now show that we can create tables and schemas within a single transaction
+BEGIN;
+CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts;
+SET search_path TO citus_mx_schema_for_xacts;
+SET citus.shard_replication_factor TO 1;
+SET citus.shard_count TO 1;
+
+CREATE TABLE objects_for_xacts (
+ id bigint PRIMARY KEY,
+ name text NOT NULL
+);
+SELECT create_distributed_table('objects_for_xacts', 'id');
+
+COMMIT;
+
+-- see that the table was actually created and distributed
+\c - - - :worker_1_port
+SELECT repmodel FROM pg_dist_partition
+WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass;
+
+SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass;
+
+\c - - - :master_port
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+
+-- now show that we can roll back creating an mx table, but the shards remain...
+BEGIN;
+CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts;
+SET search_path TO citus_mx_schema_for_xacts;
+SET citus.shard_replication_factor TO 1;
+SET citus.shard_count TO 2;
+
+CREATE TABLE objects_for_xacts2 (
+ id bigint PRIMARY KEY,
+ name text NOT NULL
+);
+SELECT create_distributed_table('objects_for_xacts2', 'id');
+
+ROLLBACK;
+
+-- show that the table does not exist on the schema node
+SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts';
+
+\c - - - :worker_1_port
+
+-- the distributed table does not exist on the worker node
+SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts';
+-- but the shard exists since we do not create shards in a transaction
+SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts';
+
+-- make sure that master_drop_all_shards does not work from the worker nodes
+SELECT master_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts');
+
+-- Ensure pg_dist_transaction is empty for test
+SELECT recover_prepared_transactions();
+
+-- Create some "fake" prepared transactions to recover
+\c - - - :worker_1_port
+
+BEGIN;
+CREATE TABLE should_abort (value int);
+PREPARE TRANSACTION 'citus_0_should_abort';
+
+BEGIN;
+CREATE TABLE should_commit (value int);
+PREPARE TRANSACTION 'citus_0_should_commit';
+
+BEGIN;
+CREATE TABLE should_be_sorted_into_middle (value int);
+PREPARE TRANSACTION 'citus_0_should_be_sorted_into_middle';
+
+\c - - - :master_port
+-- Add "fake" pg_dist_transaction records and run recovery
+INSERT INTO pg_dist_transaction VALUES (14, 'citus_0_should_commit');
+INSERT INTO pg_dist_transaction VALUES (14, 'citus_0_should_be_forgotten');
+
+SELECT recover_prepared_transactions();
+SELECT count(*) FROM pg_dist_transaction;
+
+-- Confirm that transactions were correctly rolled forward
+\c - - - :worker_1_port
+SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort';
+SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit';
diff --git a/src/test/regress/sql/multi_mx_modifications.sql b/src/test/regress/sql/multi_mx_modifications.sql
new file mode 100644
index 000000000..89805f23d
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_modifications.sql
@@ -0,0 +1,282 @@
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1330000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1330000;
+
+
+-- ===================================================================
+-- test end-to-end modification functionality for mx tables
+-- ===================================================================
+
+-- basic single-row INSERT
+INSERT INTO limit_orders_mx VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy',
+ 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743;
+
+-- now single-row INSERT from a worker
+\c - - - :worker_1_port
+INSERT INTO limit_orders_mx VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy',
+ 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744;
+
+-- now single-row INSERT to the other worker
+\c - - - :worker_2_port
+INSERT INTO limit_orders_mx VALUES (32745, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy',
+ 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32745;
+
+-- and see all the inserted rows
+SELECT * FROM limit_orders_mx;
+
+-- basic single-row INSERT with RETURNING
+INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *;
+
+-- INSERT with DEFAULT in the target list
+INSERT INTO limit_orders_mx VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell',
+ DEFAULT);
+SELECT * FROM limit_orders_mx WHERE id = 12756;
+
+-- INSERT with expressions in target list
+INSERT INTO limit_orders_mx VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' +
+ interval '5 hours', 'buy', sqrt(2));
+SELECT * FROM limit_orders_mx WHERE id = 430;
+
+-- INSERT without partition key
+INSERT INTO limit_orders_mx DEFAULT VALUES;
+
+-- squelch WARNINGs that contain worker_port
+SET client_min_messages TO ERROR;
+
+-- INSERT violating NOT NULL constraint
+INSERT INTO limit_orders_mx VALUES (NULL, 'T', 975234, DEFAULT);
+
+-- INSERT violating column constraint
+INSERT INTO limit_orders_mx VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'sell',
+ -5.00);
+-- INSERT violating primary key constraint
+INSERT INTO limit_orders_mx VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58);
+
+-- INSERT violating primary key constraint, with RETURNING specified.
+INSERT INTO limit_orders_mx VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *;
+
+-- INSERT, with RETURNING specified, failing with a non-constraint error
+INSERT INTO limit_orders_mx VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0;
+
+SET client_min_messages TO DEFAULT;
+
+-- commands with non-constant partition values are unsupported
+INSERT INTO limit_orders_mx VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45',
+ 'sell', 0.58);
+
+-- values for other columns are totally fine
+INSERT INTO limit_orders_mx VALUES (2036, 'GOOG', 5634, now(), 'buy', random());
+
+-- commands with mutable functions in their quals
+DELETE FROM limit_orders_mx WHERE id = 246 AND bidder_id = (random() * 1000);
+
+-- commands with mutable but non-volatile functions (i.e. stable functions) in their quals
+-- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable)
+DELETE FROM limit_orders_mx WHERE id = 246 AND placed_at = current_timestamp::timestamp;
+
+-- commands with multiple rows are unsupported
+INSERT INTO limit_orders_mx VALUES (DEFAULT), (DEFAULT);
+
+-- INSERT ... SELECT ... FROM commands are unsupported from workers
+INSERT INTO limit_orders_mx SELECT * FROM limit_orders_mx;
+
+-- connect back to the other node
+\c - - - :worker_1_port
+
+-- commands containing a CTE are unsupported
+WITH deleted_orders AS (DELETE FROM limit_orders_mx RETURNING *)
+INSERT INTO limit_orders_mx DEFAULT VALUES;
+
+-- test simple DELETE
+INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246;
+
+DELETE FROM limit_orders_mx WHERE id = 246;
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246;
+
+-- test simple DELETE with RETURNING
+DELETE FROM limit_orders_mx WHERE id = 430 RETURNING *;
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430;
+
+-- DELETE with expression in WHERE clause
+INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246;
+
+DELETE FROM limit_orders_mx WHERE id = (2 * 123);
+SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246;
+
+-- commands with no constraints on the partition key are not supported
+DELETE FROM limit_orders_mx WHERE bidder_id = 162;
+
+-- commands with a USING clause are unsupported
+CREATE TABLE bidders ( name text, id bigint );
+DELETE FROM limit_orders_mx USING bidders WHERE limit_orders_mx.id = 246 AND
+ limit_orders_mx.bidder_id = bidders.id AND
+ bidders.name = 'Bernie Madoff';
+
+-- commands containing a CTE are unsupported
+WITH deleted_orders AS (INSERT INTO limit_orders_mx DEFAULT VALUES RETURNING *)
+DELETE FROM limit_orders_mx;
+
+-- cursors are not supported
+DELETE FROM limit_orders_mx WHERE CURRENT OF cursor_name;
+
+INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69);
+
+-- simple UPDATE
+UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246;
+SELECT symbol FROM limit_orders_mx WHERE id = 246;
+
+-- simple UPDATE with RETURNING
+UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *;
+
+-- expression UPDATE
+UPDATE limit_orders_mx SET bidder_id = 6 * 3 WHERE id = 246;
+SELECT bidder_id FROM limit_orders_mx WHERE id = 246;
+
+-- expression UPDATE with RETURNING
+UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *;
+
+-- multi-column UPDATE
+UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246;
+SELECT kind, limit_price FROM limit_orders_mx WHERE id = 246;
+
+-- multi-column UPDATE with RETURNING
+UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *;
+
+-- Test that on unique constraint violations, we fail fast
+INSERT INTO limit_orders_mx VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
+INSERT INTO limit_orders_mx VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);
+
+-- commands with no constraints on the partition key are not supported
+UPDATE limit_orders_mx SET limit_price = 0.00;
+
+-- attempting to change the partition key is unsupported
+UPDATE limit_orders_mx SET id = 0 WHERE id = 246;
+
+-- UPDATEs with a FROM clause are unsupported
+UPDATE limit_orders_mx SET limit_price = 0.00 FROM bidders
+ WHERE limit_orders_mx.id = 246 AND
+ limit_orders_mx.bidder_id = bidders.id AND
+ bidders.name = 'Bernie Madoff';
+
+-- commands containing a CTE are unsupported
+WITH deleted_orders AS (INSERT INTO limit_orders_mx DEFAULT VALUES RETURNING *)
+UPDATE limit_orders_mx SET symbol = 'GM';
+
+SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246;
+
+-- updates referencing just a var are supported
+UPDATE limit_orders_mx SET bidder_id = id WHERE id = 246;
+
+-- updates referencing a column are supported
+UPDATE limit_orders_mx SET bidder_id = bidder_id + 1 WHERE id = 246;
+
+-- IMMUTABLE functions are allowed
+UPDATE limit_orders_mx SET symbol = LOWER(symbol) WHERE id = 246;
+
+SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246;
+
+-- IMMUTABLE functions are allowed -- even in returning
+UPDATE limit_orders_mx SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol;
+
+-- connect to the schema node to run the DDL
+\c - - - :master_port
+ALTER TABLE limit_orders_mx ADD COLUMN array_of_values integer[];
+
+-- connect back to the other node
+\c - - - :worker_2_port
+
+-- updates referencing STABLE functions are allowed
+UPDATE limit_orders_mx SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id = 246;
+-- so are binary operators
+UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246;
+
+-- connect back to the other node
+\c - - - :worker_2_port
+
+-- immutable function calls with vars are also allowed
+UPDATE limit_orders_mx
+SET array_of_values = immutable_append_mx(array_of_values, 2) WHERE id = 246;
+
+CREATE FUNCTION stable_append_mx(old_values int[], new_value int)
+RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$
+LANGUAGE plpgsql STABLE;
+
+-- but STABLE function calls with vars are not allowed
+UPDATE limit_orders_mx
+SET array_of_values = stable_append_mx(array_of_values, 3) WHERE id = 246;
+
+SELECT array_of_values FROM limit_orders_mx WHERE id = 246;
+
+-- STRICT functions work as expected
+CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS
+'SELECT COALESCE($1, 2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT;
+UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246;
+
+SELECT array_of_values FROM limit_orders_mx WHERE id = 246;
+
+-- connect to the schema node to run the DDL
+\c - - - :master_port
+ALTER TABLE limit_orders_mx DROP array_of_values;
+
+-- connect back to the other node
+\c - - - :worker_2_port
+
+-- even in RETURNING
+UPDATE limit_orders_mx SET placed_at = placed_at WHERE id = 246 RETURNING NOW();
+
+-- cursors are not supported
+UPDATE limit_orders_mx SET symbol = 'GM' WHERE CURRENT OF cursor_name;
+
+-- check that multi-row UPDATE/DELETEs with RETURNING work
+INSERT INTO multiple_hash_mx VALUES ('0', '1');
+INSERT INTO multiple_hash_mx VALUES ('0', '2');
+INSERT INTO multiple_hash_mx VALUES ('0', '3');
+INSERT INTO multiple_hash_mx VALUES ('0', '4');
+INSERT INTO multiple_hash_mx VALUES ('0', '5');
+INSERT INTO multiple_hash_mx VALUES ('0', '6');
+
+UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *;
+DELETE FROM multiple_hash_mx WHERE category = '0' RETURNING *;
+
+-- ensure returned row counters are correct
+\set QUIET off
+INSERT INTO multiple_hash_mx VALUES ('1', '1');
+INSERT INTO multiple_hash_mx VALUES ('1', '2');
+INSERT INTO multiple_hash_mx VALUES ('1', '3');
+INSERT INTO multiple_hash_mx VALUES ('2', '1');
+INSERT INTO multiple_hash_mx VALUES ('2', '2');
+INSERT INTO multiple_hash_mx VALUES ('2', '3');
+INSERT INTO multiple_hash_mx VALUES ('2', '3') RETURNING *;
+
+-- check that updates return the right number of rows
+-- one row
+UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '1' AND data = '1';
+-- three rows
+UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1';
+-- three rows, with RETURNING
+UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING category;
+-- check
+SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data;
+
+-- check that deletes return the right number of rows
+-- one row
+DELETE FROM multiple_hash_mx WHERE category = '2' AND data = '1';
+-- two rows
+DELETE FROM multiple_hash_mx WHERE category = '2';
+-- three rows, with RETURNING
+DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category;
+-- check
+SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data;
+SELECT * FROM multiple_hash_mx WHERE category = '2' ORDER BY category, data;
+
+-- verify interaction of default values, SERIAL, and RETURNING
+\set QUIET on
+
+INSERT INTO app_analytics_events_mx VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id;
+INSERT INTO app_analytics_events_mx (app_id, name) VALUES (102, 'Wayz') RETURNING id;
+INSERT INTO app_analytics_events_mx (app_id, name) VALUES (103, 'Mynt') RETURNING *;
diff --git a/src/test/regress/sql/multi_mx_modifying_xacts.sql b/src/test/regress/sql/multi_mx_modifying_xacts.sql
new file mode 100644
index 000000000..f04cab33b
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_modifying_xacts.sql
@@ -0,0 +1,330 @@
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1340000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1340000;
+
+
+-- ===================================================================
+-- test end-to-end modification functionality for mx tables in transactions
+-- ===================================================================
+
+-- add some data
+INSERT INTO researchers_mx VALUES (1, 1, 'Donald Knuth');
+INSERT INTO researchers_mx VALUES (2, 1, 'Niklaus Wirth');
+INSERT INTO researchers_mx VALUES (3, 2, 'Tony Hoare');
+INSERT INTO researchers_mx VALUES (4, 2, 'Kenneth Iverson');
+
+-- replace a researcher, reusing their id on the schema node
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+INSERT INTO researchers_mx VALUES (2, 1, 'John Backus');
+COMMIT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+
+-- do it on the worker node as well
+\c - - - :worker_1_port
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 1');
+COMMIT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+
+-- do it on the other worker node as well
+\c - - - :worker_2_port
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 2');
+COMMIT;
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;
+
+
+\c - - - :master_port
+
+-- abort a modification
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ABORT;
+
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+
+\c - - - :worker_1_port
+
+-- abort a modification on the worker node
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ABORT;
+
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+
+\c - - - :worker_2_port
+
+-- abort a modification on the other worker node
+BEGIN;
+DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+ABORT;
+
+SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;
+
+
+-- switch back to the first worker node
+\c - - - :worker_1_port
+
+-- creating savepoints should work...
+BEGIN;
+INSERT INTO researchers_mx VALUES (5, 3, 'Dennis Ritchie');
+SAVEPOINT hire_thompson;
+INSERT INTO researchers_mx VALUES (6, 3, 'Ken Thompson');
+COMMIT;
+
+SELECT name FROM researchers_mx WHERE lab_id = 3 AND id = 6;
+
+-- even if created by PL/pgSQL...
+\set VERBOSITY terse
+BEGIN;
+DO $$
+BEGIN
+ INSERT INTO researchers_mx VALUES (10, 10, 'Edsger Dijkstra');
+EXCEPTION
+ WHEN not_null_violation THEN
+ RAISE NOTICE 'caught not_null_violation';
+END $$;
+COMMIT;
+
+-- but rollback should not
+BEGIN;
+INSERT INTO researchers_mx VALUES (7, 4, 'Jim Gray');
+SAVEPOINT hire_engelbart;
+INSERT INTO researchers_mx VALUES (8, 4, 'Douglas Engelbart');
+ROLLBACK TO hire_engelbart;
+COMMIT;
+
+SELECT name FROM researchers_mx WHERE lab_id = 4;
+
+BEGIN;
+DO $$
+BEGIN
+ INSERT INTO researchers_mx VALUES (NULL, 10, 'Edsger Dijkstra');
+EXCEPTION
+ WHEN not_null_violation THEN
+ RAISE NOTICE 'caught not_null_violation';
+END $$;
+COMMIT;
+\set VERBOSITY default
+
+
+-- should be valid to edit labs_mx after researchers_mx...
+BEGIN;
+INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');
+INSERT INTO labs_mx VALUES (5, 'Los Alamos');
+COMMIT;
+
+SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id;
+
+-- but not the other way around (would require expanding xact participants)...
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport');
+COMMIT;
+
+-- have the same test on the other worker node
+\c - - - :worker_2_port
+-- should be valid to edit labs_mx after researchers_mx...
+BEGIN;
+INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');
+INSERT INTO labs_mx VALUES (5, 'Los Alamos');
+COMMIT;
+
+SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id;
+
+-- but not the other way around (would require expanding xact participants)...
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport');
+COMMIT;
+
+-- switch back to the first worker node
+\c - - - :worker_1_port
+
+-- this logic doesn't apply to router SELECTs occurring after a modification:
+-- selecting from the modified node is fine...
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+SELECT count(*) FROM researchers_mx WHERE lab_id = 6;
+ABORT;
+
+-- applies to DDL
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+ALTER TABLE labs_mx ADD COLUMN motto text;
+COMMIT;
+
+-- doesn't apply to COPY after modifications
+BEGIN;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+\copy labs_mx from stdin delimiter ','
+10,Weyland-Yutani-1
+\.
+COMMIT;
+
+-- COPY will also work if it comes before any modifications
+BEGIN;
+\copy labs_mx from stdin delimiter ','
+10,Weyland-Yutani-2
+\.
+SELECT name FROM labs_mx WHERE id = 10;
+INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
+COMMIT;
+
+\c - - - :worker_1_port
+-- test primary key violations
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (1, 'orange');
+COMMIT;
+
+-- data shouldn't have persisted...
+SELECT * FROM objects_mx WHERE id = 1;
+
+-- same test on the second worker node
+\c - - - :worker_2_port
+-- test primary key violations
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (1, 'orange');
+COMMIT;
+
+-- data shouldn't have persisted...
+SELECT * FROM objects_mx WHERE id = 1;
+
+-- create trigger on one worker to reject certain values
+\c - - - :worker_1_port
+
+CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$
+ BEGIN
+ IF (NEW.name = 'BAD') THEN
+ RAISE 'illegal value';
+ END IF;
+
+ RETURN NEW;
+ END;
+$rb$ LANGUAGE plpgsql;
+
+CREATE CONSTRAINT TRIGGER reject_bad_mx
+AFTER INSERT ON objects_mx_1220103
+DEFERRABLE INITIALLY IMMEDIATE
+FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
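+
+-- as a quick local illustration of the trigger above (id 99 is an arbitrary value
+-- chosen for this sketch), a direct insert of a rejected name into the shard
+-- placement should error out with 'illegal value' and leave no row behind
+INSERT INTO objects_mx_1220103 VALUES (99, 'BAD');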
+
+-- test partial failure; statement 1 succeeds, statement 2 fails
+\set VERBOSITY terse
+BEGIN;
+INSERT INTO labs_mx VALUES (7, 'E Corp');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+COMMIT;
+
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 2;
+SELECT * FROM labs_mx WHERE id = 7;
+
+-- same failure test from worker 2
+\c - - - :worker_2_port
+
+-- test partial failure; statement 1 succeeds, statement 2 fails
+BEGIN;
+INSERT INTO labs_mx VALUES (7, 'E Corp');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+COMMIT;
+
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 2;
+SELECT * FROM labs_mx WHERE id = 7;
+
+\c - - - :worker_1_port
+
+-- what if there are errors on different shards at different times?
+\c - - - :worker_1_port
+
+CREATE CONSTRAINT TRIGGER reject_bad_mx
+AFTER INSERT ON labs_mx_1220102
+DEFERRABLE INITIALLY IMMEDIATE
+FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
+
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+INSERT INTO labs_mx VALUES (8, 'Aperture Science');
+INSERT INTO labs_mx VALUES (9, 'BAD');
+COMMIT;
+
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 1;
+SELECT * FROM labs_mx WHERE id = 8;
+
+-- same test from the other worker
+\c - - - :worker_2_port
+
+
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+INSERT INTO labs_mx VALUES (8, 'Aperture Science');
+INSERT INTO labs_mx VALUES (9, 'BAD');
+COMMIT;
+
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 1;
+SELECT * FROM labs_mx WHERE id = 8;
+
+
+-- what if the failures happen at COMMIT time?
+\c - - - :worker_1_port
+
+DROP TRIGGER reject_bad_mx ON objects_mx_1220103;
+
+CREATE CONSTRAINT TRIGGER reject_bad_mx
+AFTER INSERT ON objects_mx_1220103
+DEFERRABLE INITIALLY DEFERRED
+FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
+
+-- should be the same story as before, just at COMMIT time
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+INSERT INTO labs_mx VALUES (9, 'Umbrella Corporation');
+COMMIT;
+
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 2;
+SELECT * FROM labs_mx WHERE id = 7;
+
+
+DROP TRIGGER reject_bad_mx ON labs_mx_1220102;
+
+CREATE CONSTRAINT TRIGGER reject_bad_mx
+AFTER INSERT ON labs_mx_1220102
+DEFERRABLE INITIALLY DEFERRED
+FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();
+
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO objects_mx VALUES (2, 'BAD');
+INSERT INTO labs_mx VALUES (8, 'Aperture Science');
+INSERT INTO labs_mx VALUES (9, 'BAD');
+COMMIT;
+
+-- data should NOT be persisted
+SELECT * FROM objects_mx WHERE id = 1;
+SELECT * FROM labs_mx WHERE id = 8;
+
+-- what if one shard (objects_mx) succeeds but another (labs_mx) completely fails?
+\c - - - :worker_1_port
+
+DROP TRIGGER reject_bad_mx ON objects_mx_1220103;
+
+BEGIN;
+INSERT INTO objects_mx VALUES (1, 'apple');
+INSERT INTO labs_mx VALUES (8, 'Aperture Science');
+INSERT INTO labs_mx VALUES (9, 'BAD');
+COMMIT;
+
+-- no data should persist
+SELECT * FROM objects_mx WHERE id = 1;
+SELECT * FROM labs_mx WHERE id = 8;
diff --git a/src/test/regress/sql/multi_mx_reference_table.sql b/src/test/regress/sql/multi_mx_reference_table.sql
new file mode 100644
index 000000000..c4843c077
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_reference_table.sql
@@ -0,0 +1,521 @@
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
+
+\c - - - :master_port
+CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_reference_table('reference_table_test');
+
+INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02');
+INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03');
+INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04');
+INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05');
+
+\c - - - :worker_1_port
+-- run some queries on top of the data
+SELECT
+ *
+FROM
+ reference_table_test;
+
+SELECT
+ *
+FROM
+ reference_table_test
+WHERE
+ value_1 = 1;
+
+SELECT
+ value_1,
+ value_2
+FROM
+ reference_table_test
+ORDER BY
+ 2 ASC LIMIT 3;
+
+SELECT
+ value_1, value_3
+FROM
+ reference_table_test
+WHERE
+ value_2 >= 4
+ORDER BY
+ 2 LIMIT 3;
+
+SELECT
+ value_1, 15 * value_2
+FROM
+ reference_table_test
+ORDER BY
+ 2 ASC
+LIMIT 2;
+
+SELECT
+ value_1, 15 * value_2
+FROM
+ reference_table_test
+ORDER BY
+ 2 ASC LIMIT 2 OFFSET 2;
+
+SELECT
+ value_2, value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 = 2 OR value_2 = 3;
+
+SELECT
+ value_2, value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 = 2 AND value_2 = 3;
+
+SELECT
+ value_2, value_4
+FROM
+ reference_table_test
+WHERE
+ value_3 = '2' OR value_1 = 3;
+
+SELECT
+ value_2, value_4
+FROM
+ reference_table_test
+WHERE
+ (
+ value_3 = '2' OR value_1 = 3
+ )
+ AND FALSE;
+
+SELECT
+ *
+FROM
+ reference_table_test
+WHERE
+ value_2 IN
+ (
+ SELECT
+ value_3::FLOAT
+ FROM
+ reference_table_test
+ )
+ AND value_1 < 3;
+
+SELECT
+ value_4
+FROM
+ reference_table_test
+WHERE
+ value_3 IN
+ (
+ '1', '2'
+ );
+
+SELECT
+ date_part('day', value_4)
+FROM
+ reference_table_test
+WHERE
+ value_3 IN
+ (
+ '5', '2'
+ );
+
+SELECT
+ value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 <= 2 AND value_2 >= 4;
+
+SELECT
+ value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 <= 20 AND value_2 >= 4;
+
+SELECT
+ value_4
+FROM
+ reference_table_test
+WHERE
+ value_2 >= 5 AND value_2 <= random();
+
+SELECT
+ value_1
+FROM
+ reference_table_test
+WHERE
+ value_4 BETWEEN '2016-12-01' AND '2016-12-03';
+
+SELECT
+ value_1
+FROM
+ reference_table_test
+WHERE
+ FALSE;
+SELECT
+ value_1
+FROM
+ reference_table_test
+WHERE
+ int4eq(1, 2);
+
+-- rename the output columns and do some operations
+SELECT
+ value_1 as id, value_2 * 15 as age
+FROM
+ reference_table_test;
+
+-- queries with CTEs are supported
+WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3)
+SELECT
+ *
+FROM
+ some_data;
+
+-- queries with CTEs are supported even if CTE is not referenced inside query
+WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3)
+SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1;
+
+-- queries which involve functions in the FROM clause are supported if they go to a single worker.
+SELECT
+ *
+FROM
+ reference_table_test, position('om' in 'Thomas')
+WHERE
+ value_1 = 1;
+
+SELECT
+ *
+FROM
+ reference_table_test, position('om' in 'Thomas')
+WHERE
+ value_1 = 1 OR value_1 = 2;
+
+-- set operations are supported
+(SELECT * FROM reference_table_test WHERE value_1 = 1)
+UNION
+(SELECT * FROM reference_table_test WHERE value_1 = 3);
+
+(SELECT * FROM reference_table_test WHERE value_1 = 1)
+EXCEPT
+(SELECT * FROM reference_table_test WHERE value_1 = 3);
+
+(SELECT * FROM reference_table_test WHERE value_1 = 1)
+INTERSECT
+(SELECT * FROM reference_table_test WHERE value_1 = 3);
+
+-- to make the aggregation tests more interesting, ingest some more data
+\c - - - :master_port
+INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02');
+INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03');
+\c - - - :worker_1_port
+
+-- some aggregations
+SELECT
+ value_4, SUM(value_2)
+FROM
+ reference_table_test
+GROUP BY
+ value_4
+HAVING
+ SUM(value_2) > 3
+ORDER BY
+ 1;
+
+SELECT
+ value_4,
+ value_3,
+ SUM(value_2)
+FROM
+ reference_table_test
+GROUP BY
+ GROUPING sets ((value_4), (value_3))
+ORDER BY 1, 2, 3;
+
+
+-- distinct clauses also work fine
+SELECT DISTINCT
+ value_4
+FROM
+ reference_table_test
+ORDER BY
+ 1;
+
+-- window functions are also supported
+SELECT
+ value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4)
+FROM
+ reference_table_test;
+
+-- window functions are also supported
+SELECT
+ value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4)
+FROM
+ reference_table_test;
+
+SELECT
+ count(DISTINCT CASE
+ WHEN
+ value_2 >= 3
+ THEN
+ value_2
+ ELSE
+ NULL
+ END) as c
+ FROM
+ reference_table_test;
+
+SELECT
+ value_1,
+ count(DISTINCT CASE
+ WHEN
+ value_2 >= 3
+ THEN
+ value_2
+ ELSE
+ NULL
+ END) as c
+ FROM
+ reference_table_test
+ GROUP BY
+ value_1
+ ORDER BY
+ 1;
+
+-- selects inside a transaction work fine as well
+
+BEGIN;
+SELECT * FROM reference_table_test;
+SELECT * FROM reference_table_test WHERE value_1 = 1;
+END;
+
+-- cursor queries also work fine
+BEGIN;
+DECLARE test_cursor CURSOR FOR
+ SELECT *
+ FROM reference_table_test
+ WHERE value_1 = 1 OR value_1 = 2
+ ORDER BY value_1;
+FETCH test_cursor;
+FETCH ALL test_cursor;
+FETCH test_cursor; -- fetch one row after the last
+END;
+
+-- table creation queries with a router plannable query inside can be router plannable
+CREATE TEMP TABLE temp_reference_test as
+ SELECT *
+ FROM reference_table_test
+ WHERE value_1 = 1;
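+
+-- read the temp table back, as a small illustration that the router query above
+-- was materialized into a session-local table on this worker
+SELECT count(*) FROM temp_reference_test;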
+
+\c - - - :master_port
+-- all kinds of joins are supported among reference tables
+-- first create two more tables
+CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_reference_table('reference_table_test_second');
+
+CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_reference_table('reference_table_test_third');
+
+-- ingest some data to both tables
+INSERT INTO reference_table_test_second VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO reference_table_test_second VALUES (2, 2.0, '2', '2016-12-02');
+INSERT INTO reference_table_test_second VALUES (3, 3.0, '3', '2016-12-03');
+
+INSERT INTO reference_table_test_third VALUES (4, 4.0, '4', '2016-12-04');
+INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05');
+
+\c - - - :worker_2_port
+
+-- some very basic tests
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2
+WHERE
+ t1.value_2 = t2.value_2
+ORDER BY
+ 1;
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_third t3
+WHERE
+ t1.value_2 = t3.value_2
+ORDER BY
+ 1;
+
+SELECT
+ DISTINCT t2.value_1
+FROM
+ reference_table_test_second t2, reference_table_test_third t3
+WHERE
+ t2.value_2 = t3.value_2
+ORDER BY
+ 1;
+
+-- join on different columns and different data types via casts
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2
+WHERE
+ t1.value_2 = t2.value_1
+ORDER BY
+ 1;
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2
+WHERE
+ t1.value_2 = t2.value_3::int
+ORDER BY
+ 1;
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2
+WHERE
+ t1.value_2 = date_part('day', t2.value_4)
+ORDER BY
+ 1;
+
+-- ingest a common row to see more meaningful results with joins involving 3 tables
+\c - - - :master_port
+INSERT INTO reference_table_test_third VALUES (3, 3.0, '3', '2016-12-03');
+\c - - - :worker_1_port
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3
+WHERE
+ t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2
+ORDER BY
+ 1;
+
+-- same query on different columns
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3
+WHERE
+ t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1
+ORDER BY
+ 1;
+
+-- with the JOIN syntax
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1 JOIN reference_table_test_second t2 USING (value_1)
+ JOIN reference_table_test_third t3 USING (value_1)
+ORDER BY
+ 1;
+
+-- and left/right joins
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1 LEFT JOIN reference_table_test_second t2 USING (value_1)
+ LEFT JOIN reference_table_test_third t3 USING (value_1)
+ORDER BY
+ 1;
+
+SELECT
+ DISTINCT t1.value_1
+FROM
+ reference_table_test t1 RIGHT JOIN reference_table_test_second t2 USING (value_1)
+ RIGHT JOIN reference_table_test_third t3 USING (value_1)
+ORDER BY
+ 1;
+
+\c - - - :master_port
+SET citus.shard_count TO 6;
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+
+CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_distributed_table('colocated_table_test', 'value_1');
+
+CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp);
+SELECT create_distributed_table('colocated_table_test_2', 'value_1');
+
+DELETE FROM reference_table_test;
+INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02');
+
+INSERT INTO colocated_table_test VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO colocated_table_test VALUES (2, 2.0, '2', '2016-12-02');
+
+INSERT INTO colocated_table_test_2 VALUES (1, 1.0, '1', '2016-12-01');
+INSERT INTO colocated_table_test_2 VALUES (2, 2.0, '2', '2016-12-02');
+
+\c - - - :worker_1_port
+SET client_min_messages TO DEBUG1;
+SET citus.log_multi_join_order TO TRUE;
+
+SELECT
+ reference_table_test.value_1
+FROM
+ reference_table_test, colocated_table_test
+WHERE
+ colocated_table_test.value_1 = reference_table_test.value_1;
+
+SELECT
+ colocated_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test
+WHERE
+ colocated_table_test.value_2 = reference_table_test.value_2;
+
+SELECT
+ colocated_table_test.value_2
+FROM
+ colocated_table_test, reference_table_test
+WHERE
+ reference_table_test.value_1 = colocated_table_test.value_1;
+
+SELECT
+ colocated_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test, colocated_table_test_2
+WHERE
+ colocated_table_test.value_2 = reference_table_test.value_2;
+
+SELECT
+ colocated_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test, colocated_table_test_2
+WHERE
+ colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2;
+
+SET citus.task_executor_type to "task-tracker";
+SELECT
+ colocated_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test, colocated_table_test_2
+WHERE
+ colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2;
+
+SELECT
+ reference_table_test.value_2
+FROM
+ reference_table_test, colocated_table_test, colocated_table_test_2
+WHERE
+ colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1;
+
+
+SET client_min_messages TO NOTICE;
+SET citus.log_multi_join_order TO FALSE;
+
+-- clean up tables
+\c - - - :master_port
+DROP TABLE reference_table_test, reference_table_test_second, reference_table_test_third;
diff --git a/src/test/regress/sql/multi_mx_repartition_join_w1.sql b/src/test/regress/sql/multi_mx_repartition_join_w1.sql
new file mode 100644
index 000000000..8e19582b6
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_repartition_join_w1.sql
@@ -0,0 +1,14 @@
+-- Test two concurrent repartition joins from two different workers.
+-- This test runs the below query from :worker_1_port while the
+-- concurrent test runs the same query on :worker_2_port. Note that both
+-- tests use the same sequence ids, but the queries should not fail.
+\c - - - :worker_1_port
+
+SET citus.task_executor_type TO "task-tracker";
+CREATE TEMP TABLE t1 AS
+SELECT
+ l1.l_comment
+FROM
+ lineitem_mx l1, orders_mx l2
+WHERE
+ l1.l_comment = l2.o_comment;
diff --git a/src/test/regress/sql/multi_mx_repartition_join_w2.sql b/src/test/regress/sql/multi_mx_repartition_join_w2.sql
new file mode 100644
index 000000000..9fd8c66b3
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_repartition_join_w2.sql
@@ -0,0 +1,14 @@
+-- Test two concurrent repartition joins from two different workers.
+-- This test runs the below query from :worker_2_port while the
+-- concurrent test runs the same query on :worker_1_port. Note that both
+-- tests use the same sequence ids, but the queries should not fail.
+\c - - - :worker_2_port
+
+SET citus.task_executor_type TO "task-tracker";
+CREATE TEMP TABLE t1 AS
+SELECT
+ l1.l_comment
+FROM
+ lineitem_mx l1, orders_mx l2
+WHERE
+ l1.l_comment = l2.o_comment;
diff --git a/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql b/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql
new file mode 100644
index 000000000..1fc1c1496
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql
@@ -0,0 +1,218 @@
+--
+-- MULTI_MX_REPARTITION_UDT_PREPARE
+--
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
+
+-- START type creation
+
+CREATE TYPE test_udt AS (i integer, i2 integer);
+
+-- ... as well as a function to use as its comparator...
+CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
+AS 'select $1.i = $2.i AND $1.i2 = $2.i2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+
+-- ... use that function to create a custom equality operator...
+CREATE OPERATOR = (
+ LEFTARG = test_udt,
+ RIGHTARG = test_udt,
+ PROCEDURE = equal_test_udt_function,
+ COMMUTATOR = =,
+ HASHES
+);
+
+-- ... and create a custom operator family for hash indexes...
+CREATE OPERATOR FAMILY tudt_op_fam USING hash;
+
+-- ... create a test HASH function. Though it is a poor hash function,
+-- it is acceptable for our tests
+CREATE FUNCTION test_udt_hash(test_udt) RETURNS int
+AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+
+
+-- We need to define two different operator classes for the composite types
+-- One uses BTREE, the other uses HASH
+CREATE OPERATOR CLASS tudt_op_fam_clas3
+DEFAULT FOR TYPE test_udt USING BTREE AS
+OPERATOR 3 = (test_udt, test_udt);
+
+CREATE OPERATOR CLASS tudt_op_fam_class
+DEFAULT FOR TYPE test_udt USING HASH AS
+OPERATOR 1 = (test_udt, test_udt),
+FUNCTION 1 test_udt_hash(test_udt);
+
+-- END type creation
+
+CREATE TABLE repartition_udt (
+ pk integer not null,
+ udtcol test_udt,
+ txtcol text
+);
+
+CREATE TABLE repartition_udt_other (
+ pk integer not null,
+ udtcol test_udt,
+ txtcol text
+);
+
+-- Connect directly to a worker, create and drop the type, then
+-- proceed with type creation as above; this way the type's OID on this
+-- worker ends up different from the one on the master.
+
+\c - - - :worker_1_port
+
+CREATE TYPE test_udt AS (i integer, i2 integer);
+DROP TYPE test_udt CASCADE;
+
+-- START type creation
+
+CREATE TYPE test_udt AS (i integer, i2 integer);
+
+-- ... as well as a function to use as its comparator...
+CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
+AS 'select $1.i = $2.i AND $1.i2 = $2.i2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+
+-- ... use that function to create a custom equality operator...
+CREATE OPERATOR = (
+ LEFTARG = test_udt,
+ RIGHTARG = test_udt,
+ PROCEDURE = equal_test_udt_function,
+ COMMUTATOR = =,
+ HASHES
+);
+
+-- ... and create a custom operator family for hash indexes...
+CREATE OPERATOR FAMILY tudt_op_fam USING hash;
+
+-- ... create a test HASH function. Though it is a poor hash function,
+-- it is acceptable for our tests
+CREATE FUNCTION test_udt_hash(test_udt) RETURNS int
+AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+
+
+-- We need to define two different operator classes for the composite types
+-- One uses BTREE, the other uses HASH
+CREATE OPERATOR CLASS tudt_op_fam_clas3
+DEFAULT FOR TYPE test_udt USING BTREE AS
+OPERATOR 3 = (test_udt, test_udt);
+
+CREATE OPERATOR CLASS tudt_op_fam_class
+DEFAULT FOR TYPE test_udt USING HASH AS
+OPERATOR 1 = (test_udt, test_udt),
+FUNCTION 1 test_udt_hash(test_udt);
+
+-- END type creation
+
+\c - - - :worker_2_port
+
+-- START type creation
+
+CREATE TYPE test_udt AS (i integer, i2 integer);
+
+-- ... as well as a function to use as its comparator...
+CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean
+AS 'select $1.i = $2.i AND $1.i2 = $2.i2;'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+
+-- ... use that function to create a custom equality operator...
+CREATE OPERATOR = (
+ LEFTARG = test_udt,
+ RIGHTARG = test_udt,
+ PROCEDURE = equal_test_udt_function,
+ COMMUTATOR = =,
+ HASHES
+);
+
+-- ... and create a custom operator family for hash indexes...
+CREATE OPERATOR FAMILY tudt_op_fam USING hash;
+
+-- ... create a test HASH function. Though it is a poor hash function,
+-- it is acceptable for our tests
+CREATE FUNCTION test_udt_hash(test_udt) RETURNS int
+AS 'SELECT hashtext( ($1.i + $1.i2)::text);'
+LANGUAGE SQL
+IMMUTABLE
+RETURNS NULL ON NULL INPUT;
+
+
+-- We need to define two different operator classes for the composite types
+-- One uses BTREE, the other uses HASH
+CREATE OPERATOR CLASS tudt_op_fam_clas3
+DEFAULT FOR TYPE test_udt USING BTREE AS
+OPERATOR 3 = (test_udt, test_udt);
+
+CREATE OPERATOR CLASS tudt_op_fam_class
+DEFAULT FOR TYPE test_udt USING HASH AS
+OPERATOR 1 = (test_udt, test_udt),
+FUNCTION 1 test_udt_hash(test_udt);
+
+-- END type creation
+
+-- Connect to master
+
+\c - - - :master_port
+
+-- Distribute and populate the two tables.
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO streaming;
+
+SET citus.shard_count TO 3;
+SELECT create_distributed_table('repartition_udt', 'pk');
+
+SET citus.shard_count TO 5;
+SELECT create_distributed_table('repartition_udt_other', 'pk');
+
+INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (2, '(1,2)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (3, '(1,3)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (4, '(2,1)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (5, '(2,2)'::test_udt, 'foo');
+INSERT INTO repartition_udt values (6, '(2,3)'::test_udt, 'foo');
+
+INSERT INTO repartition_udt_other values (7, '(1,1)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (8, '(1,2)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (9, '(1,3)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo');
+INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo');
+
+SET client_min_messages = LOG;
+
+-- Query that should result in a repartition join on int column, and be empty.
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.pk = repartition_udt_other.pk
+ WHERE repartition_udt.pk > 1;
+
+-- Query that should result in a repartition join on UDT column.
+SET citus.large_table_shard_count = 1;
+SET citus.task_executor_type = 'task-tracker';
+SET citus.log_multi_join_order = true;
+
+EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.udtcol = repartition_udt_other.udtcol
+ WHERE repartition_udt.pk > 1;
+
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.udtcol = repartition_udt_other.udtcol
+ WHERE repartition_udt.pk > 1
+ ORDER BY repartition_udt.pk;
+
+\c - - - :worker_1_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
+\c - - - :worker_2_port
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
diff --git a/src/test/regress/sql/multi_mx_repartition_udt_w1.sql b/src/test/regress/sql/multi_mx_repartition_udt_w1.sql
new file mode 100644
index 000000000..9b91f7d9a
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_repartition_udt_w1.sql
@@ -0,0 +1,20 @@
+--
+-- MULTI_MX_REPARTITION_W1_UDT
+--
+
+\c - - - :worker_1_port
+SET client_min_messages = LOG;
+-- Query that should result in a repartition join on UDT column.
+SET citus.large_table_shard_count = 1;
+SET citus.task_executor_type = 'task-tracker';
+SET citus.log_multi_join_order = true;
+
+-- Query that should result in a repartition join on int column, and be empty.
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.pk = repartition_udt_other.pk
+ WHERE repartition_udt.pk > 1;
+
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.udtcol = repartition_udt_other.udtcol
+ WHERE repartition_udt.pk > 1
+ ORDER BY repartition_udt.pk;
diff --git a/src/test/regress/sql/multi_mx_repartition_udt_w2.sql b/src/test/regress/sql/multi_mx_repartition_udt_w2.sql
new file mode 100644
index 000000000..26b45a631
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_repartition_udt_w2.sql
@@ -0,0 +1,20 @@
+--
+-- MULTI_MX_REPARTITION_W2_UDT
+--
+
+\c - - - :worker_2_port
+SET client_min_messages = LOG;
+-- Query that should result in a repartition join on UDT column.
+SET citus.large_table_shard_count = 1;
+SET citus.task_executor_type = 'task-tracker';
+SET citus.log_multi_join_order = true;
+
+-- Query that should result in a repartition join on int column, and be empty.
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.pk = repartition_udt_other.pk
+ WHERE repartition_udt.pk > 1;
+
+SELECT * FROM repartition_udt JOIN repartition_udt_other
+ ON repartition_udt.udtcol = repartition_udt_other.udtcol
+ WHERE repartition_udt.pk > 1
+ ORDER BY repartition_udt.pk;
diff --git a/src/test/regress/sql/multi_mx_router_planner.sql b/src/test/regress/sql/multi_mx_router_planner.sql
new file mode 100644
index 000000000..cbcc14d52
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_router_planner.sql
@@ -0,0 +1,658 @@
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000;
+
+
+-- ===================================================================
+-- test router planner functionality for single shard select queries
+-- ===================================================================
+
+-- run all the router queries from one of the workers
+
+\c - - - :worker_1_port
+-- this table is used in a CTE test
+CREATE TABLE authors_hash_mx ( name text, id bigint );
+
+-- create a bunch of test data
+INSERT INTO articles_hash_mx VALUES ( 1, 1, 'arsenous', 9572);
+INSERT INTO articles_hash_mx VALUES ( 2, 2, 'abducing', 13642);
+INSERT INTO articles_hash_mx VALUES ( 3, 3, 'asternal', 10480);
+INSERT INTO articles_hash_mx VALUES ( 4, 4, 'altdorfer', 14551);
+INSERT INTO articles_hash_mx VALUES ( 5, 5, 'aruru', 11389);
+INSERT INTO articles_hash_mx VALUES ( 6, 6, 'atlases', 15459);
+INSERT INTO articles_hash_mx VALUES ( 7, 7, 'aseptic', 12298);
+INSERT INTO articles_hash_mx VALUES ( 8, 8, 'agatized', 16368);
+INSERT INTO articles_hash_mx VALUES ( 9, 9, 'alligate', 438);
+INSERT INTO articles_hash_mx VALUES (10, 10, 'aggrandize', 17277);
+INSERT INTO articles_hash_mx VALUES (11, 1, 'alamo', 1347);
+INSERT INTO articles_hash_mx VALUES (12, 2, 'archiblast', 18185);
+INSERT INTO articles_hash_mx VALUES (13, 3, 'aseyev', 2255);
+INSERT INTO articles_hash_mx VALUES (14, 4, 'andesite', 19094);
+INSERT INTO articles_hash_mx VALUES (15, 5, 'adversa', 3164);
+INSERT INTO articles_hash_mx VALUES (16, 6, 'allonym', 2);
+INSERT INTO articles_hash_mx VALUES (17, 7, 'auriga', 4073);
+INSERT INTO articles_hash_mx VALUES (18, 8, 'assembly', 911);
+INSERT INTO articles_hash_mx VALUES (19, 9, 'aubergiste', 4981);
+INSERT INTO articles_hash_mx VALUES (20, 10, 'absentness', 1820);
+INSERT INTO articles_hash_mx VALUES (21, 1, 'arcading', 5890);
+INSERT INTO articles_hash_mx VALUES (22, 2, 'antipope', 2728);
+INSERT INTO articles_hash_mx VALUES (23, 3, 'abhorring', 6799);
+INSERT INTO articles_hash_mx VALUES (24, 4, 'audacious', 3637);
+INSERT INTO articles_hash_mx VALUES (25, 5, 'antehall', 7707);
+INSERT INTO articles_hash_mx VALUES (26, 6, 'abington', 4545);
+INSERT INTO articles_hash_mx VALUES (27, 7, 'arsenous', 8616);
+INSERT INTO articles_hash_mx VALUES (28, 8, 'aerophyte', 5454);
+INSERT INTO articles_hash_mx VALUES (29, 9, 'amateur', 9524);
+INSERT INTO articles_hash_mx VALUES (30, 10, 'andelee', 6363);
+INSERT INTO articles_hash_mx VALUES (31, 1, 'athwartships', 7271);
+INSERT INTO articles_hash_mx VALUES (32, 2, 'amazon', 11342);
+INSERT INTO articles_hash_mx VALUES (33, 3, 'autochrome', 8180);
+INSERT INTO articles_hash_mx VALUES (34, 4, 'amnestied', 12250);
+INSERT INTO articles_hash_mx VALUES (35, 5, 'aminate', 9089);
+INSERT INTO articles_hash_mx VALUES (36, 6, 'ablation', 13159);
+INSERT INTO articles_hash_mx VALUES (37, 7, 'archduchies', 9997);
+INSERT INTO articles_hash_mx VALUES (38, 8, 'anatine', 14067);
+INSERT INTO articles_hash_mx VALUES (39, 9, 'anchises', 10906);
+INSERT INTO articles_hash_mx VALUES (40, 10, 'attemper', 14976);
+INSERT INTO articles_hash_mx VALUES (41, 1, 'aznavour', 11814);
+INSERT INTO articles_hash_mx VALUES (42, 2, 'ausable', 15885);
+INSERT INTO articles_hash_mx VALUES (43, 3, 'affixal', 12723);
+INSERT INTO articles_hash_mx VALUES (44, 4, 'anteport', 16793);
+INSERT INTO articles_hash_mx VALUES (45, 5, 'afrasia', 864);
+INSERT INTO articles_hash_mx VALUES (46, 6, 'atlanta', 17702);
+INSERT INTO articles_hash_mx VALUES (47, 7, 'abeyance', 1772);
+INSERT INTO articles_hash_mx VALUES (48, 8, 'alkylic', 18610);
+INSERT INTO articles_hash_mx VALUES (49, 9, 'anyone', 2681);
+INSERT INTO articles_hash_mx VALUES (50, 10, 'anjanette', 19519);
+
+
+
+SET citus.task_executor_type TO 'real-time';
+SET citus.large_table_shard_count TO 2;
+SET client_min_messages TO 'DEBUG2';
+
+-- insert a single row for the test
+INSERT INTO articles_single_shard_hash_mx VALUES (50, 10, 'anjanette', 19519);
+
+-- single-shard tests
+
+-- test simple select for a single row
+SELECT * FROM articles_hash_mx WHERE author_id = 10 AND id = 50;
+
+-- get all titles by a single author
+SELECT title FROM articles_hash_mx WHERE author_id = 10;
+
+-- try ordering them by word count
+SELECT title, word_count FROM articles_hash_mx
+ WHERE author_id = 10
+ ORDER BY word_count DESC NULLS LAST;
+
+-- look at last two articles by an author
+SELECT title, id FROM articles_hash_mx
+ WHERE author_id = 5
+ ORDER BY id
+ LIMIT 2;
+
+-- find all articles by two authors in same shard
+-- but plan is not router executable due to order by
+SELECT title, author_id FROM articles_hash_mx
+ WHERE author_id = 7 OR author_id = 8
+ ORDER BY author_id ASC, id;
+
+-- same query is router executable with no order by
+SELECT title, author_id FROM articles_hash_mx
+ WHERE author_id = 7 OR author_id = 8;
+
+-- add in some grouping expressions, still on same shard
+-- HAVING queries are not supported in Citus
+SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx
+ WHERE author_id = 1 OR author_id = 7 OR author_id = 8 OR author_id = 10
+ GROUP BY author_id
+ HAVING sum(word_count) > 1000
+ ORDER BY sum(word_count) DESC;
+
+-- however having clause is supported if it goes to a single shard
+SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx
+ WHERE author_id = 1
+ GROUP BY author_id
+ HAVING sum(word_count) > 1000
+ ORDER BY sum(word_count) DESC;
+
+-- query is a single shard query but can't do shard pruning,
+-- not router-plannable due to <= and IN
+SELECT * FROM articles_hash_mx WHERE author_id <= 1;
+SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3);
+
+-- queries with CTEs are supported
+WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1)
+SELECT * FROM first_author;
+
+-- queries with CTEs are supported even if CTE is not referenced inside query
+WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1)
+SELECT title FROM articles_hash_mx WHERE author_id = 1;
+
+-- two CTE joins are supported if they go to the same worker
+WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1),
+id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 1)
+SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
+
+WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1),
+id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 3)
+SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
+
+-- CTE joins are not supported if table shards are at different workers
+WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1),
+id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2)
+SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
+
+-- recursive CTEs are supported when filtered on partition column
+
+INSERT INTO company_employees_mx values(1, 1, 0);
+INSERT INTO company_employees_mx values(1, 2, 1);
+INSERT INTO company_employees_mx values(1, 3, 1);
+INSERT INTO company_employees_mx values(1, 4, 2);
+INSERT INTO company_employees_mx values(1, 5, 4);
+
+INSERT INTO company_employees_mx values(3, 1, 0);
+INSERT INTO company_employees_mx values(3, 15, 1);
+INSERT INTO company_employees_mx values(3, 3, 1);
+
+-- find employees at the top 2 levels within the company hierarchy
+WITH RECURSIVE hierarchy as (
+ SELECT *, 1 AS level
+ FROM company_employees_mx
+ WHERE company_id = 1 and manager_id = 0
+ UNION
+ SELECT ce.*, (h.level+1)
+ FROM hierarchy h JOIN company_employees_mx ce
+ ON (h.employee_id = ce.manager_id AND
+ h.company_id = ce.company_id AND
+ ce.company_id = 1))
+SELECT * FROM hierarchy WHERE LEVEL <= 2;
+
+-- query becomes non-router plannable and gets rejected
+-- if the filter on company is dropped
+WITH RECURSIVE hierarchy as (
+ SELECT *, 1 AS level
+ FROM company_employees_mx
+ WHERE company_id = 1 and manager_id = 0
+ UNION
+ SELECT ce.*, (h.level+1)
+ FROM hierarchy h JOIN company_employees_mx ce
+ ON (h.employee_id = ce.manager_id AND
+ h.company_id = ce.company_id))
+SELECT * FROM hierarchy WHERE LEVEL <= 2;
+
+-- logically wrong query: it involves different shards
+-- from the same table, but is still router plannable because
+-- the shards are placed on the same worker.
+WITH RECURSIVE hierarchy as (
+ SELECT *, 1 AS level
+ FROM company_employees_mx
+ WHERE company_id = 3 and manager_id = 0
+ UNION
+ SELECT ce.*, (h.level+1)
+ FROM hierarchy h JOIN company_employees_mx ce
+ ON (h.employee_id = ce.manager_id AND
+ h.company_id = ce.company_id AND
+ ce.company_id = 2))
+SELECT * FROM hierarchy WHERE LEVEL <= 2;
+
+-- grouping sets are supported on single shard
+SELECT
+ id, substring(title, 2, 1) AS subtitle, count(*)
+ FROM articles_hash_mx
+ WHERE author_id = 1 or author_id = 3
+ GROUP BY GROUPING SETS ((id),(subtitle));
+
+-- grouping sets are not supported on multiple shards
+SELECT
+ id, substring(title, 2, 1) AS subtitle, count(*)
+ FROM articles_hash_mx
+ WHERE author_id = 1 or author_id = 2
+ GROUP BY GROUPING SETS ((id),(subtitle));
+
+-- queries which involve functions in the FROM clause are supported if they go to a single worker.
+SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1;
+
+SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3;
+
+-- they are not supported if multiple workers are involved
+SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2;
+
+-- subqueries are not supported in WHERE clause in Citus
+SELECT * FROM articles_hash_mx WHERE author_id IN (SELECT id FROM authors_hash_mx WHERE name LIKE '%a');
+
+SELECT * FROM articles_hash_mx WHERE author_id IN (SELECT author_id FROM articles_hash_mx WHERE author_id = 1 or author_id = 3);
+
+SELECT * FROM articles_hash_mx WHERE author_id = (SELECT 1);
+
+
+-- subqueries are supported in FROM clause but they are not router plannable
+SELECT articles_hash_mx.id,test.word_count
+FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id
+ORDER BY articles_hash_mx.id;
+
+
+SELECT articles_hash_mx.id,test.word_count
+FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test
+WHERE test.id = articles_hash_mx.id and articles_hash_mx.author_id = 1
+ORDER BY articles_hash_mx.id;
+
+-- subqueries are not supported in SELECT clause
+SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash_mx a2 WHERE a.id = a2.id LIMIT 1)
+ AS special_price FROM articles_hash_mx a;
+
+-- simple lookup query
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+
+-- below query hits a single shard, router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 OR author_id = 17;
+
+-- below query hits two shards, not router plannable + not router executable
+-- handled by real-time executor
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 OR author_id = 18;
+
+-- rename the output columns
+SELECT id as article_id, word_count * id as random_value
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+
+-- we can push down co-located joins to a single worker
+SELECT a.author_id as first_author, b.word_count as second_word_count
+ FROM articles_hash_mx a, articles_hash_mx b
+ WHERE a.author_id = 10 and a.author_id = b.author_id
+ LIMIT 3;
+
+-- following join is router plannable since the same worker
+-- has both shards
+SELECT a.author_id as first_author, b.word_count as second_word_count
+ FROM articles_hash_mx a, articles_single_shard_hash_mx b
+ WHERE a.author_id = 10 and a.author_id = b.author_id
+ LIMIT 3;
+
+-- following join is not router plannable since no worker
+-- contains both shards; a CTE is added to make this fail
+-- at the logical planner
+WITH single_shard as (SELECT * FROM articles_single_shard_hash_mx)
+SELECT a.author_id as first_author, b.word_count as second_word_count
+ FROM articles_hash_mx a, single_shard b
+ WHERE a.author_id = 2 and a.author_id = b.author_id
+ LIMIT 3;
+
+-- single shard select with limit is router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ LIMIT 3;
+
+-- single shard select with limit + offset is router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ LIMIT 2
+ OFFSET 1;
+
+-- single shard select with limit + offset + order by is router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id desc
+ LIMIT 2
+ OFFSET 1;
+
+-- single shard select with group by on non-partition column is router plannable
+SELECT id
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ GROUP BY id
+ ORDER BY id;
+
+-- single shard select with distinct is router plannable
+SELECT distinct id
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id;
+
+-- single shard aggregate is router plannable
+SELECT avg(word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 2;
+
+-- max, min, sum, count are router plannable on single shard
+SELECT max(word_count) as max, min(word_count) as min,
+ sum(word_count) as sum, count(word_count) as cnt
+ FROM articles_hash_mx
+ WHERE author_id = 2;
+
+
+-- queries with aggregates and group by supported on single shard
+SELECT max(word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ GROUP BY author_id;
+
+
+-- router plannable union queries are supported
+(SELECT * FROM articles_hash_mx WHERE author_id = 1)
+UNION
+(SELECT * FROM articles_hash_mx WHERE author_id = 3);
+
+SELECT * FROM (
+ (SELECT * FROM articles_hash_mx WHERE author_id = 1)
+ UNION
+ (SELECT * FROM articles_hash_mx WHERE author_id = 3)) uu;
+
+(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1)
+UNION
+(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3);
+
+(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1)
+INTERSECT
+(SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3);
+
+(SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1)
+EXCEPT
+(SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3);
+
+-- union queries are not supported if not router plannable
+-- there is an inconsistency in shard pruning between
+-- ubuntu/mac, so disable log messages for these queries only
+
+SET client_min_messages to 'NOTICE';
+
+(SELECT * FROM articles_hash_mx WHERE author_id = 1)
+UNION
+(SELECT * FROM articles_hash_mx WHERE author_id = 2);
+
+
+SELECT * FROM (
+ (SELECT * FROM articles_hash_mx WHERE author_id = 1)
+ UNION
+ (SELECT * FROM articles_hash_mx WHERE author_id = 2)) uu;
+
+-- error out for queries with repartition jobs
+SELECT *
+ FROM articles_hash_mx a, articles_hash_mx b
+ WHERE a.id = b.id AND a.author_id = 1;
+
+-- queries which hit more than one shard are not router plannable or executable
+-- handled by real-time executor
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id >= 1 AND author_id <= 3;
+
+SET citus.task_executor_type TO 'real-time';
+
+-- Test various filtering options for router plannable check
+SET client_min_messages to 'DEBUG2';
+
+-- this is definitely single shard
+-- and router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 and author_id >= 1;
+
+-- not router plannable due to or
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 or id = 1;
+
+-- router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 and (id = 1 or id = 41);
+
+-- router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 and (id = random()::int * 0);
+
+-- not router plannable due to function call on the right side
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = (random()::int * 0 + 1);
+
+-- not router plannable due to or
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 or id = 1;
+
+-- router plannable due to abs(-1) getting converted to 1 by postgresql
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = abs(-1);
+
+-- not router plannable due to abs() function
+SELECT *
+ FROM articles_hash_mx
+ WHERE 1 = abs(author_id);
+
+-- not router plannable due to abs() function
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = abs(author_id - 2);
+
+-- router plannable, function on different field
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1 and (id = abs(id - 2));
+
+-- not router plannable due to is true
+SELECT *
+ FROM articles_hash_mx
+ WHERE (author_id = 1) is true;
+
+-- router plannable, (boolean expression) = true is collapsed to (boolean expression)
+SELECT *
+ FROM articles_hash_mx
+ WHERE (author_id = 1) = true;
+
+-- router plannable, between operator is on another column
+SELECT *
+ FROM articles_hash_mx
+ WHERE (author_id = 1) and id between 0 and 20;
+
+-- router plannable, partition column expression is and'ed to rest
+SELECT *
+ FROM articles_hash_mx
+ WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s';
+
+-- router plannable, order is changed
+SELECT *
+ FROM articles_hash_mx
+ WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1);
+
+-- router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE (title like '%s' or title like 'a%') and (author_id = 1);
+
+-- router plannable
+SELECT *
+ FROM articles_hash_mx
+ WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000);
+
+-- window functions are supported if query is router plannable
+SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
+ FROM articles_hash_mx
+ WHERE author_id = 5;
+
+SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
+ FROM articles_hash_mx
+ WHERE author_id = 5
+ ORDER BY word_count DESC;
+
+SELECT id, MIN(id) over (order by word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+
+SELECT id, word_count, AVG(word_count) over (order by word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+
+SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+
+-- window functions are not supported for non-router plannable queries
+SELECT id, MIN(id) over (order by word_count)
+ FROM articles_hash_mx
+ WHERE author_id = 1 or author_id = 2;
+
+SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count
+ FROM articles_hash_mx
+ WHERE author_id = 5 or author_id = 2;
+
+-- complex query hitting a single shard
+SELECT
+ count(DISTINCT CASE
+ WHEN
+ word_count > 100
+ THEN
+ id
+ ELSE
+ NULL
+ END) as c
+ FROM
+ articles_hash_mx
+ WHERE
+ author_id = 5;
+
+-- same query is not router plannable if it hits multiple shards
+SELECT
+ count(DISTINCT CASE
+ WHEN
+ word_count > 100
+ THEN
+ id
+ ELSE
+ NULL
+ END) as c
+ FROM
+ articles_hash_mx
+ GROUP BY
+ author_id;
+
+-- queries inside transactions can be router plannable
+BEGIN;
+SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id;
+END;
+
+-- cursor queries are router plannable
+BEGIN;
+DECLARE test_cursor CURSOR FOR
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id;
+FETCH test_cursor;
+FETCH test_cursor;
+END;
+
+-- queries inside copy can be router plannable
+COPY (
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id) TO STDOUT;
+
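+
+-- the same router plannable COPY with an explicit output format; CSV is picked here
+-- only as an example of a COPY option, nothing router specific depends on it
+COPY (
+  SELECT id, title
+  FROM articles_hash_mx
+  WHERE author_id = 1
+  ORDER BY id) TO STDOUT WITH (FORMAT csv);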
+-- table creation queries with a router plannable query inside can be router plannable
+CREATE TEMP TABLE temp_articles_hash_mx as
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1
+ ORDER BY id;
+
+-- router plannable queries may include FILTER for aggregates
+SELECT count(*), count(*) FILTER (WHERE id < 3)
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+
+-- non-router plannable queries support filters as well
+SELECT count(*), count(*) FILTER (WHERE id < 3)
+ FROM articles_hash_mx
+ WHERE author_id = 1 or author_id = 2;
+
+-- prepare queries can be router plannable
+PREPARE author_1_articles as
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = 1;
+
+EXECUTE author_1_articles;
+
+-- parametric prepare queries can be router plannable
+PREPARE author_articles(int) as
+ SELECT *
+ FROM articles_hash_mx
+ WHERE author_id = $1;
+
+EXECUTE author_articles(1);
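+
+-- re-executing the prepared statement with another author id (5 is an arbitrary
+-- choice for this sketch) should again be routed to that author's single shard
+EXECUTE author_articles(5);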
+
+-- queries inside plpgsql functions could be router plannable
+CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$
+DECLARE
+ max_id integer;
+BEGIN
+ SELECT MAX(id) FROM articles_hash_mx ah
+ WHERE author_id = 1
+ into max_id;
+ return max_id;
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT author_articles_max_id();
+
+-- plpgsql functions that return query results are not router plannable
+CREATE OR REPLACE FUNCTION author_articles_id_word_count() RETURNS TABLE(id bigint, word_count int) AS $$
+DECLARE
+BEGIN
+ RETURN QUERY
+ SELECT ah.id, ah.word_count
+ FROM articles_hash_mx ah
+ WHERE author_id = 1;
+
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT * FROM author_articles_id_word_count();
+
+-- materialized views can be created for router plannable queries
+CREATE MATERIALIZED VIEW mv_articles_hash_mx AS
+ SELECT * FROM articles_hash_mx WHERE author_id = 1;
+
+SELECT * FROM mv_articles_hash_mx;
+
+SET client_min_messages to 'INFO';
+DROP MATERIALIZED VIEW mv_articles_hash_mx;
+SET client_min_messages to 'DEBUG2';
+
+CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS
+ SELECT * FROM articles_hash_mx WHERE author_id in (1,2);
+
+-- router planner/executor is disabled for task-tracker executor
+-- following query is router plannable, but router planner is disabled
+
+-- TODO: Uncomment once we fix task-tracker issue
+--SET citus.task_executor_type to 'task-tracker';
+--SELECT id
+-- FROM articles_hash_mx
+-- WHERE author_id = 1;
+
+-- insert query is router plannable even under task-tracker
+INSERT INTO articles_hash_mx VALUES (51, 1, 'amateus', 1814);
+
+-- verify the insert is successful (not router plannable and executable)
+SELECT id
+ FROM articles_hash_mx
+ WHERE author_id = 1;
diff --git a/src/test/regress/sql/multi_mx_schema_support.sql b/src/test/regress/sql/multi_mx_schema_support.sql
new file mode 100644
index 000000000..de631df22
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_schema_support.sql
@@ -0,0 +1,221 @@
+--
+-- MULTI_MX_SCHEMA_SUPPORT
+--
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1210000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1210000;
+
+-- connect to a worker node and run some queries
+\c - - - :worker_1_port
+
+-- test very basic queries
+SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4;
+SELECT * FROM citus_mx_test_schema.nation_hash ORDER BY n_nationkey LIMIT 4;
+
+
+-- test cursors
+SET search_path TO public;
+BEGIN;
+DECLARE test_cursor CURSOR FOR
+ SELECT *
+ FROM nation_hash
+ WHERE n_nationkey = 1;
+FETCH test_cursor;
+END;
+
+-- test with search_path is set
+SET search_path TO citus_mx_test_schema;
+BEGIN;
+DECLARE test_cursor CURSOR FOR
+ SELECT *
+ FROM nation_hash
+ WHERE n_nationkey = 1;
+FETCH test_cursor;
+END;
+
+
+-- test inserting to table in different schema
+SET search_path TO public;
+
+INSERT INTO citus_mx_test_schema.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (100, 'TURKEY', 3);
+
+-- verify insertion
+SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey = 100;
+
+-- test with search_path is set
+SET search_path TO citus_mx_test_schema;
+
+INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (101, 'GERMANY', 3);
+
+-- verify insertion
+SELECT * FROM nation_hash WHERE n_nationkey = 101;
+
+-- TODO: add UPDATE/DELETE/UPSERT
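+
+-- a minimal sketch of the UPDATE flavor the TODO above refers to; it touches the
+-- row inserted earlier in this file and the new region key value is arbitrary
+UPDATE citus_mx_test_schema.nation_hash SET n_regionkey = 4 WHERE n_nationkey = 100;
+SELECT n_nationkey, n_regionkey FROM citus_mx_test_schema.nation_hash WHERE n_nationkey = 100;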
+
+
+-- test UDFs with schemas
+SET search_path TO public;
+
+
+-- UDF in public, table in a schema other than public, search_path is not set
+SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
+
+-- UDF in public, table in a schema other than public, search_path is set
+SET search_path TO citus_mx_test_schema;
+SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
+
+
+-- UDF in schema, table in a schema other than public, search_path is not set
+SET search_path TO public;
+SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
+
+-- UDF in schema, table in a schema other than public, search_path is set
+SET search_path TO citus_mx_test_schema;
+SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5;
+
+
+-- test operators with schema
+SET search_path TO public;
+
+-- test with search_path is not set
+SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus_mx_test_schema.===) 1;
+
+-- test with search_path is set
+SET search_path TO citus_mx_test_schema;
+SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1;
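+
+-- with the search_path set as above, the same operator can also be written without
+-- the OPERATOR() syntax; shown only to illustrate schema-based operator resolution
+SELECT * FROM nation_hash WHERE n_nationkey === 1;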
+
+
+SELECT * FROM citus_mx_test_schema.nation_hash_collation_search_path;
+SELECT n_comment FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY n_comment COLLATE citus_mx_test_schema.english;
+
+SET search_path TO citus_mx_test_schema;
+
+SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC;
+SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english;
+
+
+SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = '(a,a)'::citus_mx_test_schema.new_composite_type ORDER BY 1::int DESC;
+
+-- test with search_path is set
+SET search_path TO citus_mx_test_schema;
+SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type ORDER BY 1::int DESC;
+
+
+-- check when search_path is public,
+-- join of two tables which are in different schemas,
+-- join on partition column
+SET search_path TO public;
+SELECT
+ count (*)
+FROM
+ citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_nationkey = n2.n_nationkey;
+
+-- check when search_path is different than public,
+-- join of two tables which are in different schemas,
+-- join on partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_nationkey = n2.n_nationkey;
+
+-- check when search_path is public,
+-- join of two tables which are in same schemas,
+-- join on partition column
+SET search_path TO public;
+SELECT
+ count (*)
+FROM
+ citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_1.nation_hash_2 n2
+WHERE
+ n1.n_nationkey = n2.n_nationkey;
+
+-- check when search_path is different than public,
+-- join of two tables which are in same schemas,
+-- join on partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, nation_hash_2 n2
+WHERE
+ n1.n_nationkey = n2.n_nationkey;
+
+-- single repartition joins require the task-tracker executor
+SET citus.task_executor_type TO "task-tracker";
+
+-- check when search_path is public,
+-- join of two tables which are in different schemas,
+-- join on partition column and non-partition column
+SET search_path TO public;
+SELECT
+ count (*)
+FROM
+ citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_nationkey = n2.n_regionkey;
+
+-- check when search_path is different than public,
+-- join of two tables which are in different schemas,
+-- join on partition column and non-partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_nationkey = n2.n_regionkey;
+
+-- check when search_path is different than public,
+-- join of two tables which are in same schemas,
+-- join on partition column and non-partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, nation_hash_2 n2
+WHERE
+ n1.n_nationkey = n2.n_regionkey;
+
+-- hash repartition joins
+
+-- check when search_path is public,
+-- join of two tables which are in different schemas,
+-- join on non-partition column
+SET search_path TO public;
+SELECT
+ count (*)
+FROM
+ citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_regionkey = n2.n_regionkey;
+
+-- check when search_path is different than public,
+-- join of two tables which are in different schemas,
+-- join on non-partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2
+WHERE
+ n1.n_regionkey = n2.n_regionkey;
+
+-- check when search_path is different than public,
+-- join of two tables which are in same schemas,
+-- join on non-partition column
+SET search_path TO citus_mx_test_schema_join_1;
+SELECT
+ count (*)
+FROM
+ nation_hash n1, nation_hash_2 n2
+WHERE
+ n1.n_regionkey = n2.n_regionkey;
+
+-- set the task executor type back to real-time
+SET citus.task_executor_type TO "real-time";
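+-- equivalently, assuming real-time is the value configured in postgresql.conf,
+-- the executor could be restored without hard-coding it:
+-- RESET citus.task_executor_type;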
+
diff --git a/src/test/regress/sql/multi_mx_tpch_query1.sql b/src/test/regress/sql/multi_mx_tpch_query1.sql
new file mode 100644
index 000000000..ef80029c6
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query1.sql
@@ -0,0 +1,106 @@
+--
+-- MULTI_MX_TPCH_QUERY1
+--
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #1 from the TPC-H decision support benchmark
+
+SELECT
+ l_returnflag,
+ l_linestatus,
+ sum(l_quantity) as sum_qty,
+ sum(l_extendedprice) as sum_base_price,
+ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+ avg(l_quantity) as avg_qty,
+ avg(l_extendedprice) as avg_price,
+ avg(l_discount) as avg_disc,
+ count(*) as count_order
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate <= date '1998-12-01' - interval '90 days'
+GROUP BY
+ l_returnflag,
+ l_linestatus
+ORDER BY
+ l_returnflag,
+ l_linestatus;
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #1 from the TPC-H decision support benchmark
+
+SELECT
+ l_returnflag,
+ l_linestatus,
+ sum(l_quantity) as sum_qty,
+ sum(l_extendedprice) as sum_base_price,
+ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+ avg(l_quantity) as avg_qty,
+ avg(l_extendedprice) as avg_price,
+ avg(l_discount) as avg_disc,
+ count(*) as count_order
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate <= date '1998-12-01' - interval '90 days'
+GROUP BY
+ l_returnflag,
+ l_linestatus
+ORDER BY
+ l_returnflag,
+ l_linestatus;
+
+-- connect to the other node
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #1 from the TPC-H decision support benchmark
+
+SELECT
+ l_returnflag,
+ l_linestatus,
+ sum(l_quantity) as sum_qty,
+ sum(l_extendedprice) as sum_base_price,
+ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+ avg(l_quantity) as avg_qty,
+ avg(l_extendedprice) as avg_price,
+ avg(l_discount) as avg_disc,
+ count(*) as count_order
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate <= date '1998-12-01' - interval '90 days'
+GROUP BY
+ l_returnflag,
+ l_linestatus
+ORDER BY
+ l_returnflag,
+ l_linestatus;
diff --git a/src/test/regress/sql/multi_mx_tpch_query10.sql b/src/test/regress/sql/multi_mx_tpch_query10.sql
new file mode 100644
index 000000000..5af46c8a9
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query10.sql
@@ -0,0 +1,126 @@
+--
+-- MULTI_MX_TPCH_QUERY10
+--
+
+-- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests,
+-- we don't set citus.large_table_shard_count here, and instead use the default value
+-- coming from postgresql.conf or multi_task_tracker_executor.conf.
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+SELECT
+ c_custkey,
+ c_name,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ c_acctbal,
+ n_name,
+ c_address,
+ c_phone,
+ c_comment
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx,
+ nation_mx
+WHERE
+ c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate >= date '1993-10-01'
+ AND o_orderdate < date '1993-10-01' + interval '3' month
+ AND l_returnflag = 'R'
+ AND c_nationkey = n_nationkey
+GROUP BY
+ c_custkey,
+ c_name,
+ c_acctbal,
+ c_phone,
+ n_name,
+ c_address,
+ c_comment
+ORDER BY
+ revenue DESC
+LIMIT 20;
+
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
+
+SELECT
+ c_custkey,
+ c_name,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ c_acctbal,
+ n_name,
+ c_address,
+ c_phone,
+ c_comment
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx,
+ nation_mx
+WHERE
+ c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate >= date '1993-10-01'
+ AND o_orderdate < date '1993-10-01' + interval '3' month
+ AND l_returnflag = 'R'
+ AND c_nationkey = n_nationkey
+GROUP BY
+ c_custkey,
+ c_name,
+ c_acctbal,
+ c_phone,
+ n_name,
+ c_address,
+ c_comment
+ORDER BY
+ revenue DESC
+LIMIT 20;
+
+-- connect to the other worker
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
+
+SELECT
+ c_custkey,
+ c_name,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ c_acctbal,
+ n_name,
+ c_address,
+ c_phone,
+ c_comment
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx,
+ nation_mx
+WHERE
+ c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate >= date '1993-10-01'
+ AND o_orderdate < date '1993-10-01' + interval '3' month
+ AND l_returnflag = 'R'
+ AND c_nationkey = n_nationkey
+GROUP BY
+ c_custkey,
+ c_name,
+ c_acctbal,
+ c_phone,
+ n_name,
+ c_address,
+ c_comment
+ORDER BY
+ revenue DESC
+LIMIT 20;
diff --git a/src/test/regress/sql/multi_mx_tpch_query12.sql b/src/test/regress/sql/multi_mx_tpch_query12.sql
new file mode 100644
index 000000000..a15d90d57
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query12.sql
@@ -0,0 +1,127 @@
+--
+-- MULTI_MX_TPCH_QUERY12
+--
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #12 from the TPC-H decision support benchmark
+
+SELECT
+ l_shipmode,
+ sum(case
+ when o_orderpriority = '1-URGENT'
+ OR o_orderpriority = '2-HIGH'
+ then 1
+ else 0
+ end) as high_line_count,
+ sum(case
+ when o_orderpriority <> '1-URGENT'
+ AND o_orderpriority <> '2-HIGH'
+ then 1
+ else 0
+ end) AS low_line_count
+FROM
+ orders_mx,
+ lineitem_mx
+WHERE
+ o_orderkey = l_orderkey
+ AND l_shipmode in ('MAIL', 'SHIP')
+ AND l_commitdate < l_receiptdate
+ AND l_shipdate < l_commitdate
+ AND l_receiptdate >= date '1994-01-01'
+ AND l_receiptdate < date '1994-01-01' + interval '1' year
+GROUP BY
+ l_shipmode
+ORDER BY
+ l_shipmode;
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #12 from the TPC-H decision support benchmark
+
+SELECT
+ l_shipmode,
+ sum(case
+ when o_orderpriority = '1-URGENT'
+ OR o_orderpriority = '2-HIGH'
+ then 1
+ else 0
+ end) as high_line_count,
+ sum(case
+ when o_orderpriority <> '1-URGENT'
+ AND o_orderpriority <> '2-HIGH'
+ then 1
+ else 0
+ end) AS low_line_count
+FROM
+ orders_mx,
+ lineitem_mx
+WHERE
+ o_orderkey = l_orderkey
+ AND l_shipmode in ('MAIL', 'SHIP')
+ AND l_commitdate < l_receiptdate
+ AND l_shipdate < l_commitdate
+ AND l_receiptdate >= date '1994-01-01'
+ AND l_receiptdate < date '1994-01-01' + interval '1' year
+GROUP BY
+ l_shipmode
+ORDER BY
+ l_shipmode;
+
+-- connect to the other worker node
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #12 from the TPC-H decision support benchmark
+
+SELECT
+ l_shipmode,
+ sum(case
+ when o_orderpriority = '1-URGENT'
+ OR o_orderpriority = '2-HIGH'
+ then 1
+ else 0
+ end) as high_line_count,
+ sum(case
+ when o_orderpriority <> '1-URGENT'
+ AND o_orderpriority <> '2-HIGH'
+ then 1
+ else 0
+ end) AS low_line_count
+FROM
+ orders_mx,
+ lineitem_mx
+WHERE
+ o_orderkey = l_orderkey
+ AND l_shipmode in ('MAIL', 'SHIP')
+ AND l_commitdate < l_receiptdate
+ AND l_shipdate < l_commitdate
+ AND l_receiptdate >= date '1994-01-01'
+ AND l_receiptdate < date '1994-01-01' + interval '1' year
+GROUP BY
+ l_shipmode
+ORDER BY
+ l_shipmode;
diff --git a/src/test/regress/sql/multi_mx_tpch_query14.sql b/src/test/regress/sql/multi_mx_tpch_query14.sql
new file mode 100644
index 000000000..8cc4ad083
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query14.sql
@@ -0,0 +1,82 @@
+--
+-- MULTI_MX_TPCH_QUERY14
+--
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #14 from the TPC-H decision support benchmark
+
+SELECT
+ 100.00 * sum(case
+ when p_type like 'PROMO%'
+ then l_extendedprice * (1 - l_discount)
+ else 0
+ end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ l_partkey = p_partkey
+ AND l_shipdate >= date '1995-09-01'
+ AND l_shipdate < date '1995-09-01' + interval '1' year;
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #14 from the TPC-H decision support benchmark
+
+SELECT
+ 100.00 * sum(case
+ when p_type like 'PROMO%'
+ then l_extendedprice * (1 - l_discount)
+ else 0
+ end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ l_partkey = p_partkey
+ AND l_shipdate >= date '1995-09-01'
+ AND l_shipdate < date '1995-09-01' + interval '1' year;
+
+-- connect to the other node
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #14 from the TPC-H decision support benchmark
+
+SELECT
+ 100.00 * sum(case
+ when p_type like 'PROMO%'
+ then l_extendedprice * (1 - l_discount)
+ else 0
+ end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ l_partkey = p_partkey
+ AND l_shipdate >= date '1995-09-01'
+ AND l_shipdate < date '1995-09-01' + interval '1' year;
diff --git a/src/test/regress/sql/multi_mx_tpch_query19.sql b/src/test/regress/sql/multi_mx_tpch_query19.sql
new file mode 100644
index 000000000..ec2c0d6de
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query19.sql
@@ -0,0 +1,133 @@
+--
+-- MULTI_MX_TPCH_QUERY19
+--
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #19 from the TPC-H decision support benchmark. Note that we modified
+-- the query from its original to make it work on smaller data sets.
+
+SELECT
+ sum(l_extendedprice* (1 - l_discount)) as revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15')
+ AND l_quantity >= 10
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#23' OR p_brand='Brand#24')
+ AND l_quantity >= 20
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35')
+ AND l_quantity >= 1
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ );
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #19 from the TPC-H decision support benchmark. Note that we modified
+-- the query from its original to make it work on smaller data sets.
+
+SELECT
+ sum(l_extendedprice* (1 - l_discount)) as revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15')
+ AND l_quantity >= 10
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#23' OR p_brand='Brand#24')
+ AND l_quantity >= 20
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35')
+ AND l_quantity >= 1
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ );
+
+-- connect to the other node
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #19 from the TPC-H decision support benchmark. Note that we modified
+-- the query from its original to make it work on smaller data sets.
+
+SELECT
+ sum(l_extendedprice* (1 - l_discount)) as revenue
+FROM
+ lineitem_mx,
+ part_mx
+WHERE
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15')
+ AND l_quantity >= 10
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#23' OR p_brand='Brand#24')
+ AND l_quantity >= 20
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ )
+ OR
+ (
+ p_partkey = l_partkey
+ AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35')
+ AND l_quantity >= 1
+ AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK')
+ AND l_shipinstruct = 'DELIVER IN PERSON'
+ );
diff --git a/src/test/regress/sql/multi_mx_tpch_query3.sql b/src/test/regress/sql/multi_mx_tpch_query3.sql
new file mode 100644
index 000000000..26560575f
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query3.sql
@@ -0,0 +1,95 @@
+--
+-- MULTI_MX_TPCH_QUERY3
+--
+
+-- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests,
+-- we don't set citus.large_table_shard_count here, and instead use the default value
+-- coming from postgresql.conf or multi_task_tracker_executor.conf.
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+SELECT
+ l_orderkey,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ o_orderdate,
+ o_shippriority
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx
+WHERE
+ c_mktsegment = 'BUILDING'
+ AND c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate < date '1995-03-15'
+ AND l_shipdate > date '1995-03-15'
+GROUP BY
+ l_orderkey,
+ o_orderdate,
+ o_shippriority
+ORDER BY
+ revenue DESC,
+ o_orderdate;
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
+
+SELECT
+ l_orderkey,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ o_orderdate,
+ o_shippriority
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx
+WHERE
+ c_mktsegment = 'BUILDING'
+ AND c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate < date '1995-03-15'
+ AND l_shipdate > date '1995-03-15'
+GROUP BY
+ l_orderkey,
+ o_orderdate,
+ o_shippriority
+ORDER BY
+ revenue DESC,
+ o_orderdate;
+
+-- connect to the other node
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
+
+SELECT
+ l_orderkey,
+ sum(l_extendedprice * (1 - l_discount)) as revenue,
+ o_orderdate,
+ o_shippriority
+FROM
+ customer_mx,
+ orders_mx,
+ lineitem_mx
+WHERE
+ c_mktsegment = 'BUILDING'
+ AND c_custkey = o_custkey
+ AND l_orderkey = o_orderkey
+ AND o_orderdate < date '1995-03-15'
+ AND l_shipdate > date '1995-03-15'
+GROUP BY
+ l_orderkey,
+ o_orderdate,
+ o_shippriority
+ORDER BY
+ revenue DESC,
+ o_orderdate;
diff --git a/src/test/regress/sql/multi_mx_tpch_query6.sql b/src/test/regress/sql/multi_mx_tpch_query6.sql
new file mode 100644
index 000000000..2e52ab5a9
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query6.sql
@@ -0,0 +1,70 @@
+--
+-- MULTI_MX_TPCH_QUERY6
+--
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #6 from the TPC-H decision support benchmark
+
+SELECT
+ sum(l_extendedprice * l_discount) as revenue
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate >= date '1994-01-01'
+ and l_shipdate < date '1994-01-01' + interval '1 year'
+ and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+ and l_quantity < 24;
+
+-- connect to one of the worker nodes
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #6 from the TPC-H decision support benchmark
+
+SELECT
+ sum(l_extendedprice * l_discount) as revenue
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate >= date '1994-01-01'
+ and l_shipdate < date '1994-01-01' + interval '1 year'
+ and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+ and l_quantity < 24;
+
+-- connect to the other worker node
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #6 from the TPC-H decision support benchmark
+
+SELECT
+ sum(l_extendedprice * l_discount) as revenue
+FROM
+ lineitem_mx
+WHERE
+ l_shipdate >= date '1994-01-01'
+ and l_shipdate < date '1994-01-01' + interval '1 year'
+ and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+ and l_quantity < 24;
diff --git a/src/test/regress/sql/multi_mx_tpch_query7.sql b/src/test/regress/sql/multi_mx_tpch_query7.sql
new file mode 100644
index 000000000..2e1739d87
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query7.sql
@@ -0,0 +1,160 @@
+--
+-- MULTI_MX_TPCH_QUERY7
+--
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #7 from the TPC-H decision support benchmark
+
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) as revenue
+FROM
+ (
+ SELECT
+ n1.n_name as supp_nation,
+ n2.n_name as cust_nation,
+ extract(year FROM l_shipdate) as l_year,
+ l_extendedprice * (1 - l_discount) as volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = n1.n_nationkey
+ AND c_nationkey = n2.n_nationkey
+ AND (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) as shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #7 from the TPC-H decision support benchmark
+
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) as revenue
+FROM
+ (
+ SELECT
+ n1.n_name as supp_nation,
+ n2.n_name as cust_nation,
+ extract(year FROM l_shipdate) as l_year,
+ l_extendedprice * (1 - l_discount) as volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = n1.n_nationkey
+ AND c_nationkey = n2.n_nationkey
+ AND (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) as shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+
+-- connect to the other worker node
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #7 from the TPC-H decision support benchmark
+
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) as revenue
+FROM
+ (
+ SELECT
+ n1.n_name as supp_nation,
+ n2.n_name as cust_nation,
+ extract(year FROM l_shipdate) as l_year,
+ l_extendedprice * (1 - l_discount) as volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = n1.n_nationkey
+ AND c_nationkey = n2.n_nationkey
+ AND (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) as shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
diff --git a/src/test/regress/sql/multi_mx_tpch_query7_nested.sql b/src/test/regress/sql/multi_mx_tpch_query7_nested.sql
new file mode 100644
index 000000000..702d8c91d
--- /dev/null
+++ b/src/test/regress/sql/multi_mx_tpch_query7_nested.sql
@@ -0,0 +1,187 @@
+--
+-- MULTI_MX_TPCH_QUERY7_NESTED
+--
+
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
+
+-- connect to the schema node
+\c - - - :master_port
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #7 from the TPC-H benchmark; modified to include sub-selects
+
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) AS revenue
+FROM
+ (
+ SELECT
+ supp_nation,
+ cust_nation,
+ extract(year FROM l_shipdate) AS l_year,
+ l_extendedprice * (1 - l_discount) AS volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ (
+ SELECT
+ n1.n_nationkey AS supp_nation_key,
+ n2.n_nationkey AS cust_nation_key,
+ n1.n_name AS supp_nation,
+ n2.n_name AS cust_nation
+ FROM
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ ) AS temp
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = supp_nation_key
+ AND c_nationkey = cust_nation_key
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) AS shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+
+-- connect to one of the workers
+\c - - - :worker_1_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #7 from the TPC-H benchmark; modified to include sub-selects
+
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) AS revenue
+FROM
+ (
+ SELECT
+ supp_nation,
+ cust_nation,
+ extract(year FROM l_shipdate) AS l_year,
+ l_extendedprice * (1 - l_discount) AS volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ (
+ SELECT
+ n1.n_nationkey AS supp_nation_key,
+ n2.n_nationkey AS cust_nation_key,
+ n1.n_name AS supp_nation,
+ n2.n_name AS cust_nation
+ FROM
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ ) AS temp
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = supp_nation_key
+ AND c_nationkey = cust_nation_key
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) AS shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;
+
+-- connect to the other worker node
+\c - - - :worker_2_port
+
+ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
+ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
+
+-- Change configuration to treat lineitem and orders tables as large
+
+SET citus.large_table_shard_count TO 2;
+
+-- Query #7 from the TPC-H benchmark; modified to include sub-selects
+
+SELECT
+ supp_nation,
+ cust_nation,
+ l_year,
+ sum(volume) AS revenue
+FROM
+ (
+ SELECT
+ supp_nation,
+ cust_nation,
+ extract(year FROM l_shipdate) AS l_year,
+ l_extendedprice * (1 - l_discount) AS volume
+ FROM
+ supplier_mx,
+ lineitem_mx,
+ orders_mx,
+ customer_mx,
+ (
+ SELECT
+ n1.n_nationkey AS supp_nation_key,
+ n2.n_nationkey AS cust_nation_key,
+ n1.n_name AS supp_nation,
+ n2.n_name AS cust_nation
+ FROM
+ nation_mx n1,
+ nation_mx n2
+ WHERE
+ (
+ (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY')
+ OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE')
+ )
+ ) AS temp
+ WHERE
+ s_suppkey = l_suppkey
+ AND o_orderkey = l_orderkey
+ AND c_custkey = o_custkey
+ AND s_nationkey = supp_nation_key
+ AND c_nationkey = cust_nation_key
+ AND l_shipdate between date '1995-01-01' AND date '1996-12-31'
+ ) AS shipping
+GROUP BY
+ supp_nation,
+ cust_nation,
+ l_year
+ORDER BY
+ supp_nation,
+ cust_nation,
+ l_year;