Merge pull request #5377 from citusdata/turn_mx_on_multi_schedule

Fix tests that fail with MX in multi_schedule
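With metadata sync (MX) on, the workers keep their own copy of the distributed-table metadata, so each worker's pg_catalog additionally contains a schema-only shell relation for every distributed table next to its shard placements. That is why the worker-side pg_class counts in the expected files below grow (6 to 9, 15 to 18: the extra rows are the new shell relations), and why the per-test start_metadata_sync_to_node()/stop_metadata_sync_to_node() calls become redundant and are dropped. Tests that still assume no synced metadata move between the new turn_mx_off and turn_mx_on steps in multi_schedule.

For reference, the catalog probe the affected tests use (taken verbatim from the expected files; the count is per worker):

SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_catalog.pg_class WHERE relname LIKE 'partitioned\_table%'$$);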
pull/5378/head
Halil Ozan Akgül 2021-10-15 13:08:30 +03:00 committed by GitHub
commit eca784d088
11 changed files with 32 additions and 136 deletions

View File

@@ -219,8 +219,8 @@ SELECT logicalrelid::text FROM pg_dist_partition WHERE logicalrelid::regclass::t
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_catalog.pg_class WHERE relname LIKE 'partitioned\_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
-(localhost,57637,t,6)
-(localhost,57638,t,6)
+(localhost,57637,t,9)
+(localhost,57638,t,9)
(2 rows)
SELECT inhrelid::regclass::text FROM pg_catalog.pg_inherits WHERE inhparent = 'partitioned_table'::regclass ORDER BY 1;
@@ -291,8 +291,8 @@ SELECT logicalrelid::text FROM pg_dist_partition WHERE logicalrelid::regclass::t
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_catalog.pg_class WHERE relname LIKE 'partitioned\_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
-(localhost,57637,t,15)
-(localhost,57638,t,15)
+(localhost,57637,t,18)
+(localhost,57638,t,18)
(2 rows)
SELECT inhrelid::regclass::text FROM pg_catalog.pg_inherits WHERE inhparent = 'partitioned_table'::regclass ORDER BY 1;
@@ -497,45 +497,6 @@ SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHE
(1 row)
\endif
--- test with metadata sync
-SET citus.shard_replication_factor TO 1;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-start_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
-CREATE TABLE metadata_sync_table (a BIGSERIAL);
-SELECT create_distributed_table('metadata_sync_table', 'a', colocate_with:='none');
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
-SELECT alter_distributed_table('metadata_sync_table', shard_count:=6);
-alter_distributed_table
----------------------------------------------------------------------
-(1 row)
-SELECT alter_distributed_table('metadata_sync_table', shard_count:=8);
-alter_distributed_table
----------------------------------------------------------------------
-(1 row)
-SELECT table_name, shard_count FROM public.citus_tables WHERE table_name::text = 'metadata_sync_table';
-table_name | shard_count
----------------------------------------------------------------------
-metadata_sync_table | 8
-(1 row)
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-stop_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
-- test complex cascade operations
CREATE TABLE cas_1 (a INT UNIQUE);
CREATE TABLE cas_2 (a INT UNIQUE);
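The block removed above exercised alter_distributed_table while metadata sync was explicitly enabled; with MX on for the whole schedule that setup is the default, so the dedicated block is redundant. A minimal sketch of the same scenario under always-on MX, using only calls that appear in this diff (the table name mx_demo is illustrative):

SET citus.shard_replication_factor TO 1;
CREATE TABLE mx_demo (a BIGSERIAL);
SELECT create_distributed_table('mx_demo', 'a', colocate_with := 'none');
SELECT alter_distributed_table('mx_demo', shard_count := 8);
SELECT table_name, shard_count FROM public.citus_tables WHERE table_name::text = 'mx_demo';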

View File

@@ -87,8 +87,8 @@ SELECT logicalrelid::text FROM pg_dist_partition WHERE logicalrelid::regclass::t
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_catalog.pg_class WHERE relname LIKE 'partitioned\_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
-(localhost,57637,t,6)
-(localhost,57638,t,6)
+(localhost,57637,t,9)
+(localhost,57638,t,9)
(2 rows)
SELECT inhrelid::regclass::text FROM pg_catalog.pg_inherits WHERE inhparent = 'partitioned_table'::regclass ORDER BY 1;
@@ -150,8 +150,8 @@ SELECT logicalrelid::text FROM pg_dist_partition WHERE logicalrelid::regclass::t
SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_catalog.pg_class WHERE relname LIKE 'partitioned\_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
-(localhost,57637,t,6)
-(localhost,57638,t,6)
+(localhost,57637,t,9)
+(localhost,57638,t,9)
(2 rows)
SELECT inhrelid::regclass::text FROM pg_catalog.pg_inherits WHERE inhparent = 'partitioned_table'::regclass ORDER BY 1;

View File

@@ -2,12 +2,6 @@ CREATE SCHEMA drop_column_partitioned_table;
SET search_path TO drop_column_partitioned_table;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 2580000;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-start_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
-- create a partitioned table with some columns that
-- are going to be dropped within the tests
CREATE TABLE sensors(
@@ -397,9 +391,3 @@ WHERE
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA drop_column_partitioned_table CASCADE;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-stop_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)

View File

@@ -780,7 +780,7 @@ SELECT right(table_name, 7)::int as shardid, * FROM (
table_name, constraint_name, constraint_type
FROM information_schema.table_constraints
WHERE
-table_name LIKE 'partitioning_hash_test%' AND
+table_name SIMILAR TO 'partitioning_hash_test%\d{2,}' AND
constraint_type = 'FOREIGN KEY'
ORDER BY 1, 2, 3
) q
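The LIKE pattern above also matches the schema-only relations (parent and partitions) that may exist on a node once metadata is synced, and their names carry no numeric shard suffix, so right(table_name, 7)::int would not yield a shard id. SIMILAR TO is anchored at both ends, and PostgreSQL passes backslash escapes such as \d through to the regex engine, so the new pattern keeps only names ending in at least two digits, i.e. shard names, regardless of whether MX shell relations are present. A quick illustration (the relation names are hypothetical):

SELECT 'partitioning_hash_test_1660012' SIMILAR TO 'partitioning_hash_test%\d{2,}'; -- t: shard suffix
SELECT 'partitioning_hash_test_2' SIMILAR TO 'partitioning_hash_test%\d{2,}';       -- f: partition name
SELECT 'partitioning_hash_test' SIMILAR TO 'partitioning_hash_test%\d{2,}';         -- f: parent table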

View File

@@ -414,6 +414,7 @@ where val = 'asdf';
-- not replicate reference tables from other test files
SET citus.replicate_reference_tables_on_activate TO off;
SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
+NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1

View File

@@ -194,10 +194,10 @@ SELECT attname || ' ' || attcompression AS column_compression FROM pg_attribute
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
)$$);
-column_compression
+column_compression
---------------------------------------------------------------------
-{"a p","a p","b ","b "}
-{"a p","a p","b ","b "}
+{"a p","a p","a p","b ","b ","b "}
+{"a p","a p","a p","b ","b ","b "}
(2 rows)
-- test column compression propagation in rebalance
@@ -220,10 +220,10 @@ NOTICE: cleaned up 1 orphaned shards
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
)$$);
-column_compression
+column_compression
---------------------------------------------------------------------
-{"a p","a p","b ","b "}
-{"a p","a p","b ","b "}
+{"a p","a p","a p","b ","b ","b "}
+{"a p","a p","a p","b ","b ","b "}
(2 rows)
-- test propagation of ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION ..
@@ -232,10 +232,10 @@ ALTER TABLE col_compression ALTER COLUMN a SET COMPRESSION default;
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
)$$);
-column_compression
+column_compression
---------------------------------------------------------------------
-{"a ","a ","b p","b p"}
-{"a ","a ","b p","b p"}
+{"a ","a ","a ","b p","b p","b p"}
+{"a ","a ","a ","b p","b p","b p"}
(2 rows)
-- test propagation of ALTER TABLE .. ADD COLUMN .. COMPRESSION ..
@@ -243,10 +243,10 @@ ALTER TABLE col_compression ADD COLUMN c TEXT COMPRESSION pglz;
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
)$$);
-column_compression
+column_compression
---------------------------------------------------------------------
-{"a ","a ","b p","b p","c p","c p"}
-{"a ","a ","b p","b p","c p","c p"}
+{"a ","a ","a ","b p","b p","b p","c p","c p","c p"}
+{"a ","a ","a ","b p","b p","b p","c p","c p","c p"}
(2 rows)
-- test attaching to a partitioned table with column compression
@@ -641,17 +641,7 @@ CALL proc_pushdown(1, ARRAY[2000,1], 'AAAA');
{2} | 2
(1 row)
--- make sure that metadata is synced, it may take few seconds
-CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-RETURNS void
-LANGUAGE C STRICT
-AS 'citus';
-SELECT wait_until_metadata_sync(30000);
-wait_until_metadata_sync
----------------------------------------------------------------------
-(1 row)
-- make sure that metadata is synced
SELECT bool_and(hasmetadata) FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port);
bool_and
---------------------------------------------------------------------
@@ -794,21 +784,6 @@ DEBUG: pushing down the procedure
NOTICE: IN passed NUMERIC: 6.0
DETAIL: from localhost:xxxxx
RESET client_min_messages;
--- we don't need metadata syncing anymore
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-NOTICE: dropping metadata on the node (localhost,57637)
-stop_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-NOTICE: dropping metadata on the node (localhost,57638)
-stop_metadata_sync_to_node
----------------------------------------------------------------------
-(1 row)
-- ALTER STATISTICS .. OWNER TO CURRENT_ROLE
CREATE TABLE st1 (a int, b int);
CREATE STATISTICS role_s1 ON a, b FROM st1;
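The removed lines re-created the wait_until_metadata_sync() helper from the citus C library and polled for up to 30 seconds; since the schedule now runs pg14 sequentially while metadata stays synced, the plain catalog check that remains is sufficient. A sketch of the same check, assuming the standard pg_dist_node columns hasmetadata and metadatasynced:

SELECT bool_and(hasmetadata AND metadatasynced)
FROM pg_dist_node
WHERE nodeport IN (:worker_1_port, :worker_2_port);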

View File

@@ -54,28 +54,23 @@ test: subquery_in_targetlist subquery_in_where subquery_complex_target_list
test: subquery_prepared_statements
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order
test: cte_inline recursive_view_local_table values
-test: pg13
-test: tableam
+test: pg13 pg12
+# run pg14 sequentially as it syncs metadata
+test: pg14
+test: tableam drop_column_partitioned_table
+# mx-less tests
+test: check_mx
+test: turn_mx_off
test: multi_partitioning
-# run pg14 sequentially as it syncs metadata
-test: pg14
-test: pg12
-test: drop_column_partitioned_table
-test: multi_real_time_transaction
test: undistribute_table
-test: alter_table_set_access_method
-test: alter_distributed_table
+test: turn_mx_on
# ----------
# Miscellaneous tests to check our query planning behavior
# ----------
test: multi_deparse_shard_query multi_distributed_transaction_id intermediate_results limit_intermediate_size rollback_to_savepoint
-test: multi_explain hyperscale_tutorial partitioned_intermediate_results distributed_intermediate_results
+test: multi_explain hyperscale_tutorial partitioned_intermediate_results distributed_intermediate_results multi_real_time_transaction
test: multi_basic_queries cross_join multi_complex_expressions multi_subquery multi_subquery_complex_queries multi_subquery_behavioral_analytics
test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_view multi_sql_function multi_prepare_sql
test: sql_procedure multi_function_in_join row_types materialized_view
@@ -100,6 +95,8 @@ test: multi_task_assignment_policy multi_cross_shard
test: multi_utility_statements
test: multi_dropped_column_aliases foreign_key_restriction_enforcement
test: binary_protocol
+test: alter_table_set_access_method
+test: alter_distributed_table
test: issue_5248
# ---------
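Ordering is the point of this schedule change: everything between turn_mx_off and turn_mx_on runs without synced metadata, so those tests cannot be moved across the two markers or folded into parallel groups that assume MX. The turn_mx_off/turn_mx_on files themselves are not part of this diff; as a hypothetical sketch, they could toggle sync per worker with the same UDFs the removed test code used:

-- turn_mx_off (hypothetical sketch)
SELECT stop_metadata_sync_to_node(nodename, nodeport)
FROM pg_dist_node WHERE noderole = 'primary' AND groupid != 0;
-- turn_mx_on (hypothetical sketch)
SELECT start_metadata_sync_to_node(nodename, nodeport)
FROM pg_dist_node WHERE noderole = 'primary' AND groupid != 0;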

View File

@@ -147,20 +147,6 @@ SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHE
\endif
--- test with metadata sync
-SET citus.shard_replication_factor TO 1;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-CREATE TABLE metadata_sync_table (a BIGSERIAL);
-SELECT create_distributed_table('metadata_sync_table', 'a', colocate_with:='none');
-SELECT alter_distributed_table('metadata_sync_table', shard_count:=6);
-SELECT alter_distributed_table('metadata_sync_table', shard_count:=8);
-SELECT table_name, shard_count FROM public.citus_tables WHERE table_name::text = 'metadata_sync_table';
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-- test complex cascade operations
CREATE TABLE cas_1 (a INT UNIQUE);
CREATE TABLE cas_2 (a INT UNIQUE);

View File

@@ -3,7 +3,6 @@ SET search_path TO drop_column_partitioned_table;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 2580000;
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-- create a partitioned table with some columns that
-- are going to be dropped within the tests
@@ -207,6 +206,3 @@ WHERE
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA drop_column_partitioned_table CASCADE;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);

View File

@@ -487,7 +487,7 @@ SELECT right(table_name, 7)::int as shardid, * FROM (
table_name, constraint_name, constraint_type
FROM information_schema.table_constraints
WHERE
-table_name LIKE 'partitioning_hash_test%' AND
+table_name SIMILAR TO 'partitioning_hash_test%\d{2,}' AND
constraint_type = 'FOREIGN KEY'
ORDER BY 1, 2, 3
) q

View File

@@ -361,12 +361,7 @@ end;$$;
CALL proc_pushdown(1, NULL, NULL);
CALL proc_pushdown(1, ARRAY[2000,1], 'AAAA');
--- make sure that metadata is synced, it may take few seconds
-CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
-RETURNS void
-LANGUAGE C STRICT
-AS 'citus';
-SELECT wait_until_metadata_sync(30000);
-- make sure that metadata is synced
SELECT bool_and(hasmetadata) FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port);
SELECT create_distributed_table('test_proc_table', 'a');
@@ -394,9 +389,6 @@ CALL proc_namedargs_overload(inp=>5);
CALL proc_namedargs_overload(inp=>6.0);
RESET client_min_messages;
--- we don't need metadata syncing anymore
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
-- ALTER STATISTICS .. OWNER TO CURRENT_ROLE
CREATE TABLE st1 (a int, b int);