Reduce setup time of check-minimal and check-minimal-mx (#6117)

This change reduces the setup time of our minimal schedules in two ways:
1. Don't run `multi_cluster_management`, but instead run a much smaller
   sql file with almost the same results. `multi_cluster_management`
   adds and removes lots of nodes and tests all kinds of failure
   scenarios. This is not needed for the minimal schedules. The only
   reason we were using it there was to get a working cluster of the
   layout that the tests expected. The new `minimal_cluster_management`
   test achieves this with much less work, going from ~2s to ~0.5s.
2. Parallelize a bit more of the helper tests.
pull/6123/head
Jelte Fennema 2022-08-02 16:58:59 +02:00 committed by GitHub
parent 28e22c4abf
commit 8866d9ac32
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 132 additions and 10 deletions

View File

@ -0,0 +1,64 @@
SET citus.next_shard_id TO 1220000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1;
-- Tests functions related to cluster membership
-- add the nodes to the cluster with the same nodeids and groupids that
-- multi_cluster_management.sql creates
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 18;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 16;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 16;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 14;
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- Create the same colocation groups as multi_cluster_management.sql
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE cluster_management_test;
CREATE TABLE test_reference_table (y int primary key, name text);
SELECT create_reference_table('test_reference_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
DROP TABLE test_reference_table;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE cluster_management_test;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
CREATE TABLE test_dist (x int, y int);
SELECT create_distributed_table('test_dist', 'x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE test_dist;
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 30;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 18;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 83;

View File

@ -1,8 +1,17 @@
-- create a temporary custom version of this function that's normally defined
-- in multi_test_helpers, so that this file can be run parallel with
-- multi_test_helpers during the minimal schedules
CREATE OR REPLACE FUNCTION run_command_on_master_and_workers_temp(p_sql text)
RETURNS void LANGUAGE plpgsql AS $$
BEGIN
EXECUTE p_sql;
PERFORM run_command_on_workers(p_sql);
END;$$;
-- The following views are intended as alternatives to \d commands, whose
-- output changed in PostgreSQL 10. In particular, they must be used any time
-- a test wishes to print out the structure of a relation, which previously
-- was safely accomplished by a \d invocation.
-SELECT run_command_on_master_and_workers(
+SELECT run_command_on_master_and_workers_temp(
$desc_views$
CREATE VIEW table_fkey_cols AS
SELECT rc.constraint_name AS "name",
@ -97,8 +106,9 @@ ORDER BY a.attrelid, a.attnum;
$desc_views$
);
-run_command_on_master_and_workers
+run_command_on_master_and_workers_temp
---------------------------------------------------------------------
(1 row)
DROP FUNCTION run_command_on_master_and_workers_temp(p_sql text);

View File

@ -1,4 +1,2 @@
-test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
-test: multi_cluster_management
-test: multi_test_catalog_views
-test: tablespace
+test: minimal_cluster_management
+test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers multi_test_catalog_views tablespace

View File

@ -1,9 +1,8 @@
# ----------
# Only run few basic tests to set up a testing environment
# ----------
-test: multi_cluster_management
-test: multi_test_helpers multi_test_helpers_superuser
-test: multi_test_catalog_views
+test: minimal_cluster_management
+test: multi_test_helpers multi_test_helpers_superuser multi_test_catalog_views
# the following test has to be run sequentially
test: base_enable_mx

View File

@ -0,0 +1,40 @@
-- NOTE(review): this is a pg_regress input file; psql echoes comments from it
-- into the paired expected-output (.out) file, so any comment change here must
-- be mirrored by regenerating the expected output.
SET citus.next_shard_id TO 1220000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1;
-- Tests functions related to cluster membership
-- add the nodes to the cluster with the same nodeids and groupids that
-- multi_cluster_management.sql creates
-- (ALTER SEQUENCE ... RESTART sets the next value the sequence hands out, so
-- worker_2 presumably receives nodeid 18 / groupid 16 and worker_1 receives
-- nodeid 16 / groupid 14 -- TODO confirm these match the final assignments
-- multi_cluster_management.sql produces)
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 18;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 16;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 16;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 14;
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
-- Create the same colocation groups as multi_cluster_management.sql
-- each create/drop pair below is presumably there only to register a
-- colocation group and advance pg_dist_colocationid_seq the same way
-- multi_cluster_management.sql does; the tables themselves are not kept
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
DROP TABLE cluster_management_test;
CREATE TABLE test_reference_table (y int primary key, name text);
SELECT create_reference_table('test_reference_table');
DROP TABLE test_reference_table;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
DROP TABLE cluster_management_test;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
CREATE TABLE test_dist (x int, y int);
SELECT create_distributed_table('test_dist', 'x');
DROP TABLE test_dist;
-- finally park the id sequences at the values multi_cluster_management.sql
-- would leave behind, so tests running after this schedule observe identical
-- node/group/placement ids -- presumably; verify against that file's output
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 30;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 18;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 83;

View File

@ -1,8 +1,18 @@
-- create a temporary custom version of this function that's normally defined
-- in multi_test_helpers, so that this file can be run parallel with
-- multi_test_helpers during the minimal schedules
-- (the function is dropped again at the end of this file)
-- NOTE(review): pg_regress echoes comments into the paired .out file, so
-- comment changes here must be mirrored there
CREATE OR REPLACE FUNCTION run_command_on_master_and_workers_temp(p_sql text)
RETURNS void LANGUAGE plpgsql AS $$
BEGIN
-- execute the statement on the coordinator itself first ...
EXECUTE p_sql;
-- ... then fan it out to every worker node
PERFORM run_command_on_workers(p_sql);
END;$$;
-- The following views are intended as alternatives to \d commands, whose
-- output changed in PostgreSQL 10. In particular, they must be used any time
-- a test wishes to print out the structure of a relation, which previously
-- was safely accomplished by a \d invocation.
-SELECT run_command_on_master_and_workers(
+SELECT run_command_on_master_and_workers_temp(
$desc_views$
CREATE VIEW table_fkey_cols AS
SELECT rc.constraint_name AS "name",
@ -98,3 +108,4 @@ ORDER BY a.attrelid, a.attnum;
$desc_views$
);
DROP FUNCTION run_command_on_master_and_workers_temp(p_sql text);