Fix running PG upgrade tests with run_test.py (#6829)

In #6814 we started using the Python test runner for upgrade tests in
run_test.py, instead of the Perl-based one. This had a problem, though:
not all tests in minimal_schedule can be run with the Python runner.
This PR adds a separate minimal schedule for the pg_upgrade tests that
doesn't include the tests that break with the Python runner.

It also fixes various other issues that came up while running the
upgrade tests.
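
As a rough sketch, the base-schedule selection after this change works as
follows (the function and schedule names are taken from the diff below; the
surrounding code is illustrative, not the actual run_test.py):

    # Illustrative sketch, not the real run_test.py.
    ARBITRARY_SCHEDULE_NAMES = {"multi_mx_schedule"}  # hypothetical contents

    def default_base_schedule(test_schedule):
        # pg_upgrade tests get their own minimal schedule, since some tests
        # in the generic minimal_schedule break under the Python runner
        if "pg_upgrade" in test_schedule:
            return "minimal_pg_upgrade_schedule"
        if test_schedule in ARBITRARY_SCHEDULE_NAMES:
            return None  # arbitrary-config schedules are not supported
        return "minimal_schedule"

    print(default_base_schedule("pg_upgrade_schedule"))  # minimal_pg_upgrade_schedule
    print(default_base_schedule("multi_schedule"))       # minimal_schedule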
Jelte Fennema 2023-04-24 15:54:32 +02:00 committed by GitHub
parent a6a7271e63
commit a5f4fece13
5 changed files with 40 additions and 21 deletions

View File

@@ -1,4 +1,10 @@
-test: upgrade_basic_after upgrade_type_after upgrade_ref2ref_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks upgrade_distributed_triggers_after
+test: upgrade_basic_after upgrade_ref2ref_after upgrade_type_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks
+# This test cannot be run with run_test.py currently due to its dependence on
+# the specific PG versions that we use to run upgrade tests. For now we leave
+# it out of the parallel line, so that flaky test detection can at least work
+# for the other tests.
+test: upgrade_distributed_triggers_after
 # This attempts dropping citus extension (and rollbacks), so please do
 # not run in parallel with any other tests.

View File

@@ -15,7 +15,7 @@ from typing import Optional
 import common
 from common import REGRESS_DIR, capture, run
-from config import ARBITRARY_SCHEDULE_NAMES, MASTER_VERSION, CitusDefaultClusterConfig
+from config import ARBITRARY_SCHEDULE_NAMES, MASTER_VERSION, CitusBaseClusterConfig
 def main():
@@ -178,7 +178,7 @@ def run_schedule_with_python(schedule):
         "--bindir": bindir,
     }
-    config = CitusDefaultClusterConfig(args)
+    config = CitusBaseClusterConfig(args)
     common.initialize_temp_dir(config.temp_dir)
     common.initialize_citus_cluster(
         config.bindir, config.datadir, config.settings, config
@@ -242,7 +242,7 @@ def default_base_schedule(test_schedule, args):
         return None
     if "pg_upgrade" in test_schedule:
-        return "minimal_schedule"
+        return "minimal_pg_upgrade_schedule"
     if test_schedule in ARBITRARY_SCHEDULE_NAMES:
         print(f"WARNING: Arbitrary config schedule ({test_schedule}) is not supported.")
@@ -301,9 +301,21 @@ def test_dependencies(test_name, test_schedule, schedule_line, args):
     if schedule_line_is_upgrade_after(schedule_line):
         # upgrade_xxx_after tests always depend on upgrade_xxx_before
         test_names = schedule_line.split()[1:]
+        before_tests = []
+        # _after tests have implicit dependencies on _before tests
+        for test_name in test_names:
+            if "_after" in test_name:
+                before_tests.append(test_name.replace("_after", "_before"))
+        # the upgrade_columnar_before renames the schema, on which other
+        # "after" tests depend. So we make sure to execute it always.
+        if "upgrade_columnar_before" not in before_tests:
+            before_tests.append("upgrade_columnar_before")
         return TestDeps(
             default_base_schedule(test_schedule, args),
-            [test_name.replace("_after", "_before")],
+            before_tests,
         )
     # before_ tests leave stuff around on purpose for the after tests. So they
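
To illustrate the dependency expansion above: for a made-up schedule line,
the loop derives the implicit _before dependencies and always appends
upgrade_columnar_before (a runnable sketch mirroring the diff; only the
schedule line is invented):

    schedule_line = "test: upgrade_basic_after upgrade_type_after"
    test_names = schedule_line.split()[1:]
    before_tests = []
    # each _after test implicitly depends on its _before counterpart
    for test_name in test_names:
        if "_after" in test_name:
            before_tests.append(test_name.replace("_after", "_before"))
    # upgrade_columnar_before renames a schema other _after tests rely on
    if "upgrade_columnar_before" not in before_tests:
        before_tests.append("upgrade_columnar_before")
    print(before_tests)
    # ['upgrade_basic_before', 'upgrade_type_before', 'upgrade_columnar_before']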

View File

@@ -9,31 +9,31 @@ SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' and tablename NOT LI
 upgrade_basic | tp | tp_pkey | | CREATE UNIQUE INDEX tp_pkey ON upgrade_basic.tp USING btree (a)
 (3 rows)
-SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard;
+SELECT nextval('pg_dist_shardid_seq') > MAX(shardid) FROM pg_dist_shard;
  ?column?
 ---------------------------------------------------------------------
  t
 (1 row)
-SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement;
+SELECT nextval('pg_dist_placement_placementid_seq') > MAX(placementid) FROM pg_dist_placement;
  ?column?
 ---------------------------------------------------------------------
  t
 (1 row)
-SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node;
+SELECT nextval('pg_dist_groupid_seq') > MAX(groupid) FROM pg_dist_node;
  ?column?
 ---------------------------------------------------------------------
  t
 (1 row)
-SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node;
+SELECT nextval('pg_dist_node_nodeid_seq') > MAX(nodeid) FROM pg_dist_node;
  ?column?
 ---------------------------------------------------------------------
  t
 (1 row)
-SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation;
+SELECT nextval('pg_dist_colocationid_seq') > MAX(colocationid) FROM pg_dist_colocation;
  ?column?
 ---------------------------------------------------------------------
  t
@@ -45,7 +45,7 @@ SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_co
 SELECT
     CASE WHEN MAX(operation_id) IS NULL
     THEN true
-    ELSE nextval('pg_dist_operationid_seq') = MAX(operation_id)+1
+    ELSE nextval('pg_dist_operationid_seq') > MAX(operation_id)
     END AS check_operationid
 FROM pg_dist_cleanup;
  check_operationid
@@ -56,7 +56,7 @@ SELECT
 SELECT
     CASE WHEN MAX(record_id) IS NULL
     THEN true
-    ELSE nextval('pg_dist_cleanup_recordid_seq') = MAX(record_id)+1
+    ELSE nextval('pg_dist_cleanup_recordid_seq') > MAX(record_id)
     END AS check_recordid
 FROM pg_dist_cleanup;
  check_recordid
@@ -93,8 +93,8 @@ SELECT sequence_name FROM information_schema.sequences
     'pg_dist_groupid_seq',
     'pg_dist_node_nodeid_seq',
     'pg_dist_colocationid_seq',
-    'pg_dist_operationid_seq',
-    'pg_dist_cleanup_recordid_seq',
+    'pg_dist_operationid_seq',
+    'pg_dist_cleanup_recordid_seq',
     'pg_dist_background_job_job_id_seq',
     'pg_dist_background_task_task_id_seq',
     'pg_dist_clock_logical_seq'

View File

@@ -0,0 +1 @@
+test: multi_test_helpers multi_test_helpers_superuser multi_test_catalog_views

View File

@@ -3,24 +3,24 @@ BEGIN;
 -- We have the tablename filter to avoid adding an alternative output for when the coordinator is in metadata vs when not
 SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' and tablename NOT LIKE 'r_%' ORDER BY tablename;
-SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard;
-SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement;
-SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node;
-SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node;
-SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation;
+SELECT nextval('pg_dist_shardid_seq') > MAX(shardid) FROM pg_dist_shard;
+SELECT nextval('pg_dist_placement_placementid_seq') > MAX(placementid) FROM pg_dist_placement;
+SELECT nextval('pg_dist_groupid_seq') > MAX(groupid) FROM pg_dist_node;
+SELECT nextval('pg_dist_node_nodeid_seq') > MAX(nodeid) FROM pg_dist_node;
+SELECT nextval('pg_dist_colocationid_seq') > MAX(colocationid) FROM pg_dist_colocation;
 -- while testing sequences on pg_dist_cleanup, they return null in pg upgrade schedule
 -- but return a valid value in citus upgrade schedule
 -- that's why we accept both NULL and MAX()+1 here
 SELECT
     CASE WHEN MAX(operation_id) IS NULL
     THEN true
-    ELSE nextval('pg_dist_operationid_seq') = MAX(operation_id)+1
+    ELSE nextval('pg_dist_operationid_seq') > MAX(operation_id)
     END AS check_operationid
 FROM pg_dist_cleanup;
 SELECT
     CASE WHEN MAX(record_id) IS NULL
     THEN true
-    ELSE nextval('pg_dist_cleanup_recordid_seq') = MAX(record_id)+1
+    ELSE nextval('pg_dist_cleanup_recordid_seq') > MAX(record_id)
     END AS check_recordid
 FROM pg_dist_cleanup;
 SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job;
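
The checks above were relaxed from nextval(...) = MAX(...)+1 to
nextval(...) > MAX(...): once these tests run through run_test.py with a
base schedule, other tests may have consumed additional sequence values, so
nextval can legitimately be further ahead than MAX()+1. What the test needs
to guarantee is only that the sequence never collides with existing rows.
A minimal sketch of running such invariant checks from Python, assuming a
locally reachable coordinator and psycopg2 (the connection string and the
list of checks are hypothetical):

    import psycopg2

    # (sequence, id column, table) triples to verify; illustrative subset
    CHECKS = [
        ("pg_dist_shardid_seq", "shardid", "pg_dist_shard"),
        ("pg_dist_node_nodeid_seq", "nodeid", "pg_dist_node"),
    ]

    conn = psycopg2.connect("dbname=postgres")  # hypothetical connection
    with conn, conn.cursor() as cur:
        for seq, col, table in CHECKS:
            # COALESCE handles empty tables, like the job_id check above
            cur.execute(
                f"SELECT nextval('{seq}') > COALESCE(MAX({col}), 0) FROM {table}"
            )
            assert cur.fetchone()[0], f"{seq} lags behind {table}.{col}"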