diff --git a/src/test/regress/.gitignore b/src/test/regress/.gitignore index 6e7bcc802..42f938a46 100644 --- a/src/test/regress/.gitignore +++ b/src/test/regress/.gitignore @@ -6,6 +6,7 @@ /tmp_upgrade/ /tmp_citus_upgrade/ /tmp_citus_tarballs/ +/build/ /results/ /log/ diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile index 85d519fb7..246a5736f 100644 --- a/src/test/regress/Makefile +++ b/src/test/regress/Makefile @@ -26,6 +26,8 @@ MULTI_REGRESS_OPTS = --inputdir=$(citus_abs_srcdir) $(pg_regress_locale_flags) - pg_upgrade_check = $(citus_abs_srcdir)/upgrade/pg_upgrade_test.py citus_upgrade_check = $(citus_abs_srcdir)/upgrade/citus_upgrade_test.py +template_isolation_files = $(shell find $(citus_abs_srcdir)/spec/ -name '*.spec') +generated_isolation_files = $(patsubst $(citus_abs_srcdir)/spec/%,$(citus_abs_srcdir)/build/specs/%,$(template_isolation_files)) # XXX: Can't actually do useful testruns against install - $libdir # etc will point to the directory configured during postgres' # build. We could copy the installed tree around, but that's quite @@ -48,6 +50,47 @@ check: check-full # check-full triggers all tests that ought to be run routinely check-full: check-multi check-multi-mx check-multi-task-tracker-extra check-worker check-follower-cluster check-failure + +ISOLATION_DEPDIR=.deps/isolation +ISOLATION_BUILDDIR=build/specs + +# this can be used to print the value of a variable +# ex: make print-generated_isolation_files +print-% : ; @echo $* = $($*) + +.PHONY: create-symbolic-link + +create-symbolic-link: + mkdir -p $(citus_abs_srcdir)/build + ln -fsn $(citus_abs_srcdir)/expected $(citus_abs_srcdir)/build/ + + +# How this target works: +# cpp is run before the isolation tests to preprocess the spec files. +# This lets a spec include any other file; currently it is used to include the MX common part. +# The generated spec files are put into /build/specs to keep them clearly separated from the template files. +# A symbolic link to /expected is created at build/expected/. +# When the isolation tests run, build is passed as the inputdir, so the spec files in +# build/specs are executed and their output is checked against build/expected. +# /specs is renamed to /spec because postgres first looks for a specs directory under the +# current directory; the rename avoids that, since the isolation tests now run from build/specs. +$(generated_isolation_files): $(citus_abs_srcdir)/build/specs/%: $(citus_abs_srcdir)/spec/% + @mkdir -p $(citus_abs_srcdir)/$(ISOLATION_DEPDIR) $(citus_abs_srcdir)/$(ISOLATION_BUILDDIR) + # -MF is used to store dependency files (.Po) in another directory for separation + # -MT is used to change the target of the rule emitted by dependency generation. + # -P is used to inhibit generation of linemarkers in the output from the preprocessor. + # -undef is used to not predefine any system-specific or GCC-specific macros. + # see `man cpp` for further information + cd $(citus_abs_srcdir) && cpp -undef -w -P -MMD -MP -MF$(ISOLATION_DEPDIR)/$(*F).Po -MT$@ $< > $@ + + +Isolation_Po_files := $(wildcard $(ISOLATION_DEPDIR)/*.Po) +ifneq (,$(Isolation_Po_files)) +include $(Isolation_Po_files) +endif + +isolation_test_files=$(generated_isolation_files) create-symbolic-link + # using pg_regress_multi_check unnecessarily starts up multiple nodes, which isn't needed # for check-worker. But that's harmless besides a few cycles.
check-worker: all @@ -101,7 +144,7 @@ check-failure-non-adaptive-base: all tempinstall-main --server-option=citus.task_executor_type=real-time \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/failure_base_schedule $(EXTRA_TESTS) -check-isolation-non-adaptive: all tempinstall-main +check-isolation-non-adaptive: all tempinstall-main $(isolation_test_files) $(pg_regress_multi_check) --load-extension=citus --isolationtester \ --server-option=citus.task_executor_type=real-time \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS) @@ -111,11 +154,11 @@ check-multi-vg: all tempinstall-main --pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(VALGRIND_LOG_FILE) \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS) -check-isolation: all tempinstall-main +check-isolation: all tempinstall-main $(isolation_test_files) $(pg_regress_multi_check) --load-extension=citus --isolationtester \ - -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS) + -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS) -check-isolation-base: all tempinstall-main +check-isolation-base: all tempinstall-main $(isolation_test_files) $(pg_regress_multi_check) --load-extension=citus --isolationtester \ -- $(MULTI_REGRESS_OPTS) $(EXTRA_TESTS) diff --git a/src/test/regress/expected/isolation_citus_dist_activity.out b/src/test/regress/expected/isolation_citus_dist_activity.out index 6950682f0..9d0b9e331 100644 --- a/src/test/regress/expected/isolation_citus_dist_activity.out +++ b/src/test/regress/expected/isolation_citus_dist_activity.out @@ -13,23 +13,22 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-alter-table: ALTER TABLE test_table ADD COLUMN x INT; step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep step s2-view-dist: - SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; - + SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname @@ -37,7 +36,7 @@ query query_hostname query_hostport master_query_host_namemaster_query_ ALTER TABLE test_table ADD COLUMN x INT; coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression step s3-view-worker: - SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport 
master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname @@ -54,13 +53,13 @@ SELECT worker_apply_shard_ddl_command (1300001, 'public', ' ALTER TABLE test_table ADD COLUMN x INT; ')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -76,43 +75,42 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-insert: - INSERT INTO test_table VALUES (100, 100); + INSERT INTO test_table VALUES (100, 100); step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep step s2-view-dist: - SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; - + SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname - INSERT INTO test_table VALUES (100, 100); + INSERT INTO test_table VALUES (100, 100); coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression step s3-view-worker: - SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -128,10 +126,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-select: SELECT count(*) FROM test_table; @@ -140,14 +138,13 @@ count 0 step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep step s2-view-dist: - SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; - + SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, 
wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname @@ -155,7 +152,7 @@ query query_hostname query_hostport master_query_host_namemaster_query_ SELECT count(*) FROM test_table; coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression step s3-view-worker: - SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname @@ -164,13 +161,13 @@ SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -186,10 +183,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-select-router: SELECT count(*) FROM test_table WHERE column1 = 55; @@ -198,14 +195,13 @@ count 0 step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep step s2-view-dist: - SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; - + SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname @@ -213,17 +209,17 @@ query query_hostname query_hostport master_query_host_namemaster_query_ SELECT count(*) FROM test_table WHERE column1 = 55; coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression step s3-view-worker: - SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, 
wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; diff --git a/src/test/regress/expected/isolation_dump_global_wait_edges.out b/src/test/regress/expected/isolation_dump_global_wait_edges.out index 48626f3ca..74e699f61 100644 --- a/src/test/regress/expected/isolation_dump_global_wait_edges.out +++ b/src/test/regress/expected/isolation_dump_global_wait_edges.out @@ -24,7 +24,6 @@ step detector-dump-wait-edges: waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting; - SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; waiting_transaction_numblocking_transaction_numblocking_transaction_waiting @@ -72,7 +71,6 @@ step detector-dump-wait-edges: waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting; - SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; waiting_transaction_numblocking_transaction_numblocking_transaction_waiting diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out index 6aa249981..d93c3d60c 100644 --- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out +++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out @@ -6,22 +6,17 @@ starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-pub 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -57,7 +52,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -66,11 +61,11 @@ step s2-public-schema: SET search_path TO public; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; @@ -82,15 +77,12 @@ create_distributed_table step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -129,22 +121,17 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-work 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -180,10 +167,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -192,11 +179,11 @@ step s2-public-schema: SET search_path TO public; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; @@ -206,20 +193,17 @@ create_distributed_table step s2-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -258,22 +242,17 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-s 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -309,26 +288,26 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-public-schema: SET search_path TO public; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); create_distributed_table step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-worker: <... completed> ?column? 
@@ -340,15 +319,12 @@ step s1-commit: step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -387,22 +363,17 @@ starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-cre 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -438,7 +409,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -448,11 +419,11 @@ step s2-create-schema: SET search_path TO myschema; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; @@ -464,15 +435,12 @@ create_distributed_table step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -512,22 +480,17 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-work 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -563,10 +526,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -576,11 +539,11 @@ step s2-create-schema: SET search_path TO myschema; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; @@ -590,20 +553,17 @@ create_distributed_table step s2-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -643,22 +603,17 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-s 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -694,27 +649,27 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); create_distributed_table step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-worker: <... completed> ?column? 
@@ -726,15 +681,12 @@ step s1-commit: step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -774,22 +726,17 @@ starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2- 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -829,32 +776,32 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s3-use-schema: SET search_path TO myschema; step s3-create-table: - CREATE TABLE t2 (a int, b int); + CREATE TABLE t2 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t2', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t2', 'a'); step s1-commit: COMMIT; @@ -864,27 +811,24 @@ create_distributed_table step s2-commit: - COMMIT; + COMMIT; step s3-create-table: <... 
completed> create_distributed_table step s3-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -924,22 +868,17 @@ starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schem 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -972,7 +911,7 @@ master_remove_node step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? @@ -982,52 +921,49 @@ step s2-create-schema: SET search_path TO myschema; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s3-use-schema: SET search_path TO myschema; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); create_distributed_table step s3-create-table: - CREATE TABLE t2 (a int, b int); + CREATE TABLE t2 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t2', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t2', 'a'); step s2-commit: - COMMIT; + COMMIT; step s3-create-table: <... 
completed> create_distributed_table step s3-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -1067,22 +1003,17 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -1118,13 +1049,13 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -1138,17 +1069,17 @@ step s3-create-schema2: SET search_path TO myschema2; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s3-create-table: - CREATE TABLE t2 (a int, b int); + CREATE TABLE t2 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t2', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t2', 'a'); step s1-commit: COMMIT; @@ -1162,23 +1093,20 @@ create_distributed_table step s2-commit: - COMMIT; + COMMIT; step s3-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -1219,22 +1147,17 @@ starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-pub 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -1270,7 +1193,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? @@ -1279,7 +1202,7 @@ step s2-public-schema: SET search_path TO public; step s2-create-type: - CREATE TYPE tt1 AS (a int, b int); + CREATE TYPE tt1 AS (a int, b int); step s1-commit: COMMIT; @@ -1288,15 +1211,12 @@ step s2-create-type: <... 
completed> step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -1336,22 +1256,17 @@ starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2- 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -1390,10 +1305,10 @@ step s2-public-schema: SET search_path TO public; step s2-create-type: - CREATE TYPE tt1 AS (a int, b int); + CREATE TYPE tt1 AS (a int, b int); step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -1404,15 +1319,12 @@ step s1-commit: step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -1452,22 +1364,17 @@ starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-s 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -1503,30 +1410,30 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; step s2-create-type: - CREATE TYPE tt1 AS (a int, b int); + CREATE TYPE tt1 AS (a int, b int); step s2-create-table-with-type: - CREATE TABLE t1 (a int, b tt1); + CREATE TABLE t1 (a int, b tt1); -- session needs to have replication factor set to 1, can't do in setup - SET citus.replication_model TO 'streaming'; - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.replication_model TO 'streaming'; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); create_distributed_table step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-worker: <... completed> ?column? 
@@ -1538,15 +1445,12 @@ step s1-commit: step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -1587,22 +1491,17 @@ starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-pub 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -1638,7 +1537,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -1658,10 +1557,10 @@ create_distributed_function step s2-begin: - BEGIN; + BEGIN; step s2-commit: - COMMIT; + COMMIT; step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); @@ -1672,15 +1571,12 @@ wait_until_metadata_sync step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -1720,22 +1616,17 @@ starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2- 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -1781,10 +1672,10 @@ create_distributed_function step s2-begin: - BEGIN; + BEGIN; step s2-commit: - COMMIT; + COMMIT; step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); @@ -1793,7 +1684,7 @@ wait_until_metadata_sync step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -1810,15 +1701,12 @@ wait_until_metadata_sync step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); @@ -1858,22 +1746,17 @@ starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2- 1 step s1-print-distributed-objects: SELECT 1 FROM master_add_node('localhost', 57638); - -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); - SELECT master_remove_node('localhost', 57638); ?column? @@ -1906,7 +1789,7 @@ master_remove_node step s2-begin: - BEGIN; + BEGIN; step s2-create-schema: CREATE SCHEMA myschema; @@ -1920,7 +1803,7 @@ create_distributed_function step s2-commit: - COMMIT; + COMMIT; step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); @@ -1932,7 +1815,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
@@ -1949,15 +1832,12 @@ wait_until_metadata_sync step s2-print-distributed-objects: -- print an overview of all distributed objects SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object ORDER BY 1; - -- print if the schema has been created SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); - -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); - -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add';$$); diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out index 3c69faafd..3d05d6696 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -6,11 +6,10 @@ run_command_on_workers (localhost,57637,t,"GRANT ROLE") (localhost,57638,t,"GRANT ROLE") step s1-grant: - GRANT ALL ON test_table TO test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1'); - - GRANT ALL ON test_table TO test_user_2; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); + GRANT ALL ON test_table TO test_user_1; + SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1'); + GRANT ALL ON test_table TO test_user_2; + SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); bool_and @@ -19,19 +18,19 @@ bool_and t step s1-begin-insert: - BEGIN; - SET ROLE test_user_1; - INSERT INTO test_table VALUES (100, 100); + BEGIN; + SET ROLE test_user_1; + INSERT INTO test_table VALUES (100, 100); step s2-begin-insert: - BEGIN; - SET ROLE test_user_2; - INSERT INTO test_table VALUES (200, 200); + BEGIN; + SET ROLE test_user_2; + INSERT INTO test_table VALUES (200, 200); step s3-as-admin: - -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + -- Admin should be able to see all transactions + SELECT count(*) FROM get_all_active_transactions(); + SELECT count(*) FROM get_global_active_transactions(); count @@ -40,10 +39,10 @@ count 4 step s3-as-user-1: - -- User should only be able to see its own transactions - SET ROLE test_user_1; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + -- User should only be able to see its own transactions + SET ROLE test_user_1; + SELECT count(*) FROM get_all_active_transactions(); + SELECT count(*) FROM get_global_active_transactions(); count @@ -52,10 +51,10 @@ count 2 step s3-as-readonly: - -- Other user should not see transactions - SET ROLE test_readonly; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + -- Other user should not see transactions + SET ROLE test_readonly; + SELECT count(*) FROM get_all_active_transactions(); + SELECT count(*) FROM get_global_active_transactions(); count @@ -64,10 +63,10 @@ count 0 step 
s3-as-monitor: - -- Monitor should see all transactions - SET ROLE test_monitor; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + -- Monitor should see all transactions + SET ROLE test_monitor; + SELECT count(*) FROM get_all_active_transactions(); + SELECT count(*) FROM get_global_active_transactions(); count @@ -76,10 +75,10 @@ count 4 step s1-commit: - COMMIT; + COMMIT; step s2-commit: - COMMIT; + COMMIT; run_command_on_workers diff --git a/src/test/regress/expected/isolation_get_distributed_wait_queries.out b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out similarity index 100% rename from src/test/regress/expected/isolation_get_distributed_wait_queries.out rename to src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out diff --git a/src/test/regress/expected/isolation_master_append_table.out b/src/test/regress/expected/isolation_master_append_table.out index a0d4a26e5..6c88f7a56 100644 --- a/src/test/regress/expected/isolation_master_append_table.out +++ b/src/test/regress/expected/isolation_master_append_table.out @@ -5,27 +5,26 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-master_append_table_to_shard: - SELECT - master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) - FROM - pg_dist_shard - WHERE - 'table_to_append'::regclass::oid = logicalrelid; + SELECT + master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) + FROM + pg_dist_shard + WHERE + 'table_to_append'::regclass::oid = logicalrelid; master_append_table_to_shard 0.0426667 step s2-master_append_table_to_shard: - - SELECT - master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) - FROM - pg_dist_shard - WHERE - 'table_to_append'::regclass::oid = logicalrelid; + SELECT + master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) + FROM + pg_dist_shard + WHERE + 'table_to_append'::regclass::oid = logicalrelid; step s1-commit: COMMIT; @@ -35,5 +34,5 @@ master_append_table_to_shard 0.064 step s2-commit: - COMMIT; + COMMIT; diff --git a/src/test/regress/expected/isolation_progress_monitoring.out b/src/test/regress/expected/isolation_progress_monitoring.out index e7c208c9a..e2b699394 100644 --- a/src/test/regress/expected/isolation_progress_monitoring.out +++ b/src/test/regress/expected/isolation_progress_monitoring.out @@ -2,20 +2,18 @@ Parsed test spec with 5 sessions starting permutation: take-locks s1-start-operation s2-start-operation s3-start-operation show-progress release-locks-1 show-progress release-locks-2 show-progress release-locks-3 step take-locks: - -- Locks for steps of sample operation in s1 - SELECT pg_advisory_lock(10); - SELECT pg_advisory_lock(11); - SELECT pg_advisory_lock(12); - - -- Locks for steps of sample operation in s2 - SELECT pg_advisory_lock(20); - SELECT pg_advisory_lock(21); - SELECT pg_advisory_lock(22); - - -- Locks for steps of sample operation in s3 - SELECT pg_advisory_lock(30); - SELECT pg_advisory_lock(31); - SELECT pg_advisory_lock(32); + -- Locks for steps of sample operation in s1 + SELECT pg_advisory_lock(10); + SELECT pg_advisory_lock(11); + SELECT pg_advisory_lock(12); + -- Locks for steps of sample operation in s2 + SELECT pg_advisory_lock(20); + SELECT pg_advisory_lock(21); + SELECT pg_advisory_lock(22); + -- Locks for steps of sample operation in s3 + SELECT pg_advisory_lock(30); + SELECT pg_advisory_lock(31); + SELECT pg_advisory_lock(32); pg_advisory_lock 
@@ -45,17 +43,17 @@ pg_advisory_lock step s1-start-operation: - SELECT sample_operation(1337, 10, -1); + SELECT sample_operation(1337, 10, -1); step s2-start-operation: - SELECT sample_operation(1337, 20, 2); + SELECT sample_operation(1337, 20, 2); step s3-start-operation: - SELECT sample_operation(3778, 30, 9); + SELECT sample_operation(3778, 30, 9); step show-progress: - SELECT show_progress(1337); - SELECT show_progress(3778); + SELECT show_progress(1337); + SELECT show_progress(3778); show_progress @@ -68,10 +66,10 @@ show_progress (0,0) (1,0) step release-locks-1: - -- Release the locks of first steps of sample operations - SELECT pg_advisory_unlock(10); - SELECT pg_advisory_unlock(20); - SELECT pg_advisory_unlock(30); + -- Release the locks of first steps of sample operations + SELECT pg_advisory_unlock(10); + SELECT pg_advisory_unlock(20); + SELECT pg_advisory_unlock(30); pg_advisory_unlock @@ -83,8 +81,8 @@ pg_advisory_unlock t step show-progress: - SELECT show_progress(1337); - SELECT show_progress(3778); + SELECT show_progress(1337); + SELECT show_progress(3778); show_progress @@ -97,10 +95,10 @@ show_progress (0,9) (1,0) step release-locks-2: - -- Release the locks of second steps of sample operations - SELECT pg_advisory_unlock(11); - SELECT pg_advisory_unlock(21); - SELECT pg_advisory_unlock(31); + -- Release the locks of second steps of sample operations + SELECT pg_advisory_unlock(11); + SELECT pg_advisory_unlock(21); + SELECT pg_advisory_unlock(31); pg_advisory_unlock @@ -112,8 +110,8 @@ pg_advisory_unlock t step show-progress: - SELECT show_progress(1337); - SELECT show_progress(3778); + SELECT show_progress(1337); + SELECT show_progress(3778); show_progress @@ -126,10 +124,10 @@ show_progress (0,9) (1,9) step release-locks-3: - -- Release the locks of final steps of sample operations - SELECT pg_advisory_unlock(12); - SELECT pg_advisory_unlock(22); - SELECT pg_advisory_unlock(32); + -- Release the locks of final steps of sample operations + SELECT pg_advisory_unlock(12); + SELECT pg_advisory_unlock(22); + SELECT pg_advisory_unlock(32); pg_advisory_unlock diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index 780b3647c..a4bb53818 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -65,7 +65,7 @@ test: isolation_multiuser_locking # MX tests test: isolation_reference_on_mx test: isolation_ref2ref_foreign_keys_on_mx -test: isolation_get_distributed_wait_queries +test: isolation_get_distributed_wait_queries_mx test: isolation_insert_vs_all_on_mx test: isolation_select_vs_all_on_mx test: isolation_update_delete_upsert_vs_all_on_mx diff --git a/src/test/regress/specs/README.md b/src/test/regress/spec/README.md similarity index 80% rename from src/test/regress/specs/README.md rename to src/test/regress/spec/README.md index aa6ac6d64..1ec38ed1a 100644 --- a/src/test/regress/specs/README.md +++ b/src/test/regress/spec/README.md @@ -1,6 +1,9 @@ In this folder, all tests which in the format of '*_add.spec' organized according to specific format. +You should use `//` for comments in mx files, not `#`. We preprocess mx files with `cpp` to +include `isolation_mx_common.include.spec`. + For isolation tests, we selected 'n' representative operations and we aimed to perform all possible pairs of 'n' operations together. So first test just runs first of these 'n' operation with remaining 'n - 1' operation.
Similary, second diff --git a/src/test/regress/specs/isolation_add_node_vs_reference_table_operations.spec b/src/test/regress/spec/isolation_add_node_vs_reference_table_operations.spec similarity index 85% rename from src/test/regress/specs/isolation_add_node_vs_reference_table_operations.spec rename to src/test/regress/spec/isolation_add_node_vs_reference_table_operations.spec index fa484651a..424c25326 100644 --- a/src/test/regress/specs/isolation_add_node_vs_reference_table_operations.spec +++ b/src/test/regress/spec/isolation_add_node_vs_reference_table_operations.spec @@ -1,5 +1,5 @@ -# the test expects to have zero nodes in pg_dist_node at the beginning -# add single one of the nodes for the purpose of the test +// the test expects to have zero nodes in pg_dist_node at the beginning +// add single one of the nodes for the purpose of the test setup { SET citus.shard_replication_factor to 1; @@ -13,7 +13,7 @@ setup SELECT create_distributed_table('test_table','x'); } -# ensure neither node's added for the remaining of the isolation tests +// ensure neither node's added for the remaining of the isolation tests teardown { DROP TABLE test_reference_table; @@ -46,8 +46,8 @@ step "s1-commit" session "s2" -# COPY accesses all shard/placement metadata, so should be enough for -# loading the cache +// COPY accesses all shard/placement metadata, so should be enough for +// loading the cache step "s2-load-metadata-cache" { COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; @@ -113,10 +113,10 @@ step "s2-print-index-count" nodeport; } -# verify that copy/insert gets the invalidation and re-builts its metadata cache -# note that we need to run "s1-load-metadata-cache" and "s2-load-metadata-cache" -# to ensure that metadata is cached otherwise the test would be useless since -# the cache would be empty and the metadata data is gathered from the tables directly +// verify that copy/insert gets the invalidation and re-builts its metadata cache +// note that we need to run "s1-load-metadata-cache" and "s2-load-metadata-cache" +// to ensure that metadata is cached otherwise the test would be useless since +// the cache would be empty and the metadata data is gathered from the tables directly permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content" permutation "s2-load-metadata-cache" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content" permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content" @@ -127,7 +127,7 @@ permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-creat permutation "s2-load-metadata-cache" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2" -# same tests without loading the cache +// same tests without loading the cache permutation "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content" permutation "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content" permutation "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content" diff --git a/src/test/regress/specs/isolation_add_remove_node.spec b/src/test/regress/spec/isolation_add_remove_node.spec similarity index 74% rename from src/test/regress/specs/isolation_add_remove_node.spec rename to 
src/test/regress/spec/isolation_add_remove_node.spec index 348553a08..40a008749 100644 --- a/src/test/regress/specs/isolation_add_remove_node.spec +++ b/src/test/regress/spec/isolation_add_remove_node.spec @@ -107,43 +107,43 @@ step "s2-commit" COMMIT; } -# session 1 adds a node, session 2 removes it, should be ok +// session 1 adds a node, session 2 removes it, should be ok permutation "s1-begin" "s1-add-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes" -# add a different node from 2 sessions, should be ok +// add a different node from 2 sessions, should be ok permutation "s1-begin" "s1-add-node-1" "s2-add-node-2" "s1-commit" "s1-show-nodes" -# add the same node from 2 sessions, should be ok (idempotent) +// add the same node from 2 sessions, should be ok (idempotent) permutation "s1-begin" "s1-add-node-1" "s2-add-node-1" "s1-commit" "s1-show-nodes" -# add a different node from 2 sessions, one aborts +// add a different node from 2 sessions, one aborts permutation "s1-begin" "s1-add-node-1" "s2-add-node-2" "s1-abort" "s1-show-nodes" -# add the same node from 2 sessions, one aborts +// add the same node from 2 sessions, one aborts permutation "s1-begin" "s1-add-node-1" "s2-add-node-1" "s1-abort" "s1-show-nodes" -# remove a different node from 2 transactions, should be ok +// remove a different node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-add-node-2" "s1-begin" "s1-remove-node-1" "s2-remove-node-2" "s1-commit" "s1-show-nodes" -# remove the same node from 2 transactions, should be ok (idempotent) +// remove the same node from 2 transactions, should be ok (idempotent) permutation "s1-add-node-1" "s1-begin" "s1-remove-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes" -# activate an active node from 2 transactions, should be ok +// activate an active node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-begin" "s1-activate-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" -# disable an active node from 2 transactions, should be ok +// disable an active node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" -# activate an inactive node from 2 transactions, should be ok +// activate an inactive node from 2 transactions, should be ok permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" -# disable an inactive node from 2 transactions, should be ok +// disable an inactive node from 2 transactions, should be ok permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" -# disable and activate an active node from 2 transactions, should be ok +// disable and activate an active node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" -# activate and disable an active node node from 2 transactions, should be ok +// activate and disable an active node node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" -# disable and activate an inactive node from 2 transactions, should be ok +// disable and activate an inactive node from 2 transactions, should be ok permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" -# activate and disable an inactive node node from 2 transactions, should be ok +// activate and disable an 
inactive node node from 2 transactions, should be ok permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" -# activate and disable an inactive node from 2 transactions, one aborts +// activate and disable an inactive node from 2 transactions, one aborts permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes" -# disable an active node from 2 transactions, one aborts +// disable an active node from 2 transactions, one aborts permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes" diff --git a/src/test/regress/specs/isolation_alter_role_propagation.spec b/src/test/regress/spec/isolation_alter_role_propagation.spec similarity index 100% rename from src/test/regress/specs/isolation_alter_role_propagation.spec rename to src/test/regress/spec/isolation_alter_role_propagation.spec diff --git a/src/test/regress/specs/isolation_append_copy_vs_all.spec b/src/test/regress/spec/isolation_append_copy_vs_all.spec similarity index 96% rename from src/test/regress/specs/isolation_append_copy_vs_all.spec rename to src/test/regress/spec/isolation_append_copy_vs_all.spec index 4778ac261..476ed2720 100644 --- a/src/test/regress/specs/isolation_append_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_append_copy_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. +// -# create append distributed table to test behavior of COPY in concurrent operations +// create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -10,13 +10,13 @@ setup SELECT create_distributed_table('append_copy', 'id', 'append'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS append_copy CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } @@ -51,7 +51,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } @@ -79,10 +79,10 @@ step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELE step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); } -# permutations - COPY vs COPY +// permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" -# permutations - COPY first +// permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" 
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" @@ -103,7 +103,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-apply-delete-command permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - COPY second +// permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_cancellation.spec b/src/test/regress/spec/isolation_cancellation.spec similarity index 71% rename from src/test/regress/specs/isolation_cancellation.spec rename to src/test/regress/spec/isolation_cancellation.spec index 872cb5070..bcfb2fb37 100644 --- a/src/test/regress/specs/isolation_cancellation.spec +++ b/src/test/regress/spec/isolation_cancellation.spec @@ -1,6 +1,6 @@ -# Tests around cancelling statements. As we can't trigger cancel -# interrupts directly, we use statement_timeout instead, which largely -# behaves the same as proper cancellation. +// Tests around cancelling statements. As we can't trigger cancel +// interrupts directly, we use statement_timeout instead, which largely +// behaves the same as proper cancellation. setup { @@ -65,16 +65,16 @@ step "s2-drop" DROP TABLE cancel_table; } -# check that statement cancel works for plain selects, drop table -# afterwards to make sure sleep on workers is cancelled (thereby not -# preventing drop via locks) +// check that statement cancel works for plain selects, drop table +// afterwards to make sure sleep on workers is cancelled (thereby not +// preventing drop via locks) permutation "s1-timeout" "s1-sleep10000" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-sleep10000" "s1-reset" "s2-drop" -# check that statement cancel works for selects in transaction +// check that statement cancel works for selects in transaction permutation "s1-timeout" "s1-begin" "s1-sleep10000" "s1-rollback" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-begin" "s1-sleep10000" "s1-rollback" "s1-reset" "s2-drop" -# check that statement cancel works for selects in transaction, that previously wrote +// check that statement cancel works for selects in transaction, that previously wrote permutation "s1-timeout" "s1-begin" "s1-update1" "s1-sleep10000" "s1-rollback" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-begin" "s1-update1" "s1-sleep10000" "s1-rollback" "s1-reset" "s2-drop" diff --git a/src/test/regress/specs/isolation_citus_dist_activity.spec b/src/test/regress/spec/isolation_citus_dist_activity.spec similarity index 93% rename from src/test/regress/specs/isolation_citus_dist_activity.spec rename to src/test/regress/spec/isolation_citus_dist_activity.spec index e3976eaf5..eaac9b7da 100644 --- a/src/test/regress/specs/isolation_citus_dist_activity.spec +++ b/src/test/regress/spec/isolation_citus_dist_activity.spec @@ -92,8 +92,8 @@ step "s3-view-worker" SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM 
citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; } -# we prefer to sleep before "s2-view-dist" so that we can ensure -# the "wait_event" in the output doesn't change randomly (e.g., NULL to CliendRead etc.) +// we prefer to sleep before "s2-view-dist" so that we can ensure +// the "wait_event" in the output doesn't change randomly (e.g., NULL to ClientRead etc.) permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-alter-table" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback" permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-insert" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback" permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-select" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback" diff --git a/src/test/regress/specs/isolation_cluster_management.spec b/src/test/regress/spec/isolation_cluster_management.spec similarity index 100% rename from src/test/regress/specs/isolation_cluster_management.spec rename to src/test/regress/spec/isolation_cluster_management.spec diff --git a/src/test/regress/specs/isolation_concurrent_dml.spec b/src/test/regress/spec/isolation_concurrent_dml.spec similarity index 79% rename from src/test/regress/specs/isolation_concurrent_dml.spec rename to src/test/regress/spec/isolation_concurrent_dml.spec index 642dd2a22..7dc66f9d1 100644 --- a/src/test/regress/specs/isolation_concurrent_dml.spec +++ b/src/test/regress/spec/isolation_concurrent_dml.spec @@ -59,17 +59,17 @@ step "s2-commit" COMMIT; } -# verify that an in-progress insert blocks concurrent updates +// verify that an in-progress insert blocks concurrent updates permutation "s1-begin" "s1-insert" "s2-update" "s1-commit" -# but an insert without xact will not block +// but an insert without xact will not block permutation "s1-insert" "s2-update" -# verify that an in-progress multi-row insert blocks concurrent updates +// verify that an in-progress multi-row insert blocks concurrent updates permutation "s1-begin" "s1-multi-insert" "s2-update" "s1-commit" -# two multi-row inserts that hit same shards will block +// two multi-row inserts that hit same shards will block permutation "s1-begin" "s1-multi-insert" "s2-multi-insert-overlap" "s1-commit" -# but concurrent multi-row inserts don't block unless shards overlap +// but concurrent multi-row inserts don't block unless shards overlap permutation "s1-begin" "s2-begin" "s1-multi-insert" "s2-multi-insert" "s1-commit" "s2-commit" diff --git a/src/test/regress/specs/isolation_copy_placement_vs_copy_placement.spec b/src/test/regress/spec/isolation_copy_placement_vs_copy_placement.spec similarity index 67% rename from src/test/regress/specs/isolation_copy_placement_vs_copy_placement.spec rename to src/test/regress/spec/isolation_copy_placement_vs_copy_placement.spec index d2379d827..18e94653e 100644 --- a/src/test/regress/specs/isolation_copy_placement_vs_copy_placement.spec +++ b/src/test/regress/spec/isolation_copy_placement_vs_copy_placement.spec @@ -1,5 +1,5 @@ -# we use 5 as the partition key value through out the test -# so setting the corresponding shard here is useful +// we use 5 as the partition key value throughout the test +// so setting the corresponding shard here is useful setup { SET citus.shard_count TO 2; @@ -18,9 +18,9 @@ teardown session "s1" -# since test_hash_table has rep > 1 simple select query doesn't hit
all placements -# hence not all placements are cached -# but with copy all placements are cached +// since test_hash_table has rep > 1 simple select query doesn't hit all placements +// hence not all placements are cached +// but with copy all placements are cached step "s1-load-cache" { COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; @@ -48,9 +48,9 @@ step "s2-repair-placement" SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); } -# since test_hash_table has rep > 1 simple select query doesn't hit all placements -# hence not all placements are cached -# but with copy all placements are cached +// since test_hash_table has rep > 1 simple select query doesn't hit all placements +// hence not all placements are cached +// but with copy all placements are cached step "s2-load-cache" { COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; @@ -61,11 +61,11 @@ step "s2-commit" COMMIT; } -# two concurrent shard repairs on the same shard -# note that "s1-repair-placement" errors out but that is expected -# given that "s2-repair-placement" succeeds and the placement is -# already repaired +// two concurrent shard repairs on the same shard +// note that "s1-repair-placement" errors out but that is expected +// given that "s2-repair-placement" succeeds and the placement is +// already repaired permutation "s1-load-cache" "s2-load-cache" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-repair-placement" "s2-commit" -# the same test without the load caches +// the same test without the load caches permutation "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-repair-placement" "s2-commit" diff --git a/src/test/regress/specs/isolation_copy_placement_vs_modification.spec b/src/test/regress/spec/isolation_copy_placement_vs_modification.spec similarity index 87% rename from src/test/regress/specs/isolation_copy_placement_vs_modification.spec rename to src/test/regress/spec/isolation_copy_placement_vs_modification.spec index 98a2a2c37..7efc53240 100644 --- a/src/test/regress/specs/isolation_copy_placement_vs_modification.spec +++ b/src/test/regress/spec/isolation_copy_placement_vs_modification.spec @@ -1,5 +1,5 @@ -# we use 5 as the partition key value through out the test -# so setting the corresponding shard here is useful +// we use 5 as the partition key value through out the test +// so setting the corresponding shard here is useful setup { SET citus.shard_count TO 2; @@ -24,8 +24,8 @@ step "s1-begin" SET LOCAL citus.select_opens_transaction_block TO off; } -# since test_copy_placement_vs_modification has rep > 1 simple select query doesn't hit all placements -# hence not all placements are cached +// since test_copy_placement_vs_modification has rep > 1 simple select query doesn't hit all placements +// hence not all placements are cached step "s1-load-cache" { TRUNCATE test_copy_placement_vs_modification; @@ -110,9 +110,9 @@ step "s2-print-index-count" nodeport; } -# repair a placement while concurrently performing an update/delete/insert/copy -# note that at some points we use "s1-select" just after "s1-begin" given that BEGIN -# may invalidate cache at certain cases +// repair a placement while concurrently performing an update/delete/insert/copy +// note that at some points we use "s1-select" just after "s1-begin" given that BEGIN +// may invalidate cache at certain cases permutation 
"s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content" @@ -120,7 +120,7 @@ permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" " permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-ddl" "s2-commit" "s1-commit" "s2-print-index-count" -# the same tests without loading the cache at first +// the same tests without loading the cache at first permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content" diff --git a/src/test/regress/specs/isolation_copy_vs_all_on_mx.spec b/src/test/regress/spec/isolation_copy_vs_all_on_mx.spec similarity index 56% rename from src/test/regress/specs/isolation_copy_vs_all_on_mx.spec rename to src/test/regress/spec/isolation_copy_vs_all_on_mx.spec index 3e36d7c3b..53292f48a 100644 --- a/src/test/regress/specs/isolation_copy_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_copy_vs_all_on_mx.spec @@ -1,45 +1,14 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. +#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE copy_table(id integer, value integer); SELECT create_distributed_table('copy_table', 'id'); COPY copy_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. 
+// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS copy_table CASCADE; @@ -48,7 +17,7 @@ teardown session "s1" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -83,7 +52,7 @@ step "s2-begin" BEGIN; } -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s2-start-session-level-connection" { @@ -143,5 +112,5 @@ step "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-begin" "s2-coordinator-drop" "s1-commit-worker" "s2-commit" "s1-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" -#Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 -#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" +//Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 +//permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" diff --git a/src/test/regress/specs/isolation_create_distributed_table.spec b/src/test/regress/spec/isolation_create_distributed_table.spec similarity index 88% rename from src/test/regress/specs/isolation_create_distributed_table.spec rename to src/test/regress/spec/isolation_create_distributed_table.spec index 600a55092..0934df358 100644 --- a/src/test/regress/specs/isolation_create_distributed_table.spec +++ b/src/test/regress/spec/isolation_create_distributed_table.spec @@ -52,12 +52,12 @@ step "s2-commit" COMMIT; } -#concurrent create_distributed_table on empty table +//concurrent create_distributed_table on empty table permutation "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-create_distributed_table" "s1-commit" "s2-commit" -#concurrent create_distributed_table vs. copy to table +//concurrent create_distributed_table vs. 
copy to table permutation "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-copy_to_local_table" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s2-copy_to_local_table" "s1-create_distributed_table" "s2-commit" "s1-commit" -#concurrent create_distributed_table on non-empty table +//concurrent create_distributed_table on non-empty table permutation "s1-copy_to_local_table" "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-create_distributed_table" "s1-commit" "s2-commit" diff --git a/src/test/regress/specs/isolation_create_restore_point.spec b/src/test/regress/spec/isolation_create_restore_point.spec similarity index 66% rename from src/test/regress/specs/isolation_create_restore_point.spec rename to src/test/regress/spec/isolation_create_restore_point.spec index 68dbba5bb..fbf18879f 100644 --- a/src/test/regress/specs/isolation_create_restore_point.spec +++ b/src/test/regress/spec/isolation_create_restore_point.spec @@ -142,68 +142,68 @@ step "s2-commit" COMMIT; } -# verify that citus_create_restore_point is blocked by concurrent create_distributed_table +// verify that citus_create_restore_point is blocked by concurrent create_distributed_table permutation "s1-begin" "s1-create-distributed" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is not blocked by concurrent INSERT (only commit) +// verify that citus_create_restore_point is not blocked by concurrent INSERT (only commit) permutation "s1-begin" "s1-insert" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is not blocked by concurrent multi-shard UPDATE (only commit) +// verify that citus_create_restore_point is not blocked by concurrent multi-shard UPDATE (only commit) permutation "s1-begin" "s1-modify-multiple" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is not blocked by concurrent DDL (only commit) +// verify that citus_create_restore_point is not blocked by concurrent DDL (only commit) permutation "s1-begin" "s1-ddl" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is not blocked by concurrent COPY (only commit) +// verify that citus_create_restore_point is not blocked by concurrent COPY (only commit) permutation "s1-begin" "s1-copy" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is blocked by concurrent recover_prepared_transactions +// verify that citus_create_restore_point is blocked by concurrent recover_prepared_transactions permutation "s1-begin" "s1-recover" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is blocked by concurrent DROP TABLE +// verify that citus_create_restore_point is blocked by concurrent DROP TABLE permutation "s1-begin" "s1-drop" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is blocked by concurrent master_add_node +// verify that citus_create_restore_point is blocked by concurrent master_add_node permutation "s1-begin" "s1-add-node" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is blocked by concurrent master_remove_node +// verify that citus_create_restore_point is blocked by concurrent master_remove_node permutation "s1-begin" "s1-remove-node" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is blocked by concurrent citus_create_restore_point +// verify that citus_create_restore_point is blocked by concurrent citus_create_restore_point permutation "s1-begin" "s1-create-restore" "s2-create-restore" "s1-commit" -# verify that multi-shard 
UPDATE is blocked by concurrent citus_create_restore_point +// verify that multi-shard UPDATE is blocked by concurrent citus_create_restore_point permutation "s2-begin" "s2-create-restore" "s1-modify-multiple" "s2-commit" -# verify that DDL is blocked by concurrent citus_create_restore_point +// verify that DDL is blocked by concurrent citus_create_restore_point permutation "s2-begin" "s2-create-restore" "s1-ddl" "s2-commit" -# verify that multi-statement transactions are blocked by concurrent citus_create_restore_point +// verify that multi-statement transactions are blocked by concurrent citus_create_restore_point permutation "s2-begin" "s2-create-restore" "s1-multi-statement" "s2-commit" -# verify that citus_create_restore_point is blocked by concurrent create_reference_table +// verify that citus_create_restore_point is blocked by concurrent create_reference_table permutation "s1-begin" "s1-create-reference" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is not blocked by concurrent reference table INSERT (only commit) +// verify that citus_create_restore_point is not blocked by concurrent reference table INSERT (only commit) permutation "s1-begin" "s1-insert-ref" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is not blocked by concurrent reference table UPDATE (only commit) +// verify that citus_create_restore_point is not blocked by concurrent reference table UPDATE (only commit) permutation "s1-begin" "s1-modify-multiple-ref" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is not blocked by concurrent refence table DDL (only commit) +// verify that citus_create_restore_point is not blocked by concurrent refence table DDL (only commit) permutation "s1-begin" "s1-ddl-ref" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is not blocked by concurrent COPY to reference table (only commit) +// verify that citus_create_restore_point is not blocked by concurrent COPY to reference table (only commit) permutation "s1-begin" "s1-copy-ref" "s2-create-restore" "s1-commit" -# verify that citus_create_restore_point is blocked by concurrent DROP TABLE when table is a reference table +// verify that citus_create_restore_point is blocked by concurrent DROP TABLE when table is a reference table permutation "s1-begin" "s1-drop-ref" "s2-create-restore" "s1-commit" -# verify that reference table UPDATE is blocked by concurrent citus_create_restore_point +// verify that reference table UPDATE is blocked by concurrent citus_create_restore_point permutation "s2-begin" "s2-create-restore" "s1-modify-multiple-ref" "s2-commit" -# verify that reference table DDL is blocked by concurrent citus_create_restore_point +// verify that reference table DDL is blocked by concurrent citus_create_restore_point permutation "s2-begin" "s2-create-restore" "s1-ddl-ref" "s2-commit" -# verify that multi-statement transactions with reference tables are blocked by concurrent citus_create_restore_point +// verify that multi-statement transactions with reference tables are blocked by concurrent citus_create_restore_point permutation "s2-begin" "s2-create-restore" "s1-multi-statement-ref" "s2-commit" diff --git a/src/test/regress/specs/isolation_create_table_vs_add_remove_node.spec b/src/test/regress/spec/isolation_create_table_vs_add_remove_node.spec similarity index 89% rename from src/test/regress/specs/isolation_create_table_vs_add_remove_node.spec rename to src/test/regress/spec/isolation_create_table_vs_add_remove_node.spec 
index 4986beee3..bb03b843b 100644 --- a/src/test/regress/specs/isolation_create_table_vs_add_remove_node.spec +++ b/src/test/regress/spec/isolation_create_table_vs_add_remove_node.spec @@ -101,20 +101,20 @@ step "s2-commit" COMMIT; } -# session 1 adds a node, session 2 creates a distributed table +// session 1 adds a node, session 2 creates a distributed table permutation "s1-begin" "s1-add-node-2" "s2-create-table-1" "s1-commit" "s1-show-placements" "s2-select" permutation "s1-begin" "s1-add-node-2" "s2-create-table-1" "s1-abort" "s1-show-placements" "s2-select" permutation "s2-begin" "s2-create-table-1" "s1-add-node-2" "s2-commit" "s1-show-placements" "s2-select" -# session 1 removes a node, session 2 creates a distributed table +// session 1 removes a node, session 2 creates a distributed table permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-1" "s1-commit" "s1-show-placements" "s2-select" permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-1" "s1-abort" "s1-show-placements" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-table-1" "s1-remove-node-2" "s2-commit" "s1-show-placements" "s2-select" -# session 1 removes a node, session 2 creates a distributed table with replication factor 2, should throw a sane error +// session 1 removes a node, session 2 creates a distributed table with replication factor 2, should throw a sane error permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-2" "s1-commit" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-table-2" "s1-remove-node-2" "s2-commit" "s2-select" -# session 1 removes a node, session 2 creates a shard in an append-distributed table +// session 1 removes a node, session 2 creates a shard in an append-distributed table permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-append-table" "s1-commit" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-append-table" "s1-remove-node-2" "s2-commit" "s2-select" diff --git a/src/test/regress/specs/isolation_data_migration.spec b/src/test/regress/spec/isolation_data_migration.spec similarity index 76% rename from src/test/regress/specs/isolation_data_migration.spec rename to src/test/regress/spec/isolation_data_migration.spec index d3b85fa6a..b0e75637e 100644 --- a/src/test/regress/specs/isolation_data_migration.spec +++ b/src/test/regress/spec/isolation_data_migration.spec @@ -58,16 +58,16 @@ step "s2-select" SELECT * FROM migration_table ORDER BY test_id; } -# verify that local COPY is picked up by create_distributed_table once it commits +// verify that local COPY is picked up by create_distributed_table once it commits permutation "s2-begin" "s2-copy" "s1-create_distributed_table" "s2-commit" "s2-select" -# verify that COPY is distributed once create_distributed_table commits +// verify that COPY is distributed once create_distributed_table commits permutation "s1-begin" "s1-create_distributed_table" "s2-copy" "s1-commit" "s2-select" -# verify that local INSERT is picked up by create_distributed_table once it commits +// verify that local INSERT is picked up by create_distributed_table once it commits permutation "s2-begin" "s2-insert" "s1-create_distributed_table" "s2-commit" "s2-select" -# verify that INSERT is distributed once create_distributed_table commits +// verify that INSERT is distributed once create_distributed_table commits permutation "s1-begin" "s1-create_distributed_table" "s2-insert" "s1-commit" "s2-select" -# verify that changes are picked up even in 
serializable mode +// verify that changes are picked up even in serializable mode permutation "s1-begin-serializable" "s2-copy" "s1-create_distributed_table" "s1-commit" "s2-select" permutation "s1-begin-serializable" "s2-insert" "s1-create_distributed_table" "s1-commit" "s2-select" diff --git a/src/test/regress/specs/isolation_ddl_vs_all.spec b/src/test/regress/spec/isolation_ddl_vs_all.spec similarity index 95% rename from src/test/regress/specs/isolation_ddl_vs_all.spec rename to src/test/regress/spec/isolation_ddl_vs_all.spec index 1b57ddef6..84603ba2c 100644 --- a/src/test/regress/specs/isolation_ddl_vs_all.spec +++ b/src/test/regress/spec/isolation_ddl_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. +// -# create range distributed table to test behavior of DDL in concurrent operations +// create range distributed table to test behavior of DDL in concurrent operations setup { SELECT citus_internal.replace_isolation_tester_func(); @@ -13,7 +13,7 @@ setup SELECT create_distributed_table('ddl_hash', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS ddl_hash CASCADE; @@ -21,7 +21,7 @@ teardown SELECT citus_internal.restore_isolation_tester_func(); } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-begin" { BEGIN; } @@ -40,7 +40,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-ddl-create-index" { CREATE INDEX ddl_hash_index ON ddl_hash(id); } @@ -56,7 +56,7 @@ step "s2-distribute-table" { SELECT create_distributed_table('ddl_hash', 'id'); step "s2-select" { SELECT * FROM ddl_hash ORDER BY 1, 2; } step "s2-commit" { COMMIT; } -# permutations - DDL vs DDL +// permutations - DDL vs DDL permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-indexes" "s1-show-columns" @@ -70,7 +70,7 @@ permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-ddl-create-ind permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-columns" -# permutations - DDL first +// permutations - DDL first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-table-size" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" 
"s2-distribute-table" "s1-commit" "s2-commit" "s1-show-indexes" @@ -83,7 +83,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-tab permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-distribute-table" "s1-commit" "s2-commit" "s1-show-columns" -# permutations - DDL second +// permutations - DDL second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" diff --git a/src/test/regress/specs/isolation_delete_vs_all.spec b/src/test/regress/spec/isolation_delete_vs_all.spec similarity index 94% rename from src/test/regress/specs/isolation_delete_vs_all.spec rename to src/test/regress/spec/isolation_delete_vs_all.spec index 62b0d302a..422384c24 100644 --- a/src/test/regress/specs/isolation_delete_vs_all.spec +++ b/src/test/regress/spec/isolation_delete_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. +// -# create range distributed table to test behavior of DELETE in concurrent operations +// create range distributed table to test behavior of DELETE in concurrent operations setup { SELECT citus_internal.replace_isolation_tester_func(); @@ -13,7 +13,7 @@ setup SELECT create_distributed_table('delete_hash', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS delete_hash CASCADE; @@ -21,7 +21,7 @@ teardown SELECT citus_internal.restore_isolation_tester_func(); } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-begin" { BEGIN; } @@ -41,7 +41,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-delete" { DELETE FROM delete_hash WHERE id = 4; } @@ -59,10 +59,10 @@ step "s2-distribute-table" { SELECT create_distributed_table('delete_hash', 'id' step "s2-select" { SELECT * FROM delete_hash ORDER BY 1, 2; } step "s2-commit" { COMMIT; } -# permutations - DELETE vs DELETE +// permutations - DELETE vs DELETE permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" -# permutations - DELETE first +// permutations - DELETE first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" 
"s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" @@ -74,7 +74,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-rename-col permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" -# permutations - DELETE second +// permutations - DELETE second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" diff --git a/src/test/regress/specs/isolation_dis2ref_foreign_keys_on_mx.spec b/src/test/regress/spec/isolation_dis2ref_foreign_keys_on_mx.spec similarity index 76% rename from src/test/regress/specs/isolation_dis2ref_foreign_keys_on_mx.spec rename to src/test/regress/spec/isolation_dis2ref_foreign_keys_on_mx.spec index 92cfd0097..7bacf4fb5 100644 --- a/src/test/regress/specs/isolation_dis2ref_foreign_keys_on_mx.spec +++ b/src/test/regress/spec/isolation_dis2ref_foreign_keys_on_mx.spec @@ -1,37 +1,7 @@ +#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block. - -- Following is a workaround to overcome that. Port numbers are hard coded - -- at the moment. 
- SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE ref_table(id int PRIMARY KEY, value int); SELECT create_reference_table('ref_table'); @@ -163,4 +133,4 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-rollback-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-udpate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display" -#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" "s3-display" +//permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" "s3-display" diff --git a/src/test/regress/specs/isolation_distributed_deadlock_detection.spec b/src/test/regress/spec/isolation_distributed_deadlock_detection.spec similarity index 87% rename from src/test/regress/specs/isolation_distributed_deadlock_detection.spec rename to src/test/regress/spec/isolation_distributed_deadlock_detection.spec index 96bd1eb87..6ade6d615 100644 --- a/src/test/regress/specs/isolation_distributed_deadlock_detection.spec +++ b/src/test/regress/spec/isolation_distributed_deadlock_detection.spec @@ -368,69 +368,69 @@ step "s6-commit" COMMIT; } -# we disable the daemon during the regression tests in order to get consistent results -# thus we manually issue the deadlock detection +// we disable the daemon during the regression tests in order to get consistent results +// thus we manually issue the deadlock detection session "deadlock-checker" -# we issue the checker not only when there are deadlocks to ensure that we never cancel -# backend inappropriately +// we issue the checker not only when there are deadlocks to ensure that we never cancel +// backend inappropriately step "deadlock-checker-call" { SELECT check_distributed_deadlocks(); } -# simplest case, loop with two nodes +// simplest case, loop with two nodes permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-commit" "s2-commit" -# simplest case with replication factor 2 +// simplest case with replication factor 2 permutation "s1-begin" "s2-begin" "s1-update-1-rep-2" "s2-update-2-rep-2" "s2-update-1-rep-2" "deadlock-checker-call" "s1-update-2-rep-2" "deadlock-checker-call" "s1-commit" "s2-commit" -# simplest case with 2pc enabled +// simplest case with 2pc enabled permutation "s1-begin" "s2-begin" "s1-set-2pc" "s2-set-2pc" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-commit" "s2-commit" -# simplest 
case with multi-shard query is cancelled +// simplest case with multi-shard query is cancelled permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-upsert-select-all" "deadlock-checker-call" "s1-commit" "s2-commit" -# simplest case with DDL is cancelled +// simplest case with DDL is cancelled permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-ddl" "deadlock-checker-call" "s1-commit" "s2-commit" -# daedlock with local table +// deadlock with local table permutation "s1-begin" "s2-begin" "s1-insert-dist-10" "s2-insert-local-10" "s2-insert-dist-10" "s1-insert-local-10" "deadlock-checker-call" "s1-commit" "s2-commit" -# daedlock with reference tables only +// deadlock with reference tables only permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-insert-ref-11" "s2-insert-ref-11" "s1-insert-ref-10" "deadlock-checker-call" "s1-commit" "s2-commit" -# deadlock with reference + distributed tables +// deadlock with reference + distributed tables permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-update-1" "deadlock-checker-call" "s2-update-1" "s1-insert-ref-10" "deadlock-checker-call" "s1-commit" "s2-commit" -# slightly more complex case, loop with three nodes +// slightly more complex case, loop with three nodes permutation "s1-begin" "s2-begin" "s3-begin" "s1-update-1" "s2-update-2" "s3-update-3" "deadlock-checker-call" "s1-update-2" "s2-update-3" "s3-update-1" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit" -# similar to the above (i.e., 3 nodes), but the cycle starts from the second node +// similar to the above (i.e., 3 nodes), but the cycle starts from the second node permutation "s1-begin" "s2-begin" "s3-begin" "s2-update-1" "s1-update-1" "s2-update-2" "s3-update-3" "s3-update-2" "deadlock-checker-call" "s2-update-3" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit" -# not connected graph +// not connected graph permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s1-update-1" "s2-update-2" "s3-update-3" "s3-update-2" "deadlock-checker-call" "s4-update-4" "s2-update-3" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit" "s4-commit" -# still a not connected graph, but each smaller graph contains dependencies, one of which is a distributed deadlock +// still a not connected graph, but each smaller graph contains dependencies, one of which is a distributed deadlock permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s4-update-1" "s1-update-1" "deadlock-checker-call" "s2-update-2" "s3-update-3" "s2-update-3" "s3-update-2" "deadlock-checker-call" "s3-commit" "s2-commit" "s4-commit" "s1-commit" -# multiple deadlocks on a not connected graph +// multiple deadlocks on a not connected graph permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s1-update-1" "s4-update-4" "s2-update-2" "s3-update-3" "s3-update-2" "s4-update-1" "s1-update-4" "deadlock-checker-call" "s1-commit" "s4-commit" "s2-update-3" "deadlock-checker-call" "s2-commit" "s3-commit" -# a larger graph where the first node is in the distributed deadlock +// a larger graph where the first node is in the distributed deadlock permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s1-update-1" "s5-update-5" "s3-update-2" "s2-update-3" "s4-update-4" "s3-update-4" "deadlock-checker-call" "s6-update-6" "s4-update-6" "s1-update-5" "s5-update-1" "deadlock-checker-call" "s1-commit" "s5-commit" "s6-commit" "s4-commit" "s3-commit" "s2-commit" -# a larger graph where the
deadlock starts from a middle node +// a larger graph where the deadlock starts from a middle node permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s6-update-6" "s5-update-5" "s5-update-6" "s4-update-4" "s1-update-4" "s4-update-5" "deadlock-checker-call" "s2-update-3" "s3-update-2" "s2-update-2" "s3-update-3" "deadlock-checker-call" "s6-commit" "s5-commit" "s4-commit" "s1-commit" "s3-commit" "s2-commit" -# a larger graph where the deadlock starts from the last node +// a larger graph where the deadlock starts from the last node permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s5-update-5" "s3-update-2" "s2-update-2" "s4-update-4" "s3-update-4" "s4-update-5" "s1-update-4" "deadlock-checker-call" "s6-update-6" "s5-update-6" "s6-update-5" "deadlock-checker-call" "s5-commit" "s6-commit" "s4-commit" "s3-commit" "s1-commit" "s2-commit" -# a backend is blocked on multiple backends -# note that session 5 is not strictly necessary to simulate the deadlock -# we only added that such that session 4 waits on for that -# thus if any cancellation happens on session 4, we'd be able to -# observe it, otherwise cancelling idle backends has not affect -# (cancelling wrong backend used to be a bug and already fixed) +// a backend is blocked on multiple backends +// note that session 5 is not strictly necessary to simulate the deadlock +// we only added that so that session 4 waits on it +// thus if any cancellation happens on session 4, we'd be able to +// observe it, otherwise cancelling idle backends has no effect +// (cancelling the wrong backend used to be a bug and is already fixed) permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s1-update-1" "s3-update-3" "s2-update-4" "s2-update-3" "s4-update-2" "s5-random-adv-lock" "s4-random-adv-lock" "s3-update-1" "s1-update-2-4" "deadlock-checker-call" "deadlock-checker-call" "s5-commit" "s4-commit" "s2-commit" "s1-commit" "s3-commit" diff --git a/src/test/regress/specs/isolation_distributed_transaction_id.spec b/src/test/regress/spec/isolation_distributed_transaction_id.spec similarity index 86% rename from src/test/regress/specs/isolation_distributed_transaction_id.spec rename to src/test/regress/spec/isolation_distributed_transaction_id.spec index 46cc52b38..53d6dd828 100644 --- a/src/test/regress/specs/isolation_distributed_transaction_id.spec +++ b/src/test/regress/spec/isolation_distributed_transaction_id.spec @@ -1,4 +1,4 @@ -# Tests around distributed transaction id generation +// Tests around distributed transaction id generation setup { @@ -86,7 +86,7 @@ step "s2-commit" COMMIT; } -# print only the necessary parts to prevent concurrent runs to print different values +// print only the necessary parts to prevent concurrent runs from printing different values step "s2-get-first-worker-active-transactions" { SELECT * FROM run_command_on_workers('SELECT row(initiator_node_identifier, transaction_number) @@ -124,13 +124,13 @@ step "s3-get-all-transactions" SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; } -# show that we could get all distributed transaction ids from seperate sessions +// show that we could get all distributed transaction ids from separate sessions permutation "s1-begin" "s1-assign-transaction-id" "s1-get-all-transactions" "s2-begin" "s2-assign-transaction-id" "s2-get-all-transactions" "s3-begin" "s3-assign-transaction-id" "s3-get-all-transactions" "s1-commit" "s2-commit" "s3-commit" -# now
show that distributed transaction id on the coordinator -# is the same with the one on the worker +// now show that distributed transaction id on the coordinator +// is the same with the one on the worker permutation "s1-create-table" "s1-begin" "s1-insert" "s1-verify-current-xact-is-on-worker" "s1-commit" -# we would initially forget the distributed transaction ID on pg_dist_partition invalidations +// we would initially forget the distributed transaction ID on pg_dist_partition invalidations permutation "s1-begin" "s1-assign-transaction-id" "s1-has-transaction-number" "s2-vacuum" "s1-has-transaction-number" "s1-commit" diff --git a/src/test/regress/specs/isolation_dml_vs_repair.spec b/src/test/regress/spec/isolation_dml_vs_repair.spec similarity index 89% rename from src/test/regress/specs/isolation_dml_vs_repair.spec rename to src/test/regress/spec/isolation_dml_vs_repair.spec index 0066892d9..78a2a764c 100644 --- a/src/test/regress/specs/isolation_dml_vs_repair.spec +++ b/src/test/regress/spec/isolation_dml_vs_repair.spec @@ -93,17 +93,17 @@ step "s2-commit" COMMIT; } -# verify that repair is blocked by ongoing modifying simple transaction +// verify that repair is blocked by ongoing modifying simple transaction permutation "s2-invalidate-57637" "s1-begin" "s1-insertone" "s2-repair" "s1-commit" -# verify that repair is blocked by ongoing modifying insert...select transaction +// verify that repair is blocked by ongoing modifying insert...select transaction permutation "s1-insertone" "s2-invalidate-57637" "s1-begin" "s1-insertall" "s2-repair" "s1-commit" -# verify that modifications wait for shard repair +// verify that modifications wait for shard repair permutation "s2-invalidate-57637" "s2-begin" "s2-repair" "s1-insertone" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" -# verify that prepared plain modifications wait for shard repair +// verify that prepared plain modifications wait for shard repair permutation "s2-invalidate-57637" "s1-prepared-insertone" "s2-begin" "s2-repair" "s1-prepared-insertone" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" -# verify that prepared INSERT ... SELECT waits for shard repair +// verify that prepared INSERT ... SELECT waits for shard repair permutation "s2-invalidate-57637" "s1-insertone" "s1-prepared-insertall" "s2-begin" "s2-repair" "s1-prepared-insertall" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" diff --git a/src/test/regress/specs/isolation_drop_alter_index_select_for_update_on_mx.spec b/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec similarity index 61% rename from src/test/regress/specs/isolation_drop_alter_index_select_for_update_on_mx.spec rename to src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec index 3dae98a2b..089ec473e 100644 --- a/src/test/regress/specs/isolation_drop_alter_index_select_for_update_on_mx.spec +++ b/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec @@ -1,45 +1,14 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. 
+#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE dist_table(id integer, value integer); SELECT create_distributed_table('dist_table', 'id'); COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS dist_table CASCADE; @@ -53,7 +22,7 @@ step "s1-begin" BEGIN; } -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -98,7 +67,7 @@ step "s1-commit" session "s2" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s2-start-session-level-connection" { diff --git a/src/test/regress/specs/isolation_drop_shards.spec b/src/test/regress/spec/isolation_drop_shards.spec similarity index 90% rename from src/test/regress/specs/isolation_drop_shards.spec rename to src/test/regress/spec/isolation_drop_shards.spec index fd9fce2d7..4ce4ad5fc 100644 --- a/src/test/regress/specs/isolation_drop_shards.spec +++ b/src/test/regress/spec/isolation_drop_shards.spec @@ -67,9 +67,9 @@ permutation "s1-begin" "s1-drop-all-shards" "s2-apply-delete-command" "s1-commit permutation "s1-begin" "s1-drop-all-shards" "s2-drop-all-shards" "s1-commit" permutation "s1-begin" "s1-drop-all-shards" "s2-select" "s1-commit" -# We can't verify master_apply_delete_command + SELECT since it blocks on the -# the workers, but this is not visible on the master, meaning the isolation -# test cannot proceed. +// We can't verify master_apply_delete_command + SELECT since it blocks on the +// the workers, but this is not visible on the master, meaning the isolation +// test cannot proceed. 
permutation "s1-begin" "s1-apply-delete-command" "s2-truncate" "s1-commit" permutation "s1-begin" "s1-apply-delete-command" "s2-apply-delete-command" "s1-commit" permutation "s1-begin" "s1-apply-delete-command" "s2-drop-all-shards" "s1-commit" diff --git a/src/test/regress/specs/isolation_drop_vs_all.spec b/src/test/regress/spec/isolation_drop_vs_all.spec similarity index 93% rename from src/test/regress/specs/isolation_drop_vs_all.spec rename to src/test/regress/spec/isolation_drop_vs_all.spec index 6bb7004d0..c23471a90 100644 --- a/src/test/regress/specs/isolation_drop_vs_all.spec +++ b/src/test/regress/spec/isolation_drop_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. +// -# create range distributed table to test behavior of DROP in concurrent operations +// create range distributed table to test behavior of DROP in concurrent operations setup { SELECT citus_internal.replace_isolation_tester_func(); @@ -13,7 +13,7 @@ setup SELECT create_distributed_table('drop_hash', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS drop_hash CASCADE; @@ -21,7 +21,7 @@ teardown SELECT citus_internal.restore_isolation_tester_func(); } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-begin" { BEGIN; } @@ -39,7 +39,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-drop" { DROP TABLE drop_hash; } @@ -55,10 +55,10 @@ step "s2-distribute-table" { SELECT create_distributed_table('drop_hash', 'id'); step "s2-select" { SELECT * FROM drop_hash ORDER BY 1, 2; } step "s2-commit" { COMMIT; } -# permutations - DROP vs DROP +// permutations - DROP vs DROP permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" -# permutations - DROP first +// permutations - DROP first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-drop-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" @@ -68,7 +68,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-rename-colum permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" -# permutations - DROP second +// permutations - DROP second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-ddl-drop-index" "s2-drop" "s1-commit" 
"s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" diff --git a/src/test/regress/specs/isolation_dump_global_wait_edges.spec b/src/test/regress/spec/isolation_dump_global_wait_edges.spec similarity index 90% rename from src/test/regress/specs/isolation_dump_global_wait_edges.spec rename to src/test/regress/spec/isolation_dump_global_wait_edges.spec index 23f2b761e..b88dc692c 100644 --- a/src/test/regress/specs/isolation_dump_global_wait_edges.spec +++ b/src/test/regress/spec/isolation_dump_global_wait_edges.spec @@ -87,8 +87,8 @@ step "detector-dump-wait-edges" SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; } -# Distributed transaction blocked by another distributed transaction +// Distributed transaction blocked by another distributed transaction permutation "s1-begin" "s2-begin" "s1-update" "s2-update" "detector-dump-wait-edges" "s1-abort" "s2-abort" -# Distributed transaction blocked by another distributed transaction blocked by another distributed transaction +// Distributed transaction blocked by another distributed transaction blocked by another distributed transaction permutation "s1-begin" "s2-begin" "s3-begin" "s1-update" "s2-update" "s3-update" "detector-dump-wait-edges" "s1-abort" "s2-abort" "s3-abort" diff --git a/src/test/regress/specs/isolation_dump_local_wait_edges.spec b/src/test/regress/spec/isolation_dump_local_wait_edges.spec similarity index 87% rename from src/test/regress/specs/isolation_dump_local_wait_edges.spec rename to src/test/regress/spec/isolation_dump_local_wait_edges.spec index 57edf5b3e..9cdb99a26 100644 --- a/src/test/regress/specs/isolation_dump_local_wait_edges.spec +++ b/src/test/regress/spec/isolation_dump_local_wait_edges.spec @@ -81,11 +81,11 @@ step "detector-dump-wait-edges" blocking_transaction_waiting; } -# Distributed transaction blocked by another distributed transaction +// Distributed transaction blocked by another distributed transaction permutation "dist11-begin" "dist13-begin" "dist11-update" "dist13-update" "detector-dump-wait-edges" "dist11-abort" "dist13-abort" -# Distributed transaction blocked by a regular transaction +// Distributed transaction blocked by a regular transaction permutation "local-begin" "dist13-begin" "local-update" "dist13-update" "detector-dump-wait-edges" "local-abort" "dist13-abort" -# Distributed transaction blocked by a regular transaction blocked by a distributed transaction +// Distributed transaction blocked by a regular transaction blocked by a distributed transaction permutation "dist11-begin" "local-begin" "dist13-begin" "dist11-update" "local-update" "dist13-update" "detector-dump-wait-edges" "dist11-abort" "local-abort" "dist13-abort" diff --git a/src/test/regress/specs/isolation_ensure_dependency_activate_node.spec b/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec similarity index 88% rename from src/test/regress/specs/isolation_ensure_dependency_activate_node.spec rename to src/test/regress/spec/isolation_ensure_dependency_activate_node.spec index 509ea07bf..a95624a7e 100644 --- a/src/test/regress/specs/isolation_ensure_dependency_activate_node.spec +++ b/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec @@ -1,5 +1,5 @@ -# the test expects to have zero nodes in pg_dist_node at the beginning -# add single one of the nodes for the purpose of the test +// the test expects to have zero nodes in pg_dist_node at the 
beginning +// add single one of the nodes for the purpose of the test setup { CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER) @@ -11,7 +11,7 @@ setup SELECT 1 FROM master_add_node('localhost', 57637); } -# ensure that both nodes exists for the remaining of the isolation tests +// ensure that both nodes exists for the remaining of the isolation tests teardown { SELECT 1 FROM master_add_node('localhost', 57637); @@ -50,8 +50,8 @@ step "s1-commit" COMMIT; } -# printing in session 1 adds the worker node, this makes we are sure we count the objects -# on that node as well. After counting objects is done we remove the node again. +// printing in session 1 adds the worker node, this makes we are sure we count the objects +// on that node as well. After counting objects is done we remove the node again. step "s1-print-distributed-objects" { SELECT 1 FROM master_add_node('localhost', 57638); @@ -126,8 +126,8 @@ step "s2-commit" COMMIT; } -# prints from session 2 are run at the end when the worker has already been added by the -# test +// prints from session 2 are run at the end when the worker has already been added by the +// test step "s2-print-distributed-objects" { -- print an overview of all distributed objects @@ -195,7 +195,7 @@ step "s3-commit" } -# schema only tests +// schema only tests permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" @@ -203,25 +203,25 @@ permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" -# concurrency tests with multi schema distribution +// concurrency tests with multi schema distribution permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-table" "s3-use-schema" "s3-create-table" "s1-commit" "s2-commit" "s3-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s2-commit" "s3-commit" "s2-print-distributed-objects" -# type and schema tests +// type and schema tests permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" 
"s2-commit" "s1-commit" "s2-print-distributed-objects" -# distributed function tests -# isolation tests are not very simple psql, so trigger NOTIFY reliably for -# s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by -# "s2-commit", because "COMMIT" syncs the messages +// distributed function tests +// isolation tests are not very simple psql, so trigger NOTIFY reliably for +// s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by +// "s2-commit", because "COMMIT" syncs the messages permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" -# we cannot run the following operations concurrently -# the problem is that NOTIFY event doesn't (reliably) happen before COMMIT -# so we have to commit s2 before s1 starts +// we cannot run the following operations concurrently +// the problem is that NOTIFY event doesn't (reliably) happen before COMMIT +// so we have to commit s2 before s1 starts permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" diff --git a/src/test/regress/specs/isolation_extension_commands.spec b/src/test/regress/spec/isolation_extension_commands.spec similarity index 97% rename from src/test/regress/specs/isolation_extension_commands.spec rename to src/test/regress/spec/isolation_extension_commands.spec index 72e4b57af..19d07844b 100644 --- a/src/test/regress/specs/isolation_extension_commands.spec +++ b/src/test/regress/spec/isolation_extension_commands.spec @@ -105,7 +105,7 @@ step "s2-remove-node-1" SELECT 1 FROM master_remove_node('localhost', 57637); } -# master_#_node vs extension command +// master_//_node vs extension command permutation "s1-begin" "s1-add-node-1" "s2-create-extension-version-11" "s1-commit" "s1-print" permutation "s1-begin" "s1-add-node-1" "s2-alter-extension-update-to-version-12" "s1-commit" "s1-print" permutation "s1-add-node-1" "s1-begin" "s1-remove-node-1" "s2-drop-extension" "s1-commit" "s1-print" @@ -114,7 +114,7 @@ permutation "s1-begin" "s1-add-node-1" "s2-drop-extension" "s1-commit" "s1-print permutation "s1-add-node-1" "s1-create-extension-with-schema2" "s1-begin" "s1-remove-node-1" "s2-alter-extension-set-schema3" "s1-commit" "s1-print" permutation "s1-add-node-1" "s2-drop-extension" "s1-begin" "s1-remove-node-1" "s2-create-extension-with-schema1" "s1-commit" "s1-print" -# extension command vs master_#_node +// extension command vs master_#_node permutation "s2-add-node-1" "s2-drop-extension" "s2-remove-node-1" "s2-begin" "s2-create-extension-version-11" "s1-add-node-1" "s2-commit" "s1-print" permutation "s2-drop-extension" "s2-add-node-1" "s2-create-extension-version-11" "s2-remove-node-1" "s2-begin" "s2-alter-extension-update-to-version-12" "s1-add-node-1" "s2-commit" "s1-print" permutation "s2-add-node-1" "s2-begin" "s2-drop-extension" "s1-remove-node-1" "s2-commit" "s1-print" diff --git a/src/test/regress/specs/isolation_get_all_active_transactions.spec b/src/test/regress/spec/isolation_get_all_active_transactions.spec similarity index 95% rename 
from src/test/regress/specs/isolation_get_all_active_transactions.spec rename to src/test/regress/spec/isolation_get_all_active_transactions.spec index 2fcddcc65..da0c4553c 100644 --- a/src/test/regress/specs/isolation_get_all_active_transactions.spec +++ b/src/test/regress/spec/isolation_get_all_active_transactions.spec @@ -30,7 +30,7 @@ teardown session "s1" -# run_command_on_placements is done in a separate step because the setup is executed as a single transaction +// run_command_on_placements is done in a separate step because the setup is executed as a single transaction step "s1-grant" { GRANT ALL ON test_table TO test_user_1; diff --git a/src/test/regress/specs/isolation_get_distributed_wait_queries.spec b/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec similarity index 81% rename from src/test/regress/specs/isolation_get_distributed_wait_queries.spec rename to src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec index 0fd35fe0f..d49aa1f90 100644 --- a/src/test/regress/specs/isolation_get_distributed_wait_queries.spec +++ b/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec @@ -1,38 +1,6 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. -setup -{ - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.shard_replication_factor TO 1; - SET citus.replication_model to streaming; +#include "isolation_mx_common.include.spec" +setup { CREATE TABLE ref_table(user_id int, value_1 int); SELECT create_reference_table('ref_table'); INSERT INTO ref_table VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71); @@ -42,8 +10,8 @@ setup INSERT INTO tt1 VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71); } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE ref_table; @@ -63,7 +31,7 @@ step "s1-update-ref-table-from-coordinator" UPDATE ref_table SET value_1 = 15; } -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. 
step "s1-start-session-level-connection" { @@ -199,7 +167,7 @@ step "s3-select-distributed-waiting-queries" SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; } -# session s1 and s4 executes the commands on the same worker node +// session s1 and s4 executes the commands on the same worker node session "s4" step "s4-start-session-level-connection" @@ -238,8 +206,8 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy-to permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select-for-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-ref-table" "s3-select-distributed-waiting-queries" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-into-ref-table" "s1-begin" "s1-alter-table" "s3-select-distributed-waiting-queries" "s2-commit-worker" "s1-commit" "s2-stop-connection" -# make sure that multi-shard modification queries -# show up in the waiting processes even if they are -# blocked on the same node +// make sure that multi-shard modification queries +// show up in the waiting processes even if they are +// blocked on the same node permutation "s1-begin" "s1-update-on-the-coordinator" "s2-update-on-the-coordinator" "s3-select-distributed-waiting-queries" "s1-commit" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update-dist-table" "s4-start-session-level-connection" "s4-begin-on-worker" "s4-update-dist-table" "s3-select-distributed-waiting-queries" "s1-commit-worker" "s4-commit-worker" "s1-stop-connection" "s4-stop-connection" diff --git a/src/test/regress/specs/isolation_hash_copy_vs_all.spec b/src/test/regress/spec/isolation_hash_copy_vs_all.spec similarity index 96% rename from src/test/regress/specs/isolation_hash_copy_vs_all.spec rename to src/test/regress/spec/isolation_hash_copy_vs_all.spec index 1dba9b92c..9b607c2a5 100644 --- a/src/test/regress/specs/isolation_hash_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_hash_copy_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. 
+// -# create append distributed table to test behavior of COPY in concurrent operations +// create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -10,13 +10,13 @@ setup SELECT create_distributed_table('hash_copy', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS hash_copy CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } @@ -58,7 +58,7 @@ step "s1-recreate-with-replication-2" SELECT create_distributed_table('hash_copy', 'id'); } -# session 2 +// session 2 session "s2" step "s2-copy" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } @@ -86,10 +86,10 @@ step "s2-master-modify-multiple-shards" { DELETE FROM hash_copy; } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('hash_copy', 'id'); } -# permutations - COPY vs COPY +// permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" -# permutations - COPY first +// permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" @@ -110,13 +110,13 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shar permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - COPY first (replication factor 2) +// permutations - COPY first (replication factor 2) permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count" permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" -# permutations - COPY second +// permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_insert_select_conflict.spec b/src/test/regress/spec/isolation_insert_select_conflict.spec similarity index 100% rename from src/test/regress/specs/isolation_insert_select_conflict.spec rename to src/test/regress/spec/isolation_insert_select_conflict.spec diff --git 
a/src/test/regress/specs/isolation_insert_select_vs_all.spec b/src/test/regress/spec/isolation_insert_select_vs_all.spec similarity index 96% rename from src/test/regress/specs/isolation_insert_select_vs_all.spec rename to src/test/regress/spec/isolation_insert_select_vs_all.spec index 7ea3b602b..b9cef7db1 100644 --- a/src/test/regress/specs/isolation_insert_select_vs_all.spec +++ b/src/test/regress/spec/isolation_insert_select_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. +// -# create range distributed table to test behavior of INSERT/SELECT in concurrent operations +// create range distributed table to test behavior of INSERT/SELECT in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -12,14 +12,14 @@ setup SELECT create_distributed_table('select_of_insert_select_hash', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS insert_of_insert_select_hash CASCADE; DROP TABLE IF EXISTS select_of_insert_select_hash CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { @@ -65,7 +65,7 @@ step "s1-show-columns-selected" { SELECT run_command_on_workers('SELECT column_n step "s1-select-count" { SELECT COUNT(*) FROM select_of_insert_select_hash; } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-insert-select" { INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; } step "s2-update-on-inserted" { UPDATE insert_of_insert_select_hash SET data = 'l' WHERE id = 4; } @@ -99,10 +99,10 @@ step "s2-master-drop-all-shards-on-selected" { SELECT master_drop_all_shards('se step "s2-create-non-distributed-table-on-selected" { CREATE TABLE select_of_insert_select_hash(id integer, data text); } step "s2-distribute-table-on-selected" { SELECT create_distributed_table('select_of_insert_select_hash', 'id'); } -# permutations - INSERT/SELECT vs INSERT/SELECT +// permutations - INSERT/SELECT vs INSERT/SELECT permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert-select" "s1-commit" "s1-select-count" -# permutations - INSERT/SELECT first operation on INSERT side +// permutations - INSERT/SELECT first operation on INSERT side permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-update-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-delete-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-truncate-on-inserted" "s1-commit" "s1-select-count" @@ -118,7 +118,7 @@ permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-modify-mult permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-drop-all-shards-on-inserted" "s1-commit" "s1-select-count" permutation "s1-drop-on-inserted" "s1-create-non-distributed-table-on-inserted" "s1-initialize" "s1-begin" "s1-insert-select" "s2-distribute-table-on-inserted" "s1-commit" "s1-select-count" -# permutations - INSERT/SELECT first operation on SELECT side +// permutations - INSERT/SELECT first operation on SELECT side permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-update-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-delete-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" 
"s1-insert-select" "s2-truncate-on-selected" "s1-commit" "s1-select-count" @@ -134,7 +134,7 @@ permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-modify-mult permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-drop-all-shards-on-selected" "s1-commit" "s1-select-count" permutation "s1-drop-on-selected" "s1-create-non-distributed-table-on-selected" "s1-initialize" "s1-begin" "s1-insert-select" "s2-distribute-table-on-selected" "s1-commit" "s1-select-count" -# permutations - INSERT/SELECT second on INSERT side +// permutations - INSERT/SELECT second on INSERT side permutation "s1-initialize" "s1-begin" "s1-update-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" @@ -149,7 +149,7 @@ permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards-on-inse permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-drop-on-inserted" "s1-create-non-distributed-table-on-inserted" "s1-initialize" "s1-begin" "s1-distribute-table-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" -# permutations - INSERT/SELECT second on SELECT side +// permutations - INSERT/SELECT second on SELECT side permutation "s1-initialize" "s1-begin" "s1-update-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_insert_select_vs_all_on_mx.spec b/src/test/regress/spec/isolation_insert_select_vs_all_on_mx.spec similarity index 73% rename from src/test/regress/specs/isolation_insert_select_vs_all_on_mx.spec rename to src/test/regress/spec/isolation_insert_select_vs_all_on_mx.spec index 63c234171..4073ef027 100644 --- a/src/test/regress/specs/isolation_insert_select_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_insert_select_vs_all_on_mx.spec @@ -1,45 +1,14 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. 
+#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE dist_table(id integer, value integer); SELECT create_distributed_table('dist_table', 'id'); COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS dist_table CASCADE; @@ -48,7 +17,7 @@ teardown session "s1" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -88,7 +57,7 @@ step "s2-begin" BEGIN; } -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s2-start-session-level-connection" { @@ -142,7 +111,7 @@ step "s2-select-for-update" step "s2-coordinator-create-index-concurrently" { - CREATE INDEX CONCURRENTLY dist_table_index ON dist_table(id); + CREATE INDEX CONCURRENTLY dist_table_index ON dist_table(id); } step "s2-commit-worker" @@ -186,6 +155,6 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocat permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-begin" "s2-coordinator-drop" "s1-commit-worker" "s2-commit" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocated-insert-select" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" -#Not able to test the next permutations, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. 
Issue #2966 -#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocated-insert-select" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" -#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" +//Not able to test the next permutations, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 +//permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocated-insert-select" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" +//permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" diff --git a/src/test/regress/specs/isolation_insert_vs_all.spec b/src/test/regress/spec/isolation_insert_vs_all.spec similarity index 96% rename from src/test/regress/specs/isolation_insert_vs_all.spec rename to src/test/regress/spec/isolation_insert_vs_all.spec index d70a96855..36448fe42 100644 --- a/src/test/regress/specs/isolation_insert_vs_all.spec +++ b/src/test/regress/spec/isolation_insert_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. +// -# create range distributed table to test behavior of INSERT in concurrent operations +// create range distributed table to test behavior of INSERT in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -10,13 +10,13 @@ setup SELECT create_distributed_table('insert_hash', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS insert_hash CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-begin" { BEGIN; } @@ -41,7 +41,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-insert" { INSERT INTO insert_hash VALUES(7, 'k'); } step "s2-insert-multi-row" { INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); } @@ -60,13 +60,13 @@ step "s2-table-size" { SELECT citus_total_relation_size('insert_hash'); } step "s2-master-modify-multiple-shards" { DELETE FROM insert_hash; } step "s2-distribute-table" { SELECT create_distributed_table('insert_hash', 'id'); } -# permutations - INSERT vs INSERT +// permutations - INSERT vs INSERT permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert-multi-row" "s1-commit" "s1-select-count" -# permutations - INSERT first +// permutations - INSERT first permutation "s1-initialize" "s1-begin" 
"s1-insert" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-delete" "s1-commit" "s1-select-count" @@ -82,7 +82,7 @@ permutation "s1-initialize" "s1-begin" "s1-insert" "s2-table-size" "s1-commit" " permutation "s1-initialize" "s1-begin" "s1-insert" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-insert" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - INSERT second +// permutations - INSERT second permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-insert" "s1-commit" "s1-select-count" @@ -97,7 +97,7 @@ permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-insert" "s1-commit" " permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-insert" "s1-commit" "s1-select-count" -# permutations - multi row INSERT first +// permutations - multi row INSERT first permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-delete" "s1-commit" "s1-select-count" @@ -113,7 +113,7 @@ permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-table-size" "s1 permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - multi row INSERT second +// permutations - multi row INSERT second permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-insert-multi-row" "s1-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_insert_vs_all_on_mx.spec b/src/test/regress/spec/isolation_insert_vs_all_on_mx.spec similarity index 73% rename from src/test/regress/specs/isolation_insert_vs_all_on_mx.spec rename to src/test/regress/spec/isolation_insert_vs_all_on_mx.spec index 51e57eb6f..88f2e0603 100644 --- a/src/test/regress/specs/isolation_insert_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_insert_vs_all_on_mx.spec @@ -1,54 +1,24 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. 
+#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE insert_table(id integer, value integer); SELECT create_distributed_table('insert_table', 'id'); COPY insert_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS insert_table CASCADE; SELECT citus_internal.restore_isolation_tester_func(); } + session "s1" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -83,7 +53,7 @@ step "s1-stop-connection" session "s2" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s2-start-session-level-connection" { @@ -177,5 +147,5 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert- permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection" -#Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. 
Issue #2966 -#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" +//Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 +//permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" diff --git a/src/test/regress/specs/isolation_insert_vs_vacuum.spec b/src/test/regress/spec/isolation_insert_vs_vacuum.spec similarity index 85% rename from src/test/regress/specs/isolation_insert_vs_vacuum.spec rename to src/test/regress/spec/isolation_insert_vs_vacuum.spec index 050e6dc82..71991d1a2 100644 --- a/src/test/regress/specs/isolation_insert_vs_vacuum.spec +++ b/src/test/regress/spec/isolation_insert_vs_vacuum.spec @@ -39,9 +39,9 @@ step "s2-vacuum-full" VACUUM FULL test_insert_vacuum; } -# INSERT and VACUUM ANALYZE should not block each other. +// INSERT and VACUUM ANALYZE should not block each other. permutation "s1-begin" "s1-insert" "s2-vacuum-analyze" "s1-commit" -# INSERT and VACUUM FULL should block each other. +// INSERT and VACUUM FULL should block each other. permutation "s1-begin" "s1-insert" "s2-vacuum-full" "s1-commit" diff --git a/src/test/regress/specs/isolation_master_append_table.spec b/src/test/regress/spec/isolation_master_append_table.spec similarity index 95% rename from src/test/regress/specs/isolation_master_append_table.spec rename to src/test/regress/spec/isolation_master_append_table.spec index 728deaf56..9f58ffd1c 100644 --- a/src/test/regress/specs/isolation_master_append_table.spec +++ b/src/test/regress/spec/isolation_master_append_table.spec @@ -60,5 +60,5 @@ step "s2-commit" COMMIT; } -# concurrent master_append_table_to_shard tests +// concurrent master_append_table_to_shard tests permutation "s1-begin" "s2-begin" "s1-master_append_table_to_shard" "s2-master_append_table_to_shard" "s1-commit" "s2-commit" diff --git a/src/test/regress/specs/isolation_master_apply_delete.spec b/src/test/regress/spec/isolation_master_apply_delete.spec similarity index 95% rename from src/test/regress/specs/isolation_master_apply_delete.spec rename to src/test/regress/spec/isolation_master_apply_delete.spec index e20c50215..7b5f36219 100644 --- a/src/test/regress/specs/isolation_master_apply_delete.spec +++ b/src/test/regress/spec/isolation_master_apply_delete.spec @@ -56,7 +56,7 @@ step "s2-commit" COMMIT; } -#concurrent master_apply_delete_command vs master_apply_delete_command +//concurrent master_apply_delete_command vs master_apply_delete_command permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_all_shard" "s2-master_apply_delete_command_all_shard" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_all_shard" "s2-master_apply_delete_command_row" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_row" "s2-master_apply_delete_command_all_shard" "s1-commit" "s2-commit" diff --git a/src/test/regress/specs/isolation_master_update_node.spec b/src/test/regress/spec/isolation_master_update_node.spec similarity index 100% rename from src/test/regress/specs/isolation_master_update_node.spec rename to src/test/regress/spec/isolation_master_update_node.spec diff --git a/src/test/regress/specs/isolation_modify_with_subquery_vs_dml.spec 
b/src/test/regress/spec/isolation_modify_with_subquery_vs_dml.spec similarity index 97% rename from src/test/regress/specs/isolation_modify_with_subquery_vs_dml.spec rename to src/test/regress/spec/isolation_modify_with_subquery_vs_dml.spec index 9af6aba72..d3e193bd3 100644 --- a/src/test/regress/specs/isolation_modify_with_subquery_vs_dml.spec +++ b/src/test/regress/spec/isolation_modify_with_subquery_vs_dml.spec @@ -81,7 +81,7 @@ step "s2-commit" COMMIT; } -# tests to check locks on subqueries are taken +// tests to check locks on subqueries are taken permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-insert_to_events_test_table" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-update_events_test_table" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-delete_events_test_table" "s2-commit" "s1-commit" diff --git a/src/test/regress/specs/isolation_multi_shard_modify_vs_all.spec b/src/test/regress/spec/isolation_multi_shard_modify_vs_all.spec similarity index 89% rename from src/test/regress/specs/isolation_multi_shard_modify_vs_all.spec rename to src/test/regress/spec/isolation_multi_shard_modify_vs_all.spec index cb86f9f76..6424863a1 100644 --- a/src/test/regress/specs/isolation_multi_shard_modify_vs_all.spec +++ b/src/test/regress/spec/isolation_multi_shard_modify_vs_all.spec @@ -136,34 +136,34 @@ step "s2-commit" COMMIT; } -# test with parallel connections +// test with parallel connections permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-select" "s1-commit" "s2-select" "s2-commit" permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-update_all_value_1" "s1-commit" "s2-commit" -# test without deadlock prevention (first does not conflict, second does) +// test without deadlock prevention (first does not conflict, second does) permutation "s1-begin" "s1-update_even_concurrently" "s2-begin" "s2-update_odd_concurrently" "s1-commit" "s2-commit" permutation "s1-begin" "s1-update_even_concurrently" "s2-begin" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit" -# test with shard pruning (should not conflict) +// test with shard pruning (should not conflict) permutation "s1-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" "s2-select" -# test with inserts +// test with inserts permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-insert-to-table" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-insert-into-select" "s1-commit" "s2-commit" "s2-select" -# multi-shard update affecting the same rows +// multi-shard update affecting the same rows permutation "s1-begin" "s2-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" -# multi-shard update affecting the different rows +// multi-shard update affecting the different rows permutation "s1-begin" "s2-begin" "s2-update_value_1_of_1_or_3_to_8" "s1-update_value_1_of_2_or_4_to_5" "s2-commit" "s1-commit" -# test with sequential connections, sequential tests should not block each other -# if they are targeting different shards. If multiple connections updating the same -# row, second one must wait for the first one. 
+// test with sequential connections, sequential tests should not block each other +// if they are targeting different shards. If multiple connections updating the same +// row, second one must wait for the first one. permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_all_value_1" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_all_value_1" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit" "s2-select" -# multi-shard update affecting the same rows +// multi-shard update affecting the same rows permutation "s1-begin" "s2-begin" "s1-change_connection_mode_to_sequential" "s2-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" -# multi-shard update affecting the different rows +// multi-shard update affecting the different rows permutation "s1-begin" "s2-begin" "s1-change_connection_mode_to_sequential" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_1_or_3_to_8" "s1-update_value_1_of_2_or_4_to_5" "s1-commit" "s2-commit" diff --git a/src/test/regress/specs/isolation_multiuser_locking.spec b/src/test/regress/spec/isolation_multiuser_locking.spec similarity index 94% rename from src/test/regress/specs/isolation_multiuser_locking.spec rename to src/test/regress/spec/isolation_multiuser_locking.spec index 30b19d7fb..af225ce61 100644 --- a/src/test/regress/specs/isolation_multiuser_locking.spec +++ b/src/test/regress/spec/isolation_multiuser_locking.spec @@ -25,7 +25,7 @@ teardown session "s1" -# run_command_on_placements is done in a separate step because the setup is executed as a single transaction +// run_command_on_placements is done in a separate step because the setup is executed as a single transaction step "s1-grant" { SET ROLE test_user_1; @@ -117,17 +117,17 @@ step "s2-commit" COMMIT; } -# REINDEX +// REINDEX permutation "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-commit" "s1-commit" permutation "s1-grant" "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-insert" "s2-commit" "s1-commit" permutation "s1-grant" "s1-begin" "s2-begin" "s1-reindex" "s2-insert" "s1-insert" "s1-commit" "s2-commit" -# CREATE INDEX +// CREATE INDEX permutation "s1-begin" "s2-begin" "s2-index" "s1-insert" "s2-commit" "s1-commit" "s2-drop-index" permutation "s1-grant" "s1-begin" "s2-begin" "s2-insert" "s1-index" "s2-insert" "s2-commit" "s1-commit" "s1-drop-index" permutation "s1-grant" "s1-begin" "s2-begin" "s1-index" "s2-index" "s1-insert" "s1-commit" "s2-commit" "s1-drop-index" "s2-drop-index" -# TRUNCATE +// TRUNCATE permutation "s1-begin" "s2-begin" "s2-truncate" "s1-insert" "s2-commit" "s1-commit" permutation "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-insert" "s1-insert" "s1-commit" "s2-commit" permutation "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" diff --git a/src/test/regress/spec/isolation_mx_common.include.spec b/src/test/regress/spec/isolation_mx_common.include.spec new file mode 100644 index 000000000..4b6127660 --- /dev/null +++ b/src/test/regress/spec/isolation_mx_common.include.spec @@ -0,0 +1,35 
@@
+// Create and use UDF to send commands from the same connection. Also make the cluster
+// ready for testing MX functionalities.
+setup
+{
+    CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
+        RETURNS void
+        LANGUAGE C STRICT VOLATILE
+        AS 'citus', $$start_session_level_connection_to_node$$;
+
+    CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
+        RETURNS void
+        LANGUAGE C STRICT VOLATILE
+        AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
+
+    CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
+        RETURNS void
+        LANGUAGE C STRICT VOLATILE
+        AS 'citus', $$stop_session_level_connection_to_node$$;
+
+    SELECT citus_internal.replace_isolation_tester_func();
+    SELECT citus_internal.refresh_isolation_tester_prepared_statement();
+
+    -- start_metadata_sync_to_node can not be run inside a transaction block
+    -- following is a workaround to overcome that
+    -- port numbers are hard coded at the moment
+    SELECT master_run_on_worker(
+        ARRAY['localhost']::text[],
+        ARRAY[57636]::int[],
+        ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
+        false)
+    FROM pg_dist_node;
+
+    SET citus.replication_model to streaming;
+    SET citus.shard_replication_factor TO 1;
+}
diff --git a/src/test/regress/specs/isolation_partitioned_copy_vs_all.spec b/src/test/regress/spec/isolation_partitioned_copy_vs_all.spec
similarity index 96%
rename from src/test/regress/specs/isolation_partitioned_copy_vs_all.spec
rename to src/test/regress/spec/isolation_partitioned_copy_vs_all.spec
index 2f00dc770..83b683845 100644
--- a/src/test/regress/specs/isolation_partitioned_copy_vs_all.spec
+++ b/src/test/regress/spec/isolation_partitioned_copy_vs_all.spec
@@ -1,8 +1,8 @@
-#
-# How we organize this isolation test spec, is explained at README.md file in this directory.
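For reference, a minimal template that consumes this new shared include follows the same pattern as the converted MX specs elsewhere in this patch: it starts with the #include line and then defines only its own table setup, teardown, sessions, and permutations. In the sketch below the table name example_mx_table is purely illustrative and does not appear anywhere in this patch:

#include "isolation_mx_common.include.spec"

setup
{
    CREATE TABLE example_mx_table(id integer, value integer);
    SELECT create_distributed_table('example_mx_table', 'id');
}

// close the session-level connections opened by the steps and restore the
// isolation tester helpers replaced by the shared setup
teardown
{
    DROP TABLE IF EXISTS example_mx_table CASCADE;
    SELECT citus_internal.restore_isolation_tester_func();
}

// session, step, and permutation definitions follow, as in the spec files below.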
+// -# create append distributed table to test behavior of COPY in concurrent operations +// create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -13,13 +13,13 @@ setup SELECT create_distributed_table('partitioned_copy', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS partitioned_copy CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } @@ -54,7 +54,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-copy" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } @@ -82,10 +82,10 @@ step "s2-master-modify-multiple-shards" { DELETE FROM partitioned_copy; } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('partitioned_copy', 'id'); } -# permutations - COPY vs COPY +// permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" -# permutations - COPY first +// permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" @@ -103,7 +103,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shar permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - COPY second +// permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_progress_monitoring.spec b/src/test/regress/spec/isolation_progress_monitoring.spec similarity index 88% rename from src/test/regress/specs/isolation_progress_monitoring.spec rename to src/test/regress/spec/isolation_progress_monitoring.spec index fae840a69..225451ec8 100644 --- a/src/test/regress/specs/isolation_progress_monitoring.spec +++ b/src/test/regress/spec/isolation_progress_monitoring.spec @@ -1,8 +1,8 @@ -# Isolation tests for checking the progress monitoring infrastructure -# We create three different processes, two of the type "1337" and one of type "3778" -# We utilize advisory locks to control steps of the 
processes -# Different locks are held for each step so that the processes stop at each step and -# we can see their progress. +// Isolation tests for checking the progress monitoring infrastructure +// We create three different processes, two of the type "1337" and one of type "3778" +// We utilize advisory locks to control steps of the processes +// Different locks are held for each step so that the processes stop at each step and +// we can see their progress. setup { diff --git a/src/test/regress/specs/isolation_range_copy_vs_all.spec b/src/test/regress/spec/isolation_range_copy_vs_all.spec similarity index 96% rename from src/test/regress/specs/isolation_range_copy_vs_all.spec rename to src/test/regress/spec/isolation_range_copy_vs_all.spec index b8f1d3d0f..590553486 100644 --- a/src/test/regress/specs/isolation_range_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_range_copy_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. +// -# create append distributed table to test behavior of COPY in concurrent operations +// create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -10,13 +10,13 @@ setup SELECT create_distributed_table('range_copy', 'id', 'append'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS range_copy CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } @@ -53,7 +53,7 @@ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-copy" { COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY range_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } @@ -82,10 +82,10 @@ step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELE step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); } -# permutations - COPY vs COPY +// permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" -# permutations - COPY first +// permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" @@ -107,7 +107,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-apply-delete-command permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - COPY second +// permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" 
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_ref2ref_foreign_keys.spec b/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec similarity index 82% rename from src/test/regress/specs/isolation_ref2ref_foreign_keys.spec rename to src/test/regress/spec/isolation_ref2ref_foreign_keys.spec index bcfb459fd..6137df058 100644 --- a/src/test/regress/specs/isolation_ref2ref_foreign_keys.spec +++ b/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec @@ -151,49 +151,49 @@ step "s2-commit" COMMIT; } -# Check that we get necessary resource locks +// Check that we get necessary resource locks -# Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode. +// Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode. permutation "s2-begin" "s2-update-table-1" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-delete-table-1" "s1-view-locks" "s2-rollback" "s1-view-locks" -# Case 2. Modifying ref_table_2 should also lock ref_table_1 shard in Exclusive mode. +// Case 2. Modifying ref_table_2 should also lock ref_table_1 shard in Exclusive mode. permutation "s2-begin" "s2-update-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-delete-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks" -# Case 3. Modifying ref_table_3 should also lock ref_table_1 and ref_table_2 shards in Exclusive mode. +// Case 3. Modifying ref_table_3 should also lock ref_table_1 and ref_table_2 shards in Exclusive mode. permutation "s2-begin" "s2-update-table-3" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-delete-table-3" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks" -# Case 4. Inserting into ref_table_1 should only lock its own shard in RowExclusive mode. +// Case 4. Inserting into ref_table_1 should only lock its own shard in RowExclusive mode. permutation "s2-begin" "s2-insert-table-1" "s1-view-locks" "s2-rollback" "s1-view-locks" -# Case 5. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. +// Case 5. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. permutation "s2-begin" "s2-insert-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks" -# Case 6. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. +// Case 6. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. permutation "s2-begin" "s2-insert-table-3" "s1-view-locks" "s2-rollback" "s1-view-locks" -# Now some concurrent operations +// Now some concurrent operations -# Updates/Deletes from ref_table_1 cascade to ref_table_2, so DML on ref_table_2 should block -# Case 1. UPDATE -> DELETE +// Updates/Deletes from ref_table_1 cascade to ref_table_2, so DML on ref_table_2 should block +// Case 1. UPDATE -> DELETE permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-delete-table-2" "s2-commit" "s1-commit" "s1-select-table-2" -# Case 2. UPDATE -> INSERT +// Case 2. UPDATE -> INSERT permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-insert-table-2" "s2-commit" "s1-commit" "s1-select-table-2" -# Case 3. UPDATE -> UPDATE +// Case 3. UPDATE -> UPDATE permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-2" -# Case 4. 
DELETE -> DELETE +// Case 4. DELETE -> DELETE permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-delete-table-2" "s2-commit" "s1-commit" "s1-select-table-2" -# Case 5. DELETE -> INSERT +// Case 5. DELETE -> INSERT permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-insert-table-2" "s2-commit" "s1-commit" "s1-select-table-2" -# Case 6. DELETE -> UPDATE +// Case 6. DELETE -> UPDATE permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-2" -# Deletes from ref_table_1 can transitively cascade to ref_table_3, so DML on ref_table_3 should block -# Case 1. DELETE -> DELETE +// Deletes from ref_table_1 can transitively cascade to ref_table_3, so DML on ref_table_3 should block +// Case 1. DELETE -> DELETE permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-delete-table-3" "s2-commit" "s1-commit" "s1-select-table-3" -# Case 2. DELETE -> INSERT, should error out +// Case 2. DELETE -> INSERT, should error out permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-insert-table-3" "s2-commit" "s1-commit" "s1-select-table-3" -# Case 3. DELETE -> UPDATE +// Case 3. DELETE -> UPDATE permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3" -# Any DML on any of ref_table_{1,2,3} should block others from DML in the foreign constraint graph ... +// Any DML on any of ref_table_{1,2,3} should block others from DML in the foreign constraint graph ... permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s1-update-table-3" "s2-insert-table-1" "s1-commit" "s2-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-3" @@ -201,7 +201,7 @@ permutation "s1-begin" "s2-begin" "s1-update-table-2" "s2-insert-table-1" "s1-co permutation "s1-begin" "s2-begin" "s2-insert-table-2" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s1-update-table-3" "s2-insert-table-2" "s1-commit" "s2-commit" "s1-select-table-3" -# DMLs shouldn't block select on tables in the same foreign constraint graph +// DMLs shouldn't block select on tables in the same foreign constraint graph permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-1" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-2" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-3" "s2-commit" "s1-commit" diff --git a/src/test/regress/specs/isolation_ref2ref_foreign_keys_on_mx.spec b/src/test/regress/spec/isolation_ref2ref_foreign_keys_on_mx.spec similarity index 82% rename from src/test/regress/specs/isolation_ref2ref_foreign_keys_on_mx.spec rename to src/test/regress/spec/isolation_ref2ref_foreign_keys_on_mx.spec index d9f0e269e..c59963188 100644 --- a/src/test/regress/specs/isolation_ref2ref_foreign_keys_on_mx.spec +++ b/src/test/regress/spec/isolation_ref2ref_foreign_keys_on_mx.spec @@ -1,20 +1,7 @@ +#include "isolation_mx_common.include.spec" + setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block. - -- Following is a workaround to overcome that. Port numbers are hard coded - -- at the moment. 
- SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - CREATE TABLE ref_table_1(id int PRIMARY KEY, value int); SELECT create_reference_table('ref_table_1'); @@ -136,18 +123,18 @@ step "s2-stop-connection" SELECT stop_session_level_connection_to_node(); } -# Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode. +// Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode. permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" -# Case 2. Modifying ref_table_2 should also lock ref_table_1 shard in Exclusive mode. +// Case 2. Modifying ref_table_2 should also lock ref_table_1 shard in Exclusive mode. permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" -# Case 3. Modifying ref_table_3 should also lock ref_table_1 and ref_table_2 shards in Exclusive mode. +// Case 3. Modifying ref_table_3 should also lock ref_table_1 and ref_table_2 shards in Exclusive mode. permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" -# Case 4. Inserting into ref_table_1 should only lock its own shard in RowExclusive mode. +// Case 4. Inserting into ref_table_1 should only lock its own shard in RowExclusive mode. permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" -# Case 5. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. +// Case 5. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" -# Case 6. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. +// Case 6. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. 
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" diff --git a/src/test/regress/specs/isolation_ref_select_for_update_vs_all_on_mx.spec b/src/test/regress/spec/isolation_ref_select_for_update_vs_all_on_mx.spec similarity index 70% rename from src/test/regress/specs/isolation_ref_select_for_update_vs_all_on_mx.spec rename to src/test/regress/spec/isolation_ref_select_for_update_vs_all_on_mx.spec index 2d86eac43..8f7e89945 100644 --- a/src/test/regress/specs/isolation_ref_select_for_update_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_ref_select_for_update_vs_all_on_mx.spec @@ -1,44 +1,13 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. +#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE ref_table(id integer, value integer); SELECT create_reference_table('ref_table'); } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS ref_table CASCADE; @@ -47,7 +16,7 @@ teardown session "s1" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -77,7 +46,7 @@ step "s1-stop-connection" session "s2" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. 
step "s2-start-session-level-connection" { diff --git a/src/test/regress/specs/isolation_ref_update_delete_upsert_vs_all_on_mx.spec b/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec similarity index 60% rename from src/test/regress/specs/isolation_ref_update_delete_upsert_vs_all_on_mx.spec rename to src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec index 15655abae..952bf079c 100644 --- a/src/test/regress/specs/isolation_ref_update_delete_upsert_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec @@ -1,44 +1,13 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. +#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE ref_table(id integer, value integer); SELECT create_reference_table('ref_table'); } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS ref_table CASCADE; @@ -52,7 +21,7 @@ step "s1-add-primary-key" ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id); } -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -92,7 +61,7 @@ step "s1-stop-connection" session "s2" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. 
step "s2-start-session-level-connection" { @@ -153,5 +122,5 @@ permutation "s1-add-primary-key""s1-start-session-level-connection" "s1-begin-on permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-select-ref-table" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-drop" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" -#Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 -#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" +//Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 +//permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" diff --git a/src/test/regress/specs/isolation_reference_copy_vs_all.spec b/src/test/regress/spec/isolation_reference_copy_vs_all.spec similarity index 96% rename from src/test/regress/specs/isolation_reference_copy_vs_all.spec rename to src/test/regress/spec/isolation_reference_copy_vs_all.spec index 8179fb7dc..b3e7d366e 100644 --- a/src/test/regress/specs/isolation_reference_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_reference_copy_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. 
+// -# create append distributed table to test behavior of COPY in concurrent operations +// create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -10,13 +10,13 @@ setup SELECT create_reference_table('reference_copy'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS reference_copy CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } @@ -51,7 +51,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-copy" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } @@ -81,10 +81,10 @@ step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('reference_copy step "s2-create-non-distributed-table" { CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s2-distribute-table" { SELECT create_reference_table('reference_copy'); } -# permutations - COPY vs COPY +// permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" -# permutations - COPY first +// permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" @@ -104,7 +104,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1 permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - COPY second +// permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_reference_on_mx.spec b/src/test/regress/spec/isolation_reference_on_mx.spec similarity index 77% rename from src/test/regress/specs/isolation_reference_on_mx.spec rename to src/test/regress/spec/isolation_reference_on_mx.spec index e97dc776e..6b63e4162 100644 --- a/src/test/regress/specs/isolation_reference_on_mx.spec +++ b/src/test/regress/spec/isolation_reference_on_mx.spec @@ -1,44 +1,14 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. 
+#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - CREATE TABLE ref_table(user_id int, value_1 int); SELECT create_reference_table('ref_table'); INSERT INTO ref_table VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71); } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE ref_table; @@ -52,7 +22,7 @@ step "s1-begin" BEGIN; } -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { diff --git a/src/test/regress/specs/isolation_replace_wait_function.spec b/src/test/regress/spec/isolation_replace_wait_function.spec similarity index 69% rename from src/test/regress/specs/isolation_replace_wait_function.spec rename to src/test/regress/spec/isolation_replace_wait_function.spec index 42b05e4b6..5790ef57f 100644 --- a/src/test/regress/specs/isolation_replace_wait_function.spec +++ b/src/test/regress/spec/isolation_replace_wait_function.spec @@ -1,7 +1,7 @@ -# check that replace_isolation_tester_func correctly replaces the functions isolation -# tester uses while searching for locks. If those functions aren't correctly replaced -# this test will timeout, since isolation tester will never notice that s2 is blocked -# by s1 on a lock it's taken out on one of the workers +// check that replace_isolation_tester_func correctly replaces the functions isolation +// tester uses while searching for locks. 
If those functions aren't correctly replaced +// this test will timeout, since isolation tester will never notice that s2 is blocked +// by s1 on a lock it's taken out on one of the workers setup { diff --git a/src/test/regress/specs/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec similarity index 82% rename from src/test/regress/specs/isolation_replicate_reference_tables_to_coordinator.spec rename to src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index 46fd9222d..059b46782 100644 --- a/src/test/regress/specs/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -110,25 +110,25 @@ step "s2-active-transactions" SELECT count(*) FROM get_global_active_transactions(); } -# we disable the daemon during the regression tests in order to get consistent results -# thus we manually issue the deadlock detection +// we disable the daemon during the regression tests in order to get consistent results +// thus we manually issue the deadlock detection session "deadlock-checker" -# we issue the checker not only when there are deadlocks to ensure that we never cancel -# backend inappropriately +// we issue the checker not only when there are deadlocks to ensure that we never cancel +// backend inappropriately step "deadlock-checker-call" { SELECT check_distributed_deadlocks(); } -# verify that locks on the placement of the reference table on the coordinator is -# taken into account when looking for distributed deadlocks +// verify that locks on the placement of the reference table on the coordinator is +// taken into account when looking for distributed deadlocks permutation "s1-begin" "s2-begin" "s1-update-dist-table" "s2-lock-ref-table-placement-on-coordinator" "s1-lock-ref-table-placement-on-coordinator" "s2-update-dist-table" "deadlock-checker-call" "s1-end" "s2-end" -# verify that *_dist_stat_activity() functions return the correct result when query -# has a task on the coordinator. +// verify that *_dist_stat_activity() functions return the correct result when query +// has a task on the coordinator. permutation "s1-begin" "s2-begin" "s1-update-ref-table" "s2-sleep" "s2-view-dist" "s2-view-worker" "s2-end" "s1-end" -# verify that get_*_active_transactions() functions return the correct result when -# the query has a task on the coordinator. +// verify that get_*_active_transactions() functions return the correct result when +// the query has a task on the coordinator. permutation "s1-begin" "s2-begin" "s1-update-ref-table" "s2-active-transactions" "s1-end" "s2-end" diff --git a/src/test/regress/specs/isolation_select_for_update.spec b/src/test/regress/spec/isolation_select_for_update.spec similarity index 100% rename from src/test/regress/specs/isolation_select_for_update.spec rename to src/test/regress/spec/isolation_select_for_update.spec diff --git a/src/test/regress/specs/isolation_select_vs_all.spec b/src/test/regress/spec/isolation_select_vs_all.spec similarity index 96% rename from src/test/regress/specs/isolation_select_vs_all.spec rename to src/test/regress/spec/isolation_select_vs_all.spec index cfeeeb6f3..49f0740c0 100644 --- a/src/test/regress/specs/isolation_select_vs_all.spec +++ b/src/test/regress/spec/isolation_select_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. 
-# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. +// -# create range distributed table to test behavior of SELECT in concurrent operations +// create range distributed table to test behavior of SELECT in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -10,13 +10,13 @@ setup SELECT create_distributed_table('select_append', 'id', 'append'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS select_append CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } @@ -49,7 +49,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-router-select" { SELECT * FROM select_append WHERE id = 1; } step "s2-real-time-select" { SELECT * FROM select_append ORDER BY 1, 2; } @@ -76,7 +76,7 @@ step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELE step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('select_append', 'id', 'append'); } -# permutations - SELECT vs SELECT +// permutations - SELECT vs SELECT permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" @@ -87,7 +87,7 @@ permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-router-selec permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" -# permutations - router SELECT first +// permutations - router SELECT first permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-update" "s1-commit" "s1-select-count" @@ -106,7 +106,7 @@ permutation "s1-initialize" "s1-begin" "s2-master-apply-delete-command" "s1-comm permutation "s1-initialize" "s1-begin" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-router-select" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - router SELECT second +// permutations - router SELECT second permutation "s1-initialize" "s1-begin" "s1-insert" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-router-select" "s1-commit" "s1-select-count" @@ -124,7 +124,7 @@ permutation "s1-initialize" "s1-begin" "s1-master-apply-delete-command" "s1-comm permutation "s1-initialize" 
"s1-begin" "s1-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-router-select" "s1-commit" "s1-select-count" -# permutations - real-time SELECT first +// permutations - real-time SELECT first permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-update" "s1-commit" "s1-select-count" @@ -141,7 +141,7 @@ permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-table-size" "s1 permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-real-time-select" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - real-time SELECT second +// permutations - real-time SELECT second permutation "s1-initialize" "s1-begin" "s1-insert" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-real-time-select" "s1-commit" "s1-select-count" @@ -157,7 +157,7 @@ permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-real-time-select" "s1 permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-real-time-select" "s1-commit" "s1-select-count" -# permutations - task-tracker SELECT first +// permutations - task-tracker SELECT first permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-update" "s1-commit" "s1-select-count" @@ -174,7 +174,7 @@ permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-table-size" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-task-tracker-select" "s2-distribute-table" "s1-commit" "s1-select-count" -# permutations - task-tracker SELECT second +// permutations - task-tracker SELECT second permutation "s1-initialize" "s1-begin" "s1-insert" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-task-tracker-select" "s1-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_select_vs_all_on_mx.spec b/src/test/regress/spec/isolation_select_vs_all_on_mx.spec similarity index 69% rename from src/test/regress/specs/isolation_select_vs_all_on_mx.spec rename to src/test/regress/spec/isolation_select_vs_all_on_mx.spec index 66261a926..b5ae28371 100644 --- a/src/test/regress/specs/isolation_select_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_select_vs_all_on_mx.spec @@ -1,45 +1,14 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. 
+#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE select_table(id integer, value integer); SELECT create_distributed_table('select_table', 'id'); COPY select_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS select_table CASCADE; @@ -48,7 +17,7 @@ teardown session "s1" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -83,7 +52,7 @@ step "s2-begin" BEGIN; } -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. 
step "s2-start-session-level-connection" { diff --git a/src/test/regress/specs/isolation_shouldhaveshards.spec b/src/test/regress/spec/isolation_shouldhaveshards.spec similarity index 92% rename from src/test/regress/specs/isolation_shouldhaveshards.spec rename to src/test/regress/spec/isolation_shouldhaveshards.spec index fd75fa422..4df530540 100644 --- a/src/test/regress/specs/isolation_shouldhaveshards.spec +++ b/src/test/regress/spec/isolation_shouldhaveshards.spec @@ -1,5 +1,5 @@ -# the test expects to have zero nodes in pg_dist_node at the beginning -# add single one of the nodes for the purpose of the test +// the test expects to have zero nodes in pg_dist_node at the beginning +// add single one of the nodes for the purpose of the test setup { SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; diff --git a/src/test/regress/specs/isolation_transaction_recovery.spec b/src/test/regress/spec/isolation_transaction_recovery.spec similarity index 87% rename from src/test/regress/specs/isolation_transaction_recovery.spec rename to src/test/regress/spec/isolation_transaction_recovery.spec index 5c1ee7557..a86894ed7 100644 --- a/src/test/regress/specs/isolation_transaction_recovery.spec +++ b/src/test/regress/spec/isolation_transaction_recovery.spec @@ -38,8 +38,8 @@ step "s2-recover" SELECT recover_prepared_transactions(); } -# Recovery and 2PCs should not block each other +// Recovery and 2PCs should not block each other permutation "s1-begin" "s1-recover" "s2-insert" "s1-commit" -# Recovery should not run concurrently +// Recovery should not run concurrently permutation "s1-begin" "s1-recover" "s2-recover" "s1-commit" diff --git a/src/test/regress/specs/isolation_truncate_vs_all.spec b/src/test/regress/spec/isolation_truncate_vs_all.spec similarity index 95% rename from src/test/regress/specs/isolation_truncate_vs_all.spec rename to src/test/regress/spec/isolation_truncate_vs_all.spec index 98bf9f81f..901f8e6f8 100644 --- a/src/test/regress/specs/isolation_truncate_vs_all.spec +++ b/src/test/regress/spec/isolation_truncate_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. 
+// -# create range distributed table to test behavior of TRUNCATE in concurrent operations +// create range distributed table to test behavior of TRUNCATE in concurrent operations setup { SELECT citus_internal.replace_isolation_tester_func(); @@ -13,7 +13,7 @@ setup SELECT create_distributed_table('truncate_append', 'id', 'append'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS truncate_append CASCADE; @@ -21,7 +21,7 @@ teardown SELECT citus_internal.restore_isolation_tester_func(); } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-begin" { BEGIN; } @@ -43,7 +43,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-truncate" { TRUNCATE truncate_append; } @@ -63,10 +63,10 @@ step "s2-distribute-table" { SELECT create_distributed_table('truncate_append', step "s2-select" { SELECT * FROM truncate_append ORDER BY 1, 2; } step "s2-commit" { COMMIT; } -# permutations - TRUNCATE vs TRUNCATE +// permutations - TRUNCATE vs TRUNCATE permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" -# permutations - TRUNCATE first +// permutations - TRUNCATE first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" @@ -81,7 +81,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-master-apply permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-master-drop-all-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s2-begin" "s1-truncate" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" -# permutations - TRUNCATE second +// permutations - TRUNCATE second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" diff --git a/src/test/regress/specs/isolation_truncate_vs_all_on_mx.spec b/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec similarity index 69% rename from src/test/regress/specs/isolation_truncate_vs_all_on_mx.spec rename to src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec index caeb29e57..445005e66 100644 --- a/src/test/regress/specs/isolation_truncate_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec @@ -1,45 +1,14 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. 
+#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE truncate_table(id integer, value integer); SELECT create_distributed_table('truncate_table', 'id'); COPY truncate_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS truncate_table CASCADE; @@ -53,7 +22,7 @@ step "s1-begin" BEGIN; } -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -118,7 +87,7 @@ step "s1-commit" session "s2" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s2-start-session-level-connection" { diff --git a/src/test/regress/specs/isolation_update_delete_upsert_vs_all_on_mx.spec b/src/test/regress/spec/isolation_update_delete_upsert_vs_all_on_mx.spec similarity index 58% rename from src/test/regress/specs/isolation_update_delete_upsert_vs_all_on_mx.spec rename to src/test/regress/spec/isolation_update_delete_upsert_vs_all_on_mx.spec index f2dca47df..5c473d2ed 100644 --- a/src/test/regress/specs/isolation_update_delete_upsert_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_update_delete_upsert_vs_all_on_mx.spec @@ -1,44 +1,13 @@ -# Create and use UDF to send commands from the same connection. Also make the cluster -# ready for testing MX functionalities. 
+#include "isolation_mx_common.include.spec" + setup { - CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$start_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$run_commands_on_session_level_connection_to_node$$; - - CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node() - RETURNS void - LANGUAGE C STRICT VOLATILE - AS 'citus', $$stop_session_level_connection_to_node$$; - - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - - -- start_metadata_sync_to_node can not be run inside a transaction block - -- following is a workaround to overcome that - -- port numbers are hard coded at the moment - SELECT master_run_on_worker( - ARRAY['localhost']::text[], - ARRAY[57636]::int[], - ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[], - false) - FROM pg_dist_node; - - SET citus.replication_model to streaming; - SET citus.shard_replication_factor TO 1; - CREATE TABLE dist_table(id integer, value integer); SELECT create_distributed_table('dist_table', 'id'); } -# Create and use UDF to close the connection opened in the setup step. Also return the cluster -# back to the initial state. +// Create and use UDF to close the connection opened in the setup step. Also return the cluster +// back to the initial state. teardown { DROP TABLE IF EXISTS dist_table CASCADE; @@ -47,7 +16,7 @@ teardown session "s1" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s1-start-session-level-connection" { @@ -82,7 +51,7 @@ step "s1-stop-connection" session "s2" -# We do not need to begin a transaction on coordinator, since it will be open on workers. +// We do not need to begin a transaction on coordinator, since it will be open on workers. step "s2-start-session-level-connection" { @@ -142,5 +111,5 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-alter-table" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" -#Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 -#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" +//Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. 
Issue #2966 +//permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" diff --git a/src/test/regress/specs/isolation_update_node.spec b/src/test/regress/spec/isolation_update_node.spec similarity index 86% rename from src/test/regress/specs/isolation_update_node.spec rename to src/test/regress/spec/isolation_update_node.spec index 7c9f51c20..b48d5415c 100644 --- a/src/test/regress/specs/isolation_update_node.spec +++ b/src/test/regress/spec/isolation_update_node.spec @@ -94,13 +94,13 @@ step "s2-commit" COMMIT; } -# session 1 updates node 1, session 2 updates node 2, should be ok +// session 1 updates node 1, session 2 updates node 2, should be ok permutation "s1-begin" "s1-update-node-1" "s2-update-node-2" "s1-commit" "s1-show-nodes" -# sessions 1 updates node 1, session 2 tries to do the same +// sessions 1 updates node 1, session 2 tries to do the same permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-update-node-1" "s1-commit" "s2-abort" "s1-show-nodes" -# master_update_node should block start_metadata_sync_to_node. Note that we -# cannot run start_metadata_sync_to_node in a transaction, so we're not -# testing the reverse order here. +// master_update_node should block start_metadata_sync_to_node. Note that we +// cannot run start_metadata_sync_to_node in a transaction, so we're not +// testing the reverse order here. permutation "s1-begin" "s1-update-node-1" "s2-start-metadata-sync-node-2" "s1-commit" "s2-verify-metadata" diff --git a/src/test/regress/specs/isolation_update_node_lock_writes.spec b/src/test/regress/spec/isolation_update_node_lock_writes.spec similarity index 94% rename from src/test/regress/specs/isolation_update_node_lock_writes.spec rename to src/test/regress/spec/isolation_update_node_lock_writes.spec index f174802cd..25d99b3e6 100644 --- a/src/test/regress/specs/isolation_update_node_lock_writes.spec +++ b/src/test/regress/spec/isolation_update_node_lock_writes.spec @@ -65,7 +65,7 @@ step "s2-commit" COMMIT; } -# session 1 updates node 1, session 2 writes should be blocked +// session 1 updates node 1, session 2 writes should be blocked permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-insert" "s1-commit" "s2-abort" permutation "s2-begin" "s2-insert" "s1-update-node-1" "s2-commit" diff --git a/src/test/regress/specs/isolation_update_vs_all.spec b/src/test/regress/spec/isolation_update_vs_all.spec similarity index 94% rename from src/test/regress/specs/isolation_update_vs_all.spec rename to src/test/regress/spec/isolation_update_vs_all.spec index 6abc8fcb9..c7adc75e3 100644 --- a/src/test/regress/specs/isolation_update_vs_all.spec +++ b/src/test/regress/spec/isolation_update_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. 
+// -# create range distributed table to test behavior of UPDATE in concurrent operations +// create range distributed table to test behavior of UPDATE in concurrent operations setup { SELECT citus_internal.replace_isolation_tester_func(); @@ -13,7 +13,7 @@ setup SELECT create_distributed_table('update_hash', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS update_hash CASCADE; @@ -21,7 +21,7 @@ teardown SELECT citus_internal.restore_isolation_tester_func(); } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-begin" { BEGIN; } @@ -43,7 +43,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-update" { UPDATE update_hash SET data = 'l' WHERE id = 4; } @@ -61,10 +61,10 @@ step "s2-master-modify-multiple-shards" { DELETE FROM update_hash; } step "s2-distribute-table" { SELECT create_distributed_table('update_hash', 'id'); } step "s2-commit" { COMMIT; } -# permutations - UPDATE vs UPDATE +// permutations - UPDATE vs UPDATE permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-update" "s1-commit" "s2-commit" "s1-select-count" -# permutations - UPDATE first +// permutations - UPDATE first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" @@ -78,7 +78,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-table-size" "s permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" -# permutations - UPDATE second +// permutations - UPDATE second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-update" "s1-commit" "s2-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_upsert_vs_all.spec b/src/test/regress/spec/isolation_upsert_vs_all.spec similarity index 95% rename from src/test/regress/specs/isolation_upsert_vs_all.spec rename to src/test/regress/spec/isolation_upsert_vs_all.spec index 0e7fe67f7..a168ab79a 100644 --- a/src/test/regress/specs/isolation_upsert_vs_all.spec +++ b/src/test/regress/spec/isolation_upsert_vs_all.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. 
+// -# create range distributed table to test behavior of UPSERT in concurrent operations +// create range distributed table to test behavior of UPSERT in concurrent operations setup { SELECT citus_internal.replace_isolation_tester_func(); @@ -13,7 +13,7 @@ setup SELECT create_distributed_table('upsert_hash', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS upsert_hash CASCADE; @@ -21,7 +21,7 @@ teardown SELECT citus_internal.restore_isolation_tester_func(); } -# session 1 +// session 1 session "s1" step "s1-initialize" { COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-begin" { BEGIN; } @@ -44,7 +44,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-upsert" { INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; } @@ -63,10 +63,10 @@ step "s2-master-modify-multiple-shards" { DELETE FROM upsert_hash; } step "s2-distribute-table" { SELECT create_distributed_table('upsert_hash', 'id'); } step "s2-commit" { COMMIT; } -# permutations - UPSERT vs UPSERT +// permutations - UPSERT vs UPSERT permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" -# permutations - UPSERT first +// permutations - UPSERT first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" @@ -81,7 +81,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-table-size" "s permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" -# permutations - UPSERT second +// permutations - UPSERT second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" diff --git a/src/test/regress/specs/isolation_validate_vs_insert.spec b/src/test/regress/spec/isolation_validate_vs_insert.spec similarity index 81% rename from src/test/regress/specs/isolation_validate_vs_insert.spec rename to src/test/regress/spec/isolation_validate_vs_insert.spec index 81d2d1c77..05e9f81f5 100644 --- a/src/test/regress/specs/isolation_validate_vs_insert.spec +++ b/src/test/regress/spec/isolation_validate_vs_insert.spec @@ -1,8 +1,8 @@ -# -# How we organize this isolation test spec, is explained at README.md file in this directory. -# +// +// How we organize this isolation test spec, is explained at README.md file in this directory. 
+// -# create distributed table to test behavior of VALIDATE in concurrent operations +// create distributed table to test behavior of VALIDATE in concurrent operations setup { SET citus.shard_replication_factor TO 1; @@ -10,13 +10,13 @@ setup SELECT create_distributed_table('constrained_table', 'id'); } -# drop distributed table +// drop distributed table teardown { DROP TABLE IF EXISTS constrained_table CASCADE; } -# session 1 +// session 1 session "s1" step "s1-initialize" { INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); } step "s1-begin" { BEGIN; } @@ -24,7 +24,7 @@ step "s1-add-constraint" { ALTER TABLE constrained_table ADD CONSTRAINT check_co step "s1-validate" { ALTER TABLE constrained_table VALIDATE CONSTRAINT check_constraint; } step "s1-commit" { COMMIT; } -# session 2 +// session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-insert" { INSERT INTO constrained_table VALUES(10, 10); } @@ -32,7 +32,7 @@ step "s2-insert-invalid" { INSERT INTO constrained_table VALUES(100, 100); } step "s2-select" { SELECT sum(int_data) FROM constrained_table; } step "s2-commit" { COMMIT; } -# permutations - check read and write are not blocked during validate queries +// permutations - check read and write are not blocked during validate queries permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s1-validate" "s2-insert" "s1-commit" "s2-commit" permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s1-validate" "s2-select" "s1-commit" "s2-commit" permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s2-insert" "s1-validate" "s1-commit" "s2-commit"
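Note on the `#include "isolation_mx_common.include.spec"` lines above: the two MX spec files (isolation_truncate_vs_all_on_mx.spec and isolation_update_delete_upsert_vs_all_on_mx.spec) drop their duplicated setup blocks in favor of a shared include, and the include file itself does not appear in this diff. Based on the setup lines removed from those two files, it presumably carries the common MX plumbing, roughly along these lines (a sketch reconstructed from the removed lines, not the actual file contents):

    setup
    {
        -- UDFs used to send commands over a single, session-level worker connection
        CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
            RETURNS void
            LANGUAGE C STRICT VOLATILE
            AS 'citus', $$start_session_level_connection_to_node$$;

        CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
            RETURNS void
            LANGUAGE C STRICT VOLATILE
            AS 'citus', $$run_commands_on_session_level_connection_to_node$$;

        CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
            RETURNS void
            LANGUAGE C STRICT VOLATILE
            AS 'citus', $$stop_session_level_connection_to_node$$;

        SELECT citus_internal.replace_isolation_tester_func();
        SELECT citus_internal.refresh_isolation_tester_prepared_statement();

        -- start_metadata_sync_to_node cannot run inside a transaction block;
        -- running it through master_run_on_worker is a workaround
        -- (port numbers are hard-coded at the moment)
        SELECT master_run_on_worker(
                ARRAY['localhost']::text[],
                ARRAY[57636]::int[],
                ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
                false)
        FROM pg_dist_node;

        SET citus.replication_model TO streaming;
        SET citus.shard_replication_factor TO 1;
    }

Each including spec then only adds its own table setup (CREATE TABLE, create_distributed_table, COPY) on top of this shared block. The switch from `#` to `//` comments in the renamed spec files fits the same change: once the spec templates are resolved through a C-preprocessor-style `#include`, a comment line starting with `#` would be read as an (invalid) preprocessor directive, whereas `//` comments do not clash with that stage.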