Refactor isolation tests (#3062)

Currently in mx isolation tests the setup is the same except for the creation of tables. The isolation framework lets us define multiple `setup` stages, therefore we can put the `mx_setup` into one file and prepend it prior to running the tests. 

How the structure works:
- cpp is used before running isolation tests to preprocess spec files. This way we can include any file we want to. Currently this is used to include mx common part.
- spec files are put to `/build/specs` for clear separation between generated files and template files
- a symbolic link is created for `/expected` in `build/expected/`.
- when running isolation tests, as the `inputdir`, `build` is passed so it runs the spec files from `build/specs` and checks the expected output from `build/expected`.

`/specs` is renamed to `/spec` because postgres first looks at the `specs` directory under the current directory; it is renamed to avoid that, since we are now running the isolation tests from `build/specs`.

Note: now we use `//` instead of `#` in comments in spec files, because cpp interprets `#` as a directive and it ignores `//`.
pull/3271/head
SaitTalhaNisanci 2019-12-10 18:12:54 +03:00 committed by Jelte Fennema
parent 5395ce6480
commit 8e5041885d
79 changed files with 797 additions and 1228 deletions

View File

@ -6,6 +6,7 @@
/tmp_upgrade/ /tmp_upgrade/
/tmp_citus_upgrade/ /tmp_citus_upgrade/
/tmp_citus_tarballs/ /tmp_citus_tarballs/
/build/
/results/ /results/
/log/ /log/

View File

@ -26,6 +26,8 @@ MULTI_REGRESS_OPTS = --inputdir=$(citus_abs_srcdir) $(pg_regress_locale_flags) -
pg_upgrade_check = $(citus_abs_srcdir)/upgrade/pg_upgrade_test.py pg_upgrade_check = $(citus_abs_srcdir)/upgrade/pg_upgrade_test.py
citus_upgrade_check = $(citus_abs_srcdir)/upgrade/citus_upgrade_test.py citus_upgrade_check = $(citus_abs_srcdir)/upgrade/citus_upgrade_test.py
template_isolation_files = $(shell find $(citus_abs_srcdir)/spec/ -name '*.spec')
generated_isolation_files = $(patsubst $(citus_abs_srcdir)/spec/%,$(citus_abs_srcdir)/build/specs/%,$(template_isolation_files))
# XXX: Can't actually do useful testruns against install - $libdir # XXX: Can't actually do useful testruns against install - $libdir
# etc will point to the directory configured during postgres' # etc will point to the directory configured during postgres'
# build. We could copy the installed tree around, but that's quite # build. We could copy the installed tree around, but that's quite
@ -48,6 +50,47 @@ check: check-full
# check-full triggers all tests that ought to be run routinely # check-full triggers all tests that ought to be run routinely
check-full: check-multi check-multi-mx check-multi-task-tracker-extra check-worker check-follower-cluster check-failure check-full: check-multi check-multi-mx check-multi-task-tracker-extra check-worker check-follower-cluster check-failure
ISOLATION_DEPDIR=.deps/isolation
ISOLATION_BUILDDIR=build/specs
# this can be used to print a value of variable
# ex: make print-generated_isolation_files
print-% : ; @echo $* = $($*)
.PHONY: create-symbolic-link
create-symbolic-link:
mkdir -p $(citus_abs_srcdir)/build
ln -fsn $(citus_abs_srcdir)/expected $(citus_abs_srcdir)/build/
# How this target works:
# cpp is used before running isolation tests to preprocess spec files.
# This way we can include any file we want to. Currently this is used to include mx common part.
# spec files are put to /build/specs for clear separation between generated files and template files
# a symbolic link is created for /expected in build/expected/.
# when running isolation tests, as the inputdir, build is passed so
# it runs the spec files from build/specs and checks the expected output from build/expected.
# /specs is renamed as /spec because postgres first looks at the specs directory under the current directory,
# so this is renamed to avoid that since we are running the isolation tests from build/specs now.
$(generated_isolation_files): $(citus_abs_srcdir)/build/specs/%: $(citus_abs_srcdir)/spec/%
@mkdir -p $(citus_abs_srcdir)/$(ISOLATION_DEPDIR) $(citus_abs_srcdir)/$(ISOLATION_BUILDDIR)
# -MF is used to store dependency files(.Po) in another directory for separation
# -MT is used to change the target of the rule emitted by dependency generation.
# -P is used to inhibit generation of linemarkers in the output from the preprocessor.
# -undef is used to not predefine any system-specific or GCC-specific macros.
# `man cpp` for further information
cd $(citus_abs_srcdir) && cpp -undef -w -P -MMD -MP -MF$(ISOLATION_DEPDIR)/$(*F).Po -MT$@ $< > $@
Isolation_Po_files := $(wildcard $(ISOLATION_DEPDIR)/*.Po)
ifneq (,$(Isolation_Po_files))
include $(Isolation_Po_files)
endif
isolation_test_files=$(generated_isolation_files) create-symbolic-link
# using pg_regress_multi_check unnecessarily starts up multiple nodes, which isn't needed # using pg_regress_multi_check unnecessarily starts up multiple nodes, which isn't needed
# for check-worker. But that's harmless besides a few cycles. # for check-worker. But that's harmless besides a few cycles.
check-worker: all check-worker: all
@ -101,7 +144,7 @@ check-failure-non-adaptive-base: all tempinstall-main
--server-option=citus.task_executor_type=real-time \ --server-option=citus.task_executor_type=real-time \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/failure_base_schedule $(EXTRA_TESTS) -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/failure_base_schedule $(EXTRA_TESTS)
check-isolation-non-adaptive: all tempinstall-main check-isolation-non-adaptive: all tempinstall-main $(isolation_test_files)
$(pg_regress_multi_check) --load-extension=citus --isolationtester \ $(pg_regress_multi_check) --load-extension=citus --isolationtester \
--server-option=citus.task_executor_type=real-time \ --server-option=citus.task_executor_type=real-time \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS) -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS)
@ -111,11 +154,11 @@ check-multi-vg: all tempinstall-main
--pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(VALGRIND_LOG_FILE) \ --pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(VALGRIND_LOG_FILE) \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS) -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
check-isolation: all tempinstall-main check-isolation: all tempinstall-main $(isolation_test_files)
$(pg_regress_multi_check) --load-extension=citus --isolationtester \ $(pg_regress_multi_check) --load-extension=citus --isolationtester \
-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS) -- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS)
check-isolation-base: all tempinstall-main check-isolation-base: all tempinstall-main $(isolation_test_files)
$(pg_regress_multi_check) --load-extension=citus --isolationtester \ $(pg_regress_multi_check) --load-extension=citus --isolationtester \
-- $(MULTI_REGRESS_OPTS) $(EXTRA_TESTS) -- $(MULTI_REGRESS_OPTS) $(EXTRA_TESTS)

View File

@ -13,23 +13,22 @@ step s1-begin:
BEGIN; BEGIN;
step s2-begin: step s2-begin:
BEGIN; BEGIN;
step s3-begin: step s3-begin:
BEGIN; BEGIN;
step s1-alter-table: step s1-alter-table:
ALTER TABLE test_table ADD COLUMN x INT; ALTER TABLE test_table ADD COLUMN x INT;
step s2-sleep: step s2-sleep:
SELECT pg_sleep(0.5); SELECT pg_sleep(0.5);
pg_sleep pg_sleep
step s2-view-dist: step s2-view-dist:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
@ -37,7 +36,7 @@ query query_hostname query_hostport master_query_host_namemaster_query_
ALTER TABLE test_table ADD COLUMN x INT; ALTER TABLE test_table ADD COLUMN x INT;
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s3-view-worker: step s3-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
@ -54,13 +53,13 @@ SELECT worker_apply_shard_ddl_command (1300001, 'public', '
ALTER TABLE test_table ADD COLUMN x INT; ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression ')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback: step s2-rollback:
ROLLBACK; ROLLBACK;
step s1-commit: step s1-commit:
COMMIT; COMMIT;
step s3-rollback: step s3-rollback:
ROLLBACK; ROLLBACK;
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
@ -76,43 +75,42 @@ step s1-begin:
BEGIN; BEGIN;
step s2-begin: step s2-begin:
BEGIN; BEGIN;
step s3-begin: step s3-begin:
BEGIN; BEGIN;
step s1-insert: step s1-insert:
INSERT INTO test_table VALUES (100, 100); INSERT INTO test_table VALUES (100, 100);
step s2-sleep: step s2-sleep:
SELECT pg_sleep(0.5); SELECT pg_sleep(0.5);
pg_sleep pg_sleep
step s2-view-dist: step s2-view-dist:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
INSERT INTO test_table VALUES (100, 100); INSERT INTO test_table VALUES (100, 100);
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s3-view-worker: step s3-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback: step s2-rollback:
ROLLBACK; ROLLBACK;
step s1-commit: step s1-commit:
COMMIT; COMMIT;
step s3-rollback: step s3-rollback:
ROLLBACK; ROLLBACK;
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
@ -128,10 +126,10 @@ step s1-begin:
BEGIN; BEGIN;
step s2-begin: step s2-begin:
BEGIN; BEGIN;
step s3-begin: step s3-begin:
BEGIN; BEGIN;
step s1-select: step s1-select:
SELECT count(*) FROM test_table; SELECT count(*) FROM test_table;
@ -140,14 +138,13 @@ count
0 0
step s2-sleep: step s2-sleep:
SELECT pg_sleep(0.5); SELECT pg_sleep(0.5);
pg_sleep pg_sleep
step s2-view-dist: step s2-view-dist:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
@ -155,7 +152,7 @@ query query_hostname query_hostport master_query_host_namemaster_query_
SELECT count(*) FROM test_table; SELECT count(*) FROM test_table;
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s3-view-worker: step s3-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
@ -164,13 +161,13 @@ SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost
SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback: step s2-rollback:
ROLLBACK; ROLLBACK;
step s1-commit: step s1-commit:
COMMIT; COMMIT;
step s3-rollback: step s3-rollback:
ROLLBACK; ROLLBACK;
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
@ -186,10 +183,10 @@ step s1-begin:
BEGIN; BEGIN;
step s2-begin: step s2-begin:
BEGIN; BEGIN;
step s3-begin: step s3-begin:
BEGIN; BEGIN;
step s1-select-router: step s1-select-router:
SELECT count(*) FROM test_table WHERE column1 = 55; SELECT count(*) FROM test_table WHERE column1 = 55;
@ -198,14 +195,13 @@ count
0 0
step s2-sleep: step s2-sleep:
SELECT pg_sleep(0.5); SELECT pg_sleep(0.5);
pg_sleep pg_sleep
step s2-view-dist: step s2-view-dist:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
@ -213,17 +209,17 @@ query query_hostname query_hostport master_query_host_namemaster_query_
SELECT count(*) FROM test_table WHERE column1 = 55; SELECT count(*) FROM test_table WHERE column1 = 55;
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s3-view-worker: step s3-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback: step s2-rollback:
ROLLBACK; ROLLBACK;
step s1-commit: step s1-commit:
COMMIT; COMMIT;
step s3-rollback: step s3-rollback:
ROLLBACK; ROLLBACK;

View File

@ -24,7 +24,6 @@ step detector-dump-wait-edges:
waiting_transaction_num, waiting_transaction_num,
blocking_transaction_num, blocking_transaction_num,
blocking_transaction_waiting; blocking_transaction_waiting;
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
@ -72,7 +71,6 @@ step detector-dump-wait-edges:
waiting_transaction_num, waiting_transaction_num,
blocking_transaction_num, blocking_transaction_num,
blocking_transaction_waiting; blocking_transaction_waiting;
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting waiting_transaction_numblocking_transaction_numblocking_transaction_waiting

View File

@ -6,11 +6,10 @@ run_command_on_workers
(localhost,57637,t,"GRANT ROLE") (localhost,57637,t,"GRANT ROLE")
(localhost,57638,t,"GRANT ROLE") (localhost,57638,t,"GRANT ROLE")
step s1-grant: step s1-grant:
GRANT ALL ON test_table TO test_user_1; GRANT ALL ON test_table TO test_user_1;
SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1'); SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1');
GRANT ALL ON test_table TO test_user_2;
GRANT ALL ON test_table TO test_user_2; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');
bool_and bool_and
@ -19,19 +18,19 @@ bool_and
t t
step s1-begin-insert: step s1-begin-insert:
BEGIN; BEGIN;
SET ROLE test_user_1; SET ROLE test_user_1;
INSERT INTO test_table VALUES (100, 100); INSERT INTO test_table VALUES (100, 100);
step s2-begin-insert: step s2-begin-insert:
BEGIN; BEGIN;
SET ROLE test_user_2; SET ROLE test_user_2;
INSERT INTO test_table VALUES (200, 200); INSERT INTO test_table VALUES (200, 200);
step s3-as-admin: step s3-as-admin:
-- Admin should be able to see all transactions -- Admin should be able to see all transactions
SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions(); SELECT count(*) FROM get_global_active_transactions();
count count
@ -40,10 +39,10 @@ count
4 4
step s3-as-user-1: step s3-as-user-1:
-- User should only be able to see its own transactions -- User should only be able to see its own transactions
SET ROLE test_user_1; SET ROLE test_user_1;
SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions(); SELECT count(*) FROM get_global_active_transactions();
count count
@ -52,10 +51,10 @@ count
2 2
step s3-as-readonly: step s3-as-readonly:
-- Other user should not see transactions -- Other user should not see transactions
SET ROLE test_readonly; SET ROLE test_readonly;
SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions(); SELECT count(*) FROM get_global_active_transactions();
count count
@ -64,10 +63,10 @@ count
0 0
step s3-as-monitor: step s3-as-monitor:
-- Monitor should see all transactions -- Monitor should see all transactions
SET ROLE test_monitor; SET ROLE test_monitor;
SELECT count(*) FROM get_all_active_transactions(); SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions(); SELECT count(*) FROM get_global_active_transactions();
count count
@ -76,10 +75,10 @@ count
4 4
step s1-commit: step s1-commit:
COMMIT; COMMIT;
step s2-commit: step s2-commit:
COMMIT; COMMIT;
run_command_on_workers run_command_on_workers

View File

@ -5,27 +5,26 @@ step s1-begin:
BEGIN; BEGIN;
step s2-begin: step s2-begin:
BEGIN; BEGIN;
step s1-master_append_table_to_shard: step s1-master_append_table_to_shard:
SELECT SELECT
master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636)
FROM FROM
pg_dist_shard pg_dist_shard
WHERE WHERE
'table_to_append'::regclass::oid = logicalrelid; 'table_to_append'::regclass::oid = logicalrelid;
master_append_table_to_shard master_append_table_to_shard
0.0426667 0.0426667
step s2-master_append_table_to_shard: step s2-master_append_table_to_shard:
SELECT
SELECT master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636)
master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636) FROM
FROM pg_dist_shard
pg_dist_shard WHERE
WHERE 'table_to_append'::regclass::oid = logicalrelid;
'table_to_append'::regclass::oid = logicalrelid;
<waiting ...> <waiting ...>
step s1-commit: step s1-commit:
COMMIT; COMMIT;
@ -35,5 +34,5 @@ master_append_table_to_shard
0.064 0.064
step s2-commit: step s2-commit:
COMMIT; COMMIT;

View File

@ -2,20 +2,18 @@ Parsed test spec with 5 sessions
starting permutation: take-locks s1-start-operation s2-start-operation s3-start-operation show-progress release-locks-1 show-progress release-locks-2 show-progress release-locks-3 starting permutation: take-locks s1-start-operation s2-start-operation s3-start-operation show-progress release-locks-1 show-progress release-locks-2 show-progress release-locks-3
step take-locks: step take-locks:
-- Locks for steps of sample operation in s1 -- Locks for steps of sample operation in s1
SELECT pg_advisory_lock(10); SELECT pg_advisory_lock(10);
SELECT pg_advisory_lock(11); SELECT pg_advisory_lock(11);
SELECT pg_advisory_lock(12); SELECT pg_advisory_lock(12);
-- Locks for steps of sample operation in s2
-- Locks for steps of sample operation in s2 SELECT pg_advisory_lock(20);
SELECT pg_advisory_lock(20); SELECT pg_advisory_lock(21);
SELECT pg_advisory_lock(21); SELECT pg_advisory_lock(22);
SELECT pg_advisory_lock(22); -- Locks for steps of sample operation in s3
SELECT pg_advisory_lock(30);
-- Locks for steps of sample operation in s3 SELECT pg_advisory_lock(31);
SELECT pg_advisory_lock(30); SELECT pg_advisory_lock(32);
SELECT pg_advisory_lock(31);
SELECT pg_advisory_lock(32);
pg_advisory_lock pg_advisory_lock
@ -45,17 +43,17 @@ pg_advisory_lock
step s1-start-operation: step s1-start-operation:
SELECT sample_operation(1337, 10, -1); SELECT sample_operation(1337, 10, -1);
<waiting ...> <waiting ...>
step s2-start-operation: step s2-start-operation:
SELECT sample_operation(1337, 20, 2); SELECT sample_operation(1337, 20, 2);
<waiting ...> <waiting ...>
step s3-start-operation: step s3-start-operation:
SELECT sample_operation(3778, 30, 9); SELECT sample_operation(3778, 30, 9);
<waiting ...> <waiting ...>
step show-progress: step show-progress:
SELECT show_progress(1337); SELECT show_progress(1337);
SELECT show_progress(3778); SELECT show_progress(3778);
show_progress show_progress
@ -68,10 +66,10 @@ show_progress
(0,0) (0,0)
(1,0) (1,0)
step release-locks-1: step release-locks-1:
-- Release the locks of first steps of sample operations -- Release the locks of first steps of sample operations
SELECT pg_advisory_unlock(10); SELECT pg_advisory_unlock(10);
SELECT pg_advisory_unlock(20); SELECT pg_advisory_unlock(20);
SELECT pg_advisory_unlock(30); SELECT pg_advisory_unlock(30);
pg_advisory_unlock pg_advisory_unlock
@ -83,8 +81,8 @@ pg_advisory_unlock
t t
step show-progress: step show-progress:
SELECT show_progress(1337); SELECT show_progress(1337);
SELECT show_progress(3778); SELECT show_progress(3778);
show_progress show_progress
@ -97,10 +95,10 @@ show_progress
(0,9) (0,9)
(1,0) (1,0)
step release-locks-2: step release-locks-2:
-- Release the locks of second steps of sample operations -- Release the locks of second steps of sample operations
SELECT pg_advisory_unlock(11); SELECT pg_advisory_unlock(11);
SELECT pg_advisory_unlock(21); SELECT pg_advisory_unlock(21);
SELECT pg_advisory_unlock(31); SELECT pg_advisory_unlock(31);
pg_advisory_unlock pg_advisory_unlock
@ -112,8 +110,8 @@ pg_advisory_unlock
t t
step show-progress: step show-progress:
SELECT show_progress(1337); SELECT show_progress(1337);
SELECT show_progress(3778); SELECT show_progress(3778);
show_progress show_progress
@ -126,10 +124,10 @@ show_progress
(0,9) (0,9)
(1,9) (1,9)
step release-locks-3: step release-locks-3:
-- Release the locks of final steps of sample operations -- Release the locks of final steps of sample operations
SELECT pg_advisory_unlock(12); SELECT pg_advisory_unlock(12);
SELECT pg_advisory_unlock(22); SELECT pg_advisory_unlock(22);
SELECT pg_advisory_unlock(32); SELECT pg_advisory_unlock(32);
pg_advisory_unlock pg_advisory_unlock

View File

@ -65,7 +65,7 @@ test: isolation_multiuser_locking
# MX tests # MX tests
test: isolation_reference_on_mx test: isolation_reference_on_mx
test: isolation_ref2ref_foreign_keys_on_mx test: isolation_ref2ref_foreign_keys_on_mx
test: isolation_get_distributed_wait_queries test: isolation_get_distributed_wait_queries_mx
test: isolation_insert_vs_all_on_mx test: isolation_insert_vs_all_on_mx
test: isolation_select_vs_all_on_mx test: isolation_select_vs_all_on_mx
test: isolation_update_delete_upsert_vs_all_on_mx test: isolation_update_delete_upsert_vs_all_on_mx

View File

@ -1,6 +1,9 @@
In this folder, all tests which are in the format of '*_add.spec' are organized In this folder, all tests which are in the format of '*_add.spec' are organized
according to a specific format. according to a specific format.
You should use `//` in mx files, not `#`. We preprocess mx files with `cpp` to
include `isolation_mx_common.include.spec`.
For isolation tests, we selected 'n' representative operations and we aimed to For isolation tests, we selected 'n' representative operations and we aimed to
perform all possible pairs of 'n' operations together. So first test just runs perform all possible pairs of 'n' operations together. So first test just runs
first of these 'n' operation with remaining 'n - 1' operation. Similarly, second first of these 'n' operation with remaining 'n - 1' operation. Similarly, second

View File

@ -1,5 +1,5 @@
# the test expects to have zero nodes in pg_dist_node at the beginning // the test expects to have zero nodes in pg_dist_node at the beginning
# add single one of the nodes for the purpose of the test // add single one of the nodes for the purpose of the test
setup setup
{ {
SET citus.shard_replication_factor to 1; SET citus.shard_replication_factor to 1;
@ -13,7 +13,7 @@ setup
SELECT create_distributed_table('test_table','x'); SELECT create_distributed_table('test_table','x');
} }
# ensure neither node's added for the remaining of the isolation tests // ensure neither node's added for the remaining of the isolation tests
teardown teardown
{ {
DROP TABLE test_reference_table; DROP TABLE test_reference_table;
@ -46,8 +46,8 @@ step "s1-commit"
session "s2" session "s2"
# COPY accesses all shard/placement metadata, so should be enough for // COPY accesses all shard/placement metadata, so should be enough for
# loading the cache // loading the cache
step "s2-load-metadata-cache" step "s2-load-metadata-cache"
{ {
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
@ -113,10 +113,10 @@ step "s2-print-index-count"
nodeport; nodeport;
} }
# verify that copy/insert gets the invalidation and re-builts its metadata cache // verify that copy/insert gets the invalidation and re-builts its metadata cache
# note that we need to run "s1-load-metadata-cache" and "s2-load-metadata-cache" // note that we need to run "s1-load-metadata-cache" and "s2-load-metadata-cache"
# to ensure that metadata is cached otherwise the test would be useless since // to ensure that metadata is cached otherwise the test would be useless since
# the cache would be empty and the metadata data is gathered from the tables directly // the cache would be empty and the metadata data is gathered from the tables directly
permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content" permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content"
permutation "s2-load-metadata-cache" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content" permutation "s2-load-metadata-cache" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content" permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content"
@ -127,7 +127,7 @@ permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-creat
permutation "s2-load-metadata-cache" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2" permutation "s2-load-metadata-cache" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"
# same tests without loading the cache // same tests without loading the cache
permutation "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content" permutation "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content"
permutation "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content" permutation "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
permutation "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content" permutation "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content"

View File

@ -107,43 +107,43 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# session 1 adds a node, session 2 removes it, should be ok // session 1 adds a node, session 2 removes it, should be ok
permutation "s1-begin" "s1-add-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes" permutation "s1-begin" "s1-add-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes"
# add a different node from 2 sessions, should be ok // add a different node from 2 sessions, should be ok
permutation "s1-begin" "s1-add-node-1" "s2-add-node-2" "s1-commit" "s1-show-nodes" permutation "s1-begin" "s1-add-node-1" "s2-add-node-2" "s1-commit" "s1-show-nodes"
# add the same node from 2 sessions, should be ok (idempotent) // add the same node from 2 sessions, should be ok (idempotent)
permutation "s1-begin" "s1-add-node-1" "s2-add-node-1" "s1-commit" "s1-show-nodes" permutation "s1-begin" "s1-add-node-1" "s2-add-node-1" "s1-commit" "s1-show-nodes"
# add a different node from 2 sessions, one aborts // add a different node from 2 sessions, one aborts
permutation "s1-begin" "s1-add-node-1" "s2-add-node-2" "s1-abort" "s1-show-nodes" permutation "s1-begin" "s1-add-node-1" "s2-add-node-2" "s1-abort" "s1-show-nodes"
# add the same node from 2 sessions, one aborts // add the same node from 2 sessions, one aborts
permutation "s1-begin" "s1-add-node-1" "s2-add-node-1" "s1-abort" "s1-show-nodes" permutation "s1-begin" "s1-add-node-1" "s2-add-node-1" "s1-abort" "s1-show-nodes"
# remove a different node from 2 transactions, should be ok // remove a different node from 2 transactions, should be ok
permutation "s1-add-node-1" "s1-add-node-2" "s1-begin" "s1-remove-node-1" "s2-remove-node-2" "s1-commit" "s1-show-nodes" permutation "s1-add-node-1" "s1-add-node-2" "s1-begin" "s1-remove-node-1" "s2-remove-node-2" "s1-commit" "s1-show-nodes"
# remove the same node from 2 transactions, should be ok (idempotent) // remove the same node from 2 transactions, should be ok (idempotent)
permutation "s1-add-node-1" "s1-begin" "s1-remove-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-node-1" "s1-begin" "s1-remove-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes"
# activate an active node from 2 transactions, should be ok // activate an active node from 2 transactions, should be ok
permutation "s1-add-node-1" "s1-begin" "s1-activate-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-node-1" "s1-begin" "s1-activate-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes"
# disable an active node from 2 transactions, should be ok // disable an active node from 2 transactions, should be ok
permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes"
# activate an inactive node from 2 transactions, should be ok // activate an inactive node from 2 transactions, should be ok
permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes"
# disable an inactive node from 2 transactions, should be ok // disable an inactive node from 2 transactions, should be ok
permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes"
# disable and activate an active node from 2 transactions, should be ok // disable and activate an active node from 2 transactions, should be ok
permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes"
# activate and disable an active node node from 2 transactions, should be ok // activate and disable an active node node from 2 transactions, should be ok
permutation "s1-add-node-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-node-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes"
# disable and activate an inactive node from 2 transactions, should be ok // disable and activate an inactive node from 2 transactions, should be ok
permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes"
# activate and disable an inactive node node from 2 transactions, should be ok // activate and disable an inactive node node from 2 transactions, should be ok
permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes"
# activate and disable an inactive node from 2 transactions, one aborts // activate and disable an inactive node from 2 transactions, one aborts
permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes" permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes"
# disable an active node from 2 transactions, one aborts // disable an active node from 2 transactions, one aborts
permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes" permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create append distributed table to test behavior of COPY in concurrent operations // create append distributed table to test behavior of COPY in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -10,13 +10,13 @@ setup
SELECT create_distributed_table('append_copy', 'id', 'append'); SELECT create_distributed_table('append_copy', 'id', 'append');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS append_copy CASCADE; DROP TABLE IF EXISTS append_copy CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-initialize" { COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -51,7 +51,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
step "s2-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } step "s2-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; }
@ -79,10 +79,10 @@ step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELE
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); } step "s2-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); }
# permutations - COPY vs COPY // permutations - COPY vs COPY
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"
# permutations - COPY first // permutations - COPY first
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count"
@ -103,7 +103,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-apply-delete-command
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - COPY second // permutations - COPY second
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count"

View File

@ -1,6 +1,6 @@
# Tests around cancelling statements. As we can't trigger cancel // Tests around cancelling statements. As we can't trigger cancel
# interrupts directly, we use statement_timeout instead, which largely // interrupts directly, we use statement_timeout instead, which largely
# behaves the same as proper cancellation. // behaves the same as proper cancellation.
setup setup
{ {
@ -65,16 +65,16 @@ step "s2-drop"
DROP TABLE cancel_table; DROP TABLE cancel_table;
} }
# check that statement cancel works for plain selects, drop table // check that statement cancel works for plain selects, drop table
# afterwards to make sure sleep on workers is cancelled (thereby not // afterwards to make sure sleep on workers is cancelled (thereby not
# preventing drop via locks) // preventing drop via locks)
permutation "s1-timeout" "s1-sleep10000" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-sleep10000" "s1-reset" "s1-drop"
permutation "s1-timeout" "s1-sleep10000" "s1-reset" "s2-drop" permutation "s1-timeout" "s1-sleep10000" "s1-reset" "s2-drop"
# check that statement cancel works for selects in transaction // check that statement cancel works for selects in transaction
permutation "s1-timeout" "s1-begin" "s1-sleep10000" "s1-rollback" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-begin" "s1-sleep10000" "s1-rollback" "s1-reset" "s1-drop"
permutation "s1-timeout" "s1-begin" "s1-sleep10000" "s1-rollback" "s1-reset" "s2-drop" permutation "s1-timeout" "s1-begin" "s1-sleep10000" "s1-rollback" "s1-reset" "s2-drop"
# check that statement cancel works for selects in transaction, that previously wrote // check that statement cancel works for selects in transaction, that previously wrote
permutation "s1-timeout" "s1-begin" "s1-update1" "s1-sleep10000" "s1-rollback" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-begin" "s1-update1" "s1-sleep10000" "s1-rollback" "s1-reset" "s1-drop"
permutation "s1-timeout" "s1-begin" "s1-update1" "s1-sleep10000" "s1-rollback" "s1-reset" "s2-drop" permutation "s1-timeout" "s1-begin" "s1-update1" "s1-sleep10000" "s1-rollback" "s1-reset" "s2-drop"

View File

@ -92,8 +92,8 @@ step "s3-view-worker"
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
} }
# we prefer to sleep before "s2-view-dist" so that we can ensure // we prefer to sleep before "s2-view-dist" so that we can ensure
# the "wait_event" in the output doesn't change randomly (e.g., NULL to CliendRead etc.) // the "wait_event" in the output doesn't change randomly (e.g., NULL to CliendRead etc.)
permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-alter-table" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback" permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-alter-table" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback"
permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-insert" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback" permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-insert" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback"
permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-select" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback" permutation "s1-cache-connections" "s1-begin" "s2-begin" "s3-begin" "s1-select" "s2-sleep" "s2-view-dist" "s3-view-worker" "s2-rollback" "s1-commit" "s3-rollback"

View File

@ -59,17 +59,17 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# verify that an in-progress insert blocks concurrent updates // verify that an in-progress insert blocks concurrent updates
permutation "s1-begin" "s1-insert" "s2-update" "s1-commit" permutation "s1-begin" "s1-insert" "s2-update" "s1-commit"
# but an insert without xact will not block // but an insert without xact will not block
permutation "s1-insert" "s2-update" permutation "s1-insert" "s2-update"
# verify that an in-progress multi-row insert blocks concurrent updates // verify that an in-progress multi-row insert blocks concurrent updates
permutation "s1-begin" "s1-multi-insert" "s2-update" "s1-commit" permutation "s1-begin" "s1-multi-insert" "s2-update" "s1-commit"
# two multi-row inserts that hit same shards will block // two multi-row inserts that hit same shards will block
permutation "s1-begin" "s1-multi-insert" "s2-multi-insert-overlap" "s1-commit" permutation "s1-begin" "s1-multi-insert" "s2-multi-insert-overlap" "s1-commit"
# but concurrent multi-row inserts don't block unless shards overlap // but concurrent multi-row inserts don't block unless shards overlap
permutation "s1-begin" "s2-begin" "s1-multi-insert" "s2-multi-insert" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-multi-insert" "s2-multi-insert" "s1-commit" "s2-commit"

View File

@ -1,5 +1,5 @@
# we use 5 as the partition key value through out the test // we use 5 as the partition key value through out the test
# so setting the corresponding shard here is useful // so setting the corresponding shard here is useful
setup setup
{ {
SET citus.shard_count TO 2; SET citus.shard_count TO 2;
@ -18,9 +18,9 @@ teardown
session "s1" session "s1"
# since test_hash_table has rep > 1 simple select query doesn't hit all placements // since test_hash_table has rep > 1 simple select query doesn't hit all placements
# hence not all placements are cached // hence not all placements are cached
# but with copy all placements are cached // but with copy all placements are cached
step "s1-load-cache" step "s1-load-cache"
{ {
COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
@ -48,9 +48,9 @@ step "s2-repair-placement"
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
} }
# since test_hash_table has rep > 1 simple select query doesn't hit all placements // since test_hash_table has rep > 1 simple select query doesn't hit all placements
# hence not all placements are cached // hence not all placements are cached
# but with copy all placements are cached // but with copy all placements are cached
step "s2-load-cache" step "s2-load-cache"
{ {
COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV; COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
@ -61,11 +61,11 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# two concurrent shard repairs on the same shard // two concurrent shard repairs on the same shard
# note that "s1-repair-placement" errors out but that is expected // note that "s1-repair-placement" errors out but that is expected
# given that "s2-repair-placement" succeeds and the placement is // given that "s2-repair-placement" succeeds and the placement is
# already repaired // already repaired
permutation "s1-load-cache" "s2-load-cache" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-repair-placement" "s2-commit" permutation "s1-load-cache" "s2-load-cache" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-repair-placement" "s2-commit"
# the same test without the load caches // the same test without the load caches
permutation "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-repair-placement" "s2-commit" permutation "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-repair-placement" "s2-commit"

View File

@ -1,5 +1,5 @@
# we use 5 as the partition key value through out the test // we use 5 as the partition key value through out the test
# so setting the corresponding shard here is useful // so setting the corresponding shard here is useful
setup setup
{ {
SET citus.shard_count TO 2; SET citus.shard_count TO 2;
@ -24,8 +24,8 @@ step "s1-begin"
SET LOCAL citus.select_opens_transaction_block TO off; SET LOCAL citus.select_opens_transaction_block TO off;
} }
# since test_copy_placement_vs_modification has rep > 1 simple select query doesn't hit all placements // since test_copy_placement_vs_modification has rep > 1 simple select query doesn't hit all placements
# hence not all placements are cached // hence not all placements are cached
step "s1-load-cache" step "s1-load-cache"
{ {
TRUNCATE test_copy_placement_vs_modification; TRUNCATE test_copy_placement_vs_modification;
@ -110,9 +110,9 @@ step "s2-print-index-count"
nodeport; nodeport;
} }
# repair a placement while concurrently performing an update/delete/insert/copy // repair a placement while concurrently performing an update/delete/insert/copy
# note that at some points we use "s1-select" just after "s1-begin" given that BEGIN // note that at some points we use "s1-select" just after "s1-begin" given that BEGIN
# may invalidate cache at certain cases // may invalidate cache at certain cases
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content"
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content"
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content"
@ -120,7 +120,7 @@ permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-ddl" "s2-commit" "s1-commit" "s2-print-index-count" permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-ddl" "s2-commit" "s1-commit" "s2-print-index-count"
# the same tests without loading the cache at first // the same tests without loading the cache at first
permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content"
permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content"
permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content"

View File

@ -1,45 +1,14 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE copy_table(id integer, value integer); CREATE TABLE copy_table(id integer, value integer);
SELECT create_distributed_table('copy_table', 'id'); SELECT create_distributed_table('copy_table', 'id');
COPY copy_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; COPY copy_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS copy_table CASCADE; DROP TABLE IF EXISTS copy_table CASCADE;
@ -48,7 +17,7 @@ teardown
session "s1" session "s1"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -83,7 +52,7 @@ step "s2-begin"
BEGIN; BEGIN;
} }
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {
@ -143,5 +112,5 @@ step "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-begin" "s2-coordinator-drop" "s1-commit-worker" "s2-commit" "s1-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-begin" "s2-coordinator-drop" "s1-commit-worker" "s2-commit" "s1-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
#Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 //Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966
#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" //permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection"

View File

@ -52,12 +52,12 @@ step "s2-commit"
COMMIT; COMMIT;
} }
#concurrent create_distributed_table on empty table //concurrent create_distributed_table on empty table
permutation "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-create_distributed_table" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-create_distributed_table" "s1-commit" "s2-commit"
#concurrent create_distributed_table vs. copy to table //concurrent create_distributed_table vs. copy to table
permutation "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-copy_to_local_table" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-copy_to_local_table" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s2-copy_to_local_table" "s1-create_distributed_table" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-copy_to_local_table" "s1-create_distributed_table" "s2-commit" "s1-commit"
#concurrent create_distributed_table on non-empty table //concurrent create_distributed_table on non-empty table
permutation "s1-copy_to_local_table" "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-create_distributed_table" "s1-commit" "s2-commit" permutation "s1-copy_to_local_table" "s1-begin" "s2-begin" "s1-create_distributed_table" "s2-create_distributed_table" "s1-commit" "s2-commit"

View File

@ -142,68 +142,68 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# verify that citus_create_restore_point is blocked by concurrent create_distributed_table // verify that citus_create_restore_point is blocked by concurrent create_distributed_table
permutation "s1-begin" "s1-create-distributed" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-create-distributed" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is not blocked by concurrent INSERT (only commit) // verify that citus_create_restore_point is not blocked by concurrent INSERT (only commit)
permutation "s1-begin" "s1-insert" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-insert" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is not blocked by concurrent multi-shard UPDATE (only commit) // verify that citus_create_restore_point is not blocked by concurrent multi-shard UPDATE (only commit)
permutation "s1-begin" "s1-modify-multiple" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-modify-multiple" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is not blocked by concurrent DDL (only commit) // verify that citus_create_restore_point is not blocked by concurrent DDL (only commit)
permutation "s1-begin" "s1-ddl" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-ddl" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is not blocked by concurrent COPY (only commit) // verify that citus_create_restore_point is not blocked by concurrent COPY (only commit)
permutation "s1-begin" "s1-copy" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-copy" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is blocked by concurrent recover_prepared_transactions // verify that citus_create_restore_point is blocked by concurrent recover_prepared_transactions
permutation "s1-begin" "s1-recover" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-recover" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is blocked by concurrent DROP TABLE // verify that citus_create_restore_point is blocked by concurrent DROP TABLE
permutation "s1-begin" "s1-drop" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-drop" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is blocked by concurrent master_add_node // verify that citus_create_restore_point is blocked by concurrent master_add_node
permutation "s1-begin" "s1-add-node" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-add-node" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is blocked by concurrent master_remove_node // verify that citus_create_restore_point is blocked by concurrent master_remove_node
permutation "s1-begin" "s1-remove-node" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-remove-node" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is blocked by concurrent citus_create_restore_point // verify that citus_create_restore_point is blocked by concurrent citus_create_restore_point
permutation "s1-begin" "s1-create-restore" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-create-restore" "s2-create-restore" "s1-commit"
# verify that multi-shard UPDATE is blocked by concurrent citus_create_restore_point // verify that multi-shard UPDATE is blocked by concurrent citus_create_restore_point
permutation "s2-begin" "s2-create-restore" "s1-modify-multiple" "s2-commit" permutation "s2-begin" "s2-create-restore" "s1-modify-multiple" "s2-commit"
# verify that DDL is blocked by concurrent citus_create_restore_point // verify that DDL is blocked by concurrent citus_create_restore_point
permutation "s2-begin" "s2-create-restore" "s1-ddl" "s2-commit" permutation "s2-begin" "s2-create-restore" "s1-ddl" "s2-commit"
# verify that multi-statement transactions are blocked by concurrent citus_create_restore_point // verify that multi-statement transactions are blocked by concurrent citus_create_restore_point
permutation "s2-begin" "s2-create-restore" "s1-multi-statement" "s2-commit" permutation "s2-begin" "s2-create-restore" "s1-multi-statement" "s2-commit"
# verify that citus_create_restore_point is blocked by concurrent create_reference_table // verify that citus_create_restore_point is blocked by concurrent create_reference_table
permutation "s1-begin" "s1-create-reference" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-create-reference" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is not blocked by concurrent reference table INSERT (only commit) // verify that citus_create_restore_point is not blocked by concurrent reference table INSERT (only commit)
permutation "s1-begin" "s1-insert-ref" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-insert-ref" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is not blocked by concurrent reference table UPDATE (only commit) // verify that citus_create_restore_point is not blocked by concurrent reference table UPDATE (only commit)
permutation "s1-begin" "s1-modify-multiple-ref" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-modify-multiple-ref" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is not blocked by concurrent reference table DDL (only commit) // verify that citus_create_restore_point is not blocked by concurrent reference table DDL (only commit)
permutation "s1-begin" "s1-ddl-ref" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-ddl-ref" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is not blocked by concurrent COPY to reference table (only commit) // verify that citus_create_restore_point is not blocked by concurrent COPY to reference table (only commit)
permutation "s1-begin" "s1-copy-ref" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-copy-ref" "s2-create-restore" "s1-commit"
# verify that citus_create_restore_point is blocked by concurrent DROP TABLE when table is a reference table // verify that citus_create_restore_point is blocked by concurrent DROP TABLE when table is a reference table
permutation "s1-begin" "s1-drop-ref" "s2-create-restore" "s1-commit" permutation "s1-begin" "s1-drop-ref" "s2-create-restore" "s1-commit"
# verify that reference table UPDATE is blocked by concurrent citus_create_restore_point // verify that reference table UPDATE is blocked by concurrent citus_create_restore_point
permutation "s2-begin" "s2-create-restore" "s1-modify-multiple-ref" "s2-commit" permutation "s2-begin" "s2-create-restore" "s1-modify-multiple-ref" "s2-commit"
# verify that reference table DDL is blocked by concurrent citus_create_restore_point // verify that reference table DDL is blocked by concurrent citus_create_restore_point
permutation "s2-begin" "s2-create-restore" "s1-ddl-ref" "s2-commit" permutation "s2-begin" "s2-create-restore" "s1-ddl-ref" "s2-commit"
# verify that multi-statement transactions with reference tables are blocked by concurrent citus_create_restore_point // verify that multi-statement transactions with reference tables are blocked by concurrent citus_create_restore_point
permutation "s2-begin" "s2-create-restore" "s1-multi-statement-ref" "s2-commit" permutation "s2-begin" "s2-create-restore" "s1-multi-statement-ref" "s2-commit"

View File

@ -101,20 +101,20 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# session 1 adds a node, session 2 creates a distributed table // session 1 adds a node, session 2 creates a distributed table
permutation "s1-begin" "s1-add-node-2" "s2-create-table-1" "s1-commit" "s1-show-placements" "s2-select" permutation "s1-begin" "s1-add-node-2" "s2-create-table-1" "s1-commit" "s1-show-placements" "s2-select"
permutation "s1-begin" "s1-add-node-2" "s2-create-table-1" "s1-abort" "s1-show-placements" "s2-select" permutation "s1-begin" "s1-add-node-2" "s2-create-table-1" "s1-abort" "s1-show-placements" "s2-select"
permutation "s2-begin" "s2-create-table-1" "s1-add-node-2" "s2-commit" "s1-show-placements" "s2-select" permutation "s2-begin" "s2-create-table-1" "s1-add-node-2" "s2-commit" "s1-show-placements" "s2-select"
# session 1 removes a node, session 2 creates a distributed table // session 1 removes a node, session 2 creates a distributed table
permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-1" "s1-commit" "s1-show-placements" "s2-select" permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-1" "s1-commit" "s1-show-placements" "s2-select"
permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-1" "s1-abort" "s1-show-placements" "s2-select" permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-1" "s1-abort" "s1-show-placements" "s2-select"
permutation "s1-add-node-2" "s2-begin" "s2-create-table-1" "s1-remove-node-2" "s2-commit" "s1-show-placements" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-table-1" "s1-remove-node-2" "s2-commit" "s1-show-placements" "s2-select"
# session 1 removes a node, session 2 creates a distributed table with replication factor 2, should throw a sane error // session 1 removes a node, session 2 creates a distributed table with replication factor 2, should throw a sane error
permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-2" "s1-commit" "s2-select" permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-2" "s1-commit" "s2-select"
permutation "s1-add-node-2" "s2-begin" "s2-create-table-2" "s1-remove-node-2" "s2-commit" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-table-2" "s1-remove-node-2" "s2-commit" "s2-select"
# session 1 removes a node, session 2 creates a shard in an append-distributed table // session 1 removes a node, session 2 creates a shard in an append-distributed table
permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-append-table" "s1-commit" "s2-select" permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-append-table" "s1-commit" "s2-select"
permutation "s1-add-node-2" "s2-begin" "s2-create-append-table" "s1-remove-node-2" "s2-commit" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-append-table" "s1-remove-node-2" "s2-commit" "s2-select"

View File

@ -58,16 +58,16 @@ step "s2-select"
SELECT * FROM migration_table ORDER BY test_id; SELECT * FROM migration_table ORDER BY test_id;
} }
# verify that local COPY is picked up by create_distributed_table once it commits // verify that local COPY is picked up by create_distributed_table once it commits
permutation "s2-begin" "s2-copy" "s1-create_distributed_table" "s2-commit" "s2-select" permutation "s2-begin" "s2-copy" "s1-create_distributed_table" "s2-commit" "s2-select"
# verify that COPY is distributed once create_distributed_table commits // verify that COPY is distributed once create_distributed_table commits
permutation "s1-begin" "s1-create_distributed_table" "s2-copy" "s1-commit" "s2-select" permutation "s1-begin" "s1-create_distributed_table" "s2-copy" "s1-commit" "s2-select"
# verify that local INSERT is picked up by create_distributed_table once it commits // verify that local INSERT is picked up by create_distributed_table once it commits
permutation "s2-begin" "s2-insert" "s1-create_distributed_table" "s2-commit" "s2-select" permutation "s2-begin" "s2-insert" "s1-create_distributed_table" "s2-commit" "s2-select"
# verify that INSERT is distributed once create_distributed_table commits // verify that INSERT is distributed once create_distributed_table commits
permutation "s1-begin" "s1-create_distributed_table" "s2-insert" "s1-commit" "s2-select" permutation "s1-begin" "s1-create_distributed_table" "s2-insert" "s1-commit" "s2-select"
# verify that changes are picked up even in serializable mode // verify that changes are picked up even in serializable mode
permutation "s1-begin-serializable" "s2-copy" "s1-create_distributed_table" "s1-commit" "s2-select" permutation "s1-begin-serializable" "s2-copy" "s1-create_distributed_table" "s1-commit" "s2-select"
permutation "s1-begin-serializable" "s2-insert" "s1-create_distributed_table" "s1-commit" "s2-select" permutation "s1-begin-serializable" "s2-insert" "s1-create_distributed_table" "s1-commit" "s2-select"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of DDL in concurrent operations // create range distributed table to test behavior of DDL in concurrent operations
setup setup
{ {
SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.replace_isolation_tester_func();
@ -13,7 +13,7 @@ setup
SELECT create_distributed_table('ddl_hash', 'id'); SELECT create_distributed_table('ddl_hash', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS ddl_hash CASCADE; DROP TABLE IF EXISTS ddl_hash CASCADE;
@ -21,7 +21,7 @@ teardown
SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_internal.restore_isolation_tester_func();
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-initialize" { COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -40,7 +40,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-begin" { BEGIN; } step "s2-begin" { BEGIN; }
step "s2-ddl-create-index" { CREATE INDEX ddl_hash_index ON ddl_hash(id); } step "s2-ddl-create-index" { CREATE INDEX ddl_hash_index ON ddl_hash(id); }
@ -56,7 +56,7 @@ step "s2-distribute-table" { SELECT create_distributed_table('ddl_hash', 'id');
step "s2-select" { SELECT * FROM ddl_hash ORDER BY 1, 2; } step "s2-select" { SELECT * FROM ddl_hash ORDER BY 1, 2; }
step "s2-commit" { COMMIT; } step "s2-commit" { COMMIT; }
# permutations - DDL vs DDL // permutations - DDL vs DDL
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-indexes" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-indexes" "s1-show-columns"
@ -70,7 +70,7 @@ permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-ddl-create-ind
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-columns"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-columns"
# permutations - DDL first // permutations - DDL first
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-table-size" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-table-size" "s1-commit" "s2-commit" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-indexes"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-distribute-table" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-distribute-table" "s1-commit" "s2-commit" "s1-show-indexes"
@ -83,7 +83,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-tab
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-columns"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-distribute-table" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-distribute-table" "s1-commit" "s2-commit" "s1-show-columns"
# permutations - DDL second // permutations - DDL second
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of DELETE in concurrent operations // create range distributed table to test behavior of DELETE in concurrent operations
setup setup
{ {
SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.replace_isolation_tester_func();
@ -13,7 +13,7 @@ setup
SELECT create_distributed_table('delete_hash', 'id'); SELECT create_distributed_table('delete_hash', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS delete_hash CASCADE; DROP TABLE IF EXISTS delete_hash CASCADE;
@ -21,7 +21,7 @@ teardown
SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_internal.restore_isolation_tester_func();
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-initialize" { COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -41,7 +41,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-begin" { BEGIN; } step "s2-begin" { BEGIN; }
step "s2-delete" { DELETE FROM delete_hash WHERE id = 4; } step "s2-delete" { DELETE FROM delete_hash WHERE id = 4; }
@ -59,10 +59,10 @@ step "s2-distribute-table" { SELECT create_distributed_table('delete_hash', 'id'
step "s2-select" { SELECT * FROM delete_hash ORDER BY 1, 2; } step "s2-select" { SELECT * FROM delete_hash ORDER BY 1, 2; }
step "s2-commit" { COMMIT; } step "s2-commit" { COMMIT; }
# permutations - DELETE vs DELETE // permutations - DELETE vs DELETE
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-delete" "s1-commit" "s2-commit" "s1-select-count"
# permutations - DELETE first // permutations - DELETE first
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-drop" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes"
@ -74,7 +74,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-rename-col
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count"
# permutations - DELETE second // permutations - DELETE second
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-delete" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-delete" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes"

View File

@ -1,37 +1,7 @@
#include "isolation_mx_common.include.spec"
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block.
-- Following is a workaround to overcome that. Port numbers are hard coded
-- at the moment.
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE ref_table(id int PRIMARY KEY, value int); CREATE TABLE ref_table(id int PRIMARY KEY, value int);
SELECT create_reference_table('ref_table'); SELECT create_reference_table('ref_table');
@ -163,4 +133,4 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-rollback-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-rollback-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-udpate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-udpate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-display"
#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" "s3-display" //permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" "s3-display"

View File

@ -368,69 +368,69 @@ step "s6-commit"
COMMIT; COMMIT;
} }
# we disable the daemon during the regression tests in order to get consistent results // we disable the daemon during the regression tests in order to get consistent results
# thus we manually issue the deadlock detection // thus we manually issue the deadlock detection
session "deadlock-checker" session "deadlock-checker"
# we issue the checker not only when there are deadlocks to ensure that we never cancel // we issue the checker not only when there are deadlocks to ensure that we never cancel
# backend inappropriately // backend inappropriately
step "deadlock-checker-call" step "deadlock-checker-call"
{ {
SELECT check_distributed_deadlocks(); SELECT check_distributed_deadlocks();
} }
# simplest case, loop with two nodes // simplest case, loop with two nodes
permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-commit" "s2-commit"
# simplest case with replication factor 2 // simplest case with replication factor 2
permutation "s1-begin" "s2-begin" "s1-update-1-rep-2" "s2-update-2-rep-2" "s2-update-1-rep-2" "deadlock-checker-call" "s1-update-2-rep-2" "deadlock-checker-call" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-update-1-rep-2" "s2-update-2-rep-2" "s2-update-1-rep-2" "deadlock-checker-call" "s1-update-2-rep-2" "deadlock-checker-call" "s1-commit" "s2-commit"
# simplest case with 2pc enabled // simplest case with 2pc enabled
permutation "s1-begin" "s2-begin" "s1-set-2pc" "s2-set-2pc" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-set-2pc" "s2-set-2pc" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-commit" "s2-commit"
# simplest case with multi-shard query is cancelled // simplest case with multi-shard query is cancelled
permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-upsert-select-all" "deadlock-checker-call" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-upsert-select-all" "deadlock-checker-call" "s1-commit" "s2-commit"
# simplest case with DDL is cancelled // simplest case with DDL is cancelled
permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-ddl" "deadlock-checker-call" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-ddl" "deadlock-checker-call" "s1-commit" "s2-commit"
# deadlock with local table // deadlock with local table
permutation "s1-begin" "s2-begin" "s1-insert-dist-10" "s2-insert-local-10" "s2-insert-dist-10" "s1-insert-local-10" "deadlock-checker-call" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-insert-dist-10" "s2-insert-local-10" "s2-insert-dist-10" "s1-insert-local-10" "deadlock-checker-call" "s1-commit" "s2-commit"
# deadlock with reference tables only // deadlock with reference tables only
permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-insert-ref-11" "s2-insert-ref-11" "s1-insert-ref-10" "deadlock-checker-call" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-insert-ref-11" "s2-insert-ref-11" "s1-insert-ref-10" "deadlock-checker-call" "s1-commit" "s2-commit"
# deadlock with reference + distributed tables // deadlock with reference + distributed tables
permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-update-1" "deadlock-checker-call" "s2-update-1" "s1-insert-ref-10" "deadlock-checker-call" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-update-1" "deadlock-checker-call" "s2-update-1" "s1-insert-ref-10" "deadlock-checker-call" "s1-commit" "s2-commit"
# slightly more complex case, loop with three nodes // slightly more complex case, loop with three nodes
permutation "s1-begin" "s2-begin" "s3-begin" "s1-update-1" "s2-update-2" "s3-update-3" "deadlock-checker-call" "s1-update-2" "s2-update-3" "s3-update-1" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s1-update-1" "s2-update-2" "s3-update-3" "deadlock-checker-call" "s1-update-2" "s2-update-3" "s3-update-1" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit"
# similar to the above (i.e., 3 nodes), but the cycle starts from the second node // similar to the above (i.e., 3 nodes), but the cycle starts from the second node
permutation "s1-begin" "s2-begin" "s3-begin" "s2-update-1" "s1-update-1" "s2-update-2" "s3-update-3" "s3-update-2" "deadlock-checker-call" "s2-update-3" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s2-update-1" "s1-update-1" "s2-update-2" "s3-update-3" "s3-update-2" "deadlock-checker-call" "s2-update-3" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit"
# not connected graph // not connected graph
permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s1-update-1" "s2-update-2" "s3-update-3" "s3-update-2" "deadlock-checker-call" "s4-update-4" "s2-update-3" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit" "s4-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s1-update-1" "s2-update-2" "s3-update-3" "s3-update-2" "deadlock-checker-call" "s4-update-4" "s2-update-3" "deadlock-checker-call" "s3-commit" "s2-commit" "s1-commit" "s4-commit"
# still a not connected graph, but each smaller graph contains dependencies, one of which is a distributed deadlock // still a not connected graph, but each smaller graph contains dependencies, one of which is a distributed deadlock
permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s4-update-1" "s1-update-1" "deadlock-checker-call" "s2-update-2" "s3-update-3" "s2-update-3" "s3-update-2" "deadlock-checker-call" "s3-commit" "s2-commit" "s4-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s4-update-1" "s1-update-1" "deadlock-checker-call" "s2-update-2" "s3-update-3" "s2-update-3" "s3-update-2" "deadlock-checker-call" "s3-commit" "s2-commit" "s4-commit" "s1-commit"
# multiple deadlocks on a not connected graph // multiple deadlocks on a not connected graph
permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s1-update-1" "s4-update-4" "s2-update-2" "s3-update-3" "s3-update-2" "s4-update-1" "s1-update-4" "deadlock-checker-call" "s1-commit" "s4-commit" "s2-update-3" "deadlock-checker-call" "s2-commit" "s3-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s1-update-1" "s4-update-4" "s2-update-2" "s3-update-3" "s3-update-2" "s4-update-1" "s1-update-4" "deadlock-checker-call" "s1-commit" "s4-commit" "s2-update-3" "deadlock-checker-call" "s2-commit" "s3-commit"
# a larger graph where the first node is in the distributed deadlock // a larger graph where the first node is in the distributed deadlock
permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s1-update-1" "s5-update-5" "s3-update-2" "s2-update-3" "s4-update-4" "s3-update-4" "deadlock-checker-call" "s6-update-6" "s4-update-6" "s1-update-5" "s5-update-1" "deadlock-checker-call" "s1-commit" "s5-commit" "s6-commit" "s4-commit" "s3-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s1-update-1" "s5-update-5" "s3-update-2" "s2-update-3" "s4-update-4" "s3-update-4" "deadlock-checker-call" "s6-update-6" "s4-update-6" "s1-update-5" "s5-update-1" "deadlock-checker-call" "s1-commit" "s5-commit" "s6-commit" "s4-commit" "s3-commit" "s2-commit"
# a larger graph where the deadlock starts from a middle node // a larger graph where the deadlock starts from a middle node
permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s6-update-6" "s5-update-5" "s5-update-6" "s4-update-4" "s1-update-4" "s4-update-5" "deadlock-checker-call" "s2-update-3" "s3-update-2" "s2-update-2" "s3-update-3" "deadlock-checker-call" "s6-commit" "s5-commit" "s4-commit" "s1-commit" "s3-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s6-update-6" "s5-update-5" "s5-update-6" "s4-update-4" "s1-update-4" "s4-update-5" "deadlock-checker-call" "s2-update-3" "s3-update-2" "s2-update-2" "s3-update-3" "deadlock-checker-call" "s6-commit" "s5-commit" "s4-commit" "s1-commit" "s3-commit" "s2-commit"
# a larger graph where the deadlock starts from the last node // a larger graph where the deadlock starts from the last node
permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s5-update-5" "s3-update-2" "s2-update-2" "s4-update-4" "s3-update-4" "s4-update-5" "s1-update-4" "deadlock-checker-call" "s6-update-6" "s5-update-6" "s6-update-5" "deadlock-checker-call" "s5-commit" "s6-commit" "s4-commit" "s3-commit" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s5-update-5" "s3-update-2" "s2-update-2" "s4-update-4" "s3-update-4" "s4-update-5" "s1-update-4" "deadlock-checker-call" "s6-update-6" "s5-update-6" "s6-update-5" "deadlock-checker-call" "s5-commit" "s6-commit" "s4-commit" "s3-commit" "s1-commit" "s2-commit"
# a backend is blocked on multiple backends // a backend is blocked on multiple backends
# note that session 5 is not strictly necessary to simulate the deadlock // note that session 5 is not strictly necessary to simulate the deadlock
# we only added that such that session 4 waits on for that // we only added that such that session 4 waits on for that
# thus if any cancellation happens on session 4, we'd be able to // thus if any cancellation happens on session 4, we'd be able to
# observe it, otherwise cancelling idle backends has no effect	// observe it, otherwise cancelling idle backends has no effect
# (cancelling wrong backend used to be a bug and already fixed) // (cancelling wrong backend used to be a bug and already fixed)
permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s1-update-1" "s3-update-3" "s2-update-4" "s2-update-3" "s4-update-2" "s5-random-adv-lock" "s4-random-adv-lock" "s3-update-1" "s1-update-2-4" "deadlock-checker-call" "deadlock-checker-call" "s5-commit" "s4-commit" "s2-commit" "s1-commit" "s3-commit" permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s1-update-1" "s3-update-3" "s2-update-4" "s2-update-3" "s4-update-2" "s5-random-adv-lock" "s4-random-adv-lock" "s3-update-1" "s1-update-2-4" "deadlock-checker-call" "deadlock-checker-call" "s5-commit" "s4-commit" "s2-commit" "s1-commit" "s3-commit"

View File

@ -1,4 +1,4 @@
# Tests around distributed transaction id generation // Tests around distributed transaction id generation
setup setup
{ {
@ -86,7 +86,7 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# print only the necessary parts to prevent concurrent runs to print different values // print only the necessary parts to prevent concurrent runs to print different values
step "s2-get-first-worker-active-transactions" step "s2-get-first-worker-active-transactions"
{ {
SELECT * FROM run_command_on_workers('SELECT row(initiator_node_identifier, transaction_number) SELECT * FROM run_command_on_workers('SELECT row(initiator_node_identifier, transaction_number)
@ -124,13 +124,13 @@ step "s3-get-all-transactions"
SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3;
} }
# show that we could get all distributed transaction ids from separate sessions	// show that we could get all distributed transaction ids from separate sessions
permutation "s1-begin" "s1-assign-transaction-id" "s1-get-all-transactions" "s2-begin" "s2-assign-transaction-id" "s2-get-all-transactions" "s3-begin" "s3-assign-transaction-id" "s3-get-all-transactions" "s1-commit" "s2-commit" "s3-commit" permutation "s1-begin" "s1-assign-transaction-id" "s1-get-all-transactions" "s2-begin" "s2-assign-transaction-id" "s2-get-all-transactions" "s3-begin" "s3-assign-transaction-id" "s3-get-all-transactions" "s1-commit" "s2-commit" "s3-commit"
# now show that distributed transaction id on the coordinator // now show that distributed transaction id on the coordinator
# is the same with the one on the worker // is the same with the one on the worker
permutation "s1-create-table" "s1-begin" "s1-insert" "s1-verify-current-xact-is-on-worker" "s1-commit" permutation "s1-create-table" "s1-begin" "s1-insert" "s1-verify-current-xact-is-on-worker" "s1-commit"
# we would initially forget the distributed transaction ID on pg_dist_partition invalidations // we would initially forget the distributed transaction ID on pg_dist_partition invalidations
permutation "s1-begin" "s1-assign-transaction-id" "s1-has-transaction-number" "s2-vacuum" "s1-has-transaction-number" "s1-commit" permutation "s1-begin" "s1-assign-transaction-id" "s1-has-transaction-number" "s2-vacuum" "s1-has-transaction-number" "s1-commit"

View File

@ -93,17 +93,17 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# verify that repair is blocked by ongoing modifying simple transaction // verify that repair is blocked by ongoing modifying simple transaction
permutation "s2-invalidate-57637" "s1-begin" "s1-insertone" "s2-repair" "s1-commit" permutation "s2-invalidate-57637" "s1-begin" "s1-insertone" "s2-repair" "s1-commit"
# verify that repair is blocked by ongoing modifying insert...select transaction // verify that repair is blocked by ongoing modifying insert...select transaction
permutation "s1-insertone" "s2-invalidate-57637" "s1-begin" "s1-insertall" "s2-repair" "s1-commit" permutation "s1-insertone" "s2-invalidate-57637" "s1-begin" "s1-insertall" "s2-repair" "s1-commit"
# verify that modifications wait for shard repair // verify that modifications wait for shard repair
permutation "s2-invalidate-57637" "s2-begin" "s2-repair" "s1-insertone" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" permutation "s2-invalidate-57637" "s2-begin" "s2-repair" "s1-insertone" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display"
# verify that prepared plain modifications wait for shard repair // verify that prepared plain modifications wait for shard repair
permutation "s2-invalidate-57637" "s1-prepared-insertone" "s2-begin" "s2-repair" "s1-prepared-insertone" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" permutation "s2-invalidate-57637" "s1-prepared-insertone" "s2-begin" "s2-repair" "s1-prepared-insertone" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display"
# verify that prepared INSERT ... SELECT waits for shard repair // verify that prepared INSERT ... SELECT waits for shard repair
permutation "s2-invalidate-57637" "s1-insertone" "s1-prepared-insertall" "s2-begin" "s2-repair" "s1-prepared-insertall" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" permutation "s2-invalidate-57637" "s1-insertone" "s1-prepared-insertall" "s2-begin" "s2-repair" "s1-prepared-insertall" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display"

View File

@ -1,45 +1,14 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE dist_table(id integer, value integer); CREATE TABLE dist_table(id integer, value integer);
SELECT create_distributed_table('dist_table', 'id'); SELECT create_distributed_table('dist_table', 'id');
COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS dist_table CASCADE; DROP TABLE IF EXISTS dist_table CASCADE;
@ -53,7 +22,7 @@ step "s1-begin"
BEGIN; BEGIN;
} }
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -98,7 +67,7 @@ step "s1-commit"
session "s2" session "s2"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {

View File

@ -67,9 +67,9 @@ permutation "s1-begin" "s1-drop-all-shards" "s2-apply-delete-command" "s1-commit
permutation "s1-begin" "s1-drop-all-shards" "s2-drop-all-shards" "s1-commit" permutation "s1-begin" "s1-drop-all-shards" "s2-drop-all-shards" "s1-commit"
permutation "s1-begin" "s1-drop-all-shards" "s2-select" "s1-commit" permutation "s1-begin" "s1-drop-all-shards" "s2-select" "s1-commit"
# We can't verify master_apply_delete_command + SELECT since it blocks on the // We can't verify master_apply_delete_command + SELECT since it blocks on the
# the workers, but this is not visible on the master, meaning the isolation // the workers, but this is not visible on the master, meaning the isolation
# test cannot proceed. // test cannot proceed.
permutation "s1-begin" "s1-apply-delete-command" "s2-truncate" "s1-commit" permutation "s1-begin" "s1-apply-delete-command" "s2-truncate" "s1-commit"
permutation "s1-begin" "s1-apply-delete-command" "s2-apply-delete-command" "s1-commit" permutation "s1-begin" "s1-apply-delete-command" "s2-apply-delete-command" "s1-commit"
permutation "s1-begin" "s1-apply-delete-command" "s2-drop-all-shards" "s1-commit" permutation "s1-begin" "s1-apply-delete-command" "s2-drop-all-shards" "s1-commit"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of DROP in concurrent operations // create range distributed table to test behavior of DROP in concurrent operations
setup setup
{ {
SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.replace_isolation_tester_func();
@ -13,7 +13,7 @@ setup
SELECT create_distributed_table('drop_hash', 'id'); SELECT create_distributed_table('drop_hash', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS drop_hash CASCADE; DROP TABLE IF EXISTS drop_hash CASCADE;
@ -21,7 +21,7 @@ teardown
SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_internal.restore_isolation_tester_func();
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-initialize" { COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -39,7 +39,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-begin" { BEGIN; } step "s2-begin" { BEGIN; }
step "s2-drop" { DROP TABLE drop_hash; } step "s2-drop" { DROP TABLE drop_hash; }
@ -55,10 +55,10 @@ step "s2-distribute-table" { SELECT create_distributed_table('drop_hash', 'id');
step "s2-select" { SELECT * FROM drop_hash ORDER BY 1, 2; } step "s2-select" { SELECT * FROM drop_hash ORDER BY 1, 2; }
step "s2-commit" { COMMIT; } step "s2-commit" { COMMIT; }
# permutations - DROP vs DROP // permutations - DROP vs DROP
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-drop" "s1-commit" "s2-commit" "s1-select-count"
# permutations - DROP first // permutations - DROP first
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-drop-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-drop-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s1-drop" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes"
@ -68,7 +68,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-rename-colum
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count"
# permutations - DROP second // permutations - DROP second
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-ddl-drop-index" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-ddl-drop-index" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns"

View File

@ -87,8 +87,8 @@ step "detector-dump-wait-edges"
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
} }
# Distributed transaction blocked by another distributed transaction // Distributed transaction blocked by another distributed transaction
permutation "s1-begin" "s2-begin" "s1-update" "s2-update" "detector-dump-wait-edges" "s1-abort" "s2-abort" permutation "s1-begin" "s2-begin" "s1-update" "s2-update" "detector-dump-wait-edges" "s1-abort" "s2-abort"
# Distributed transaction blocked by another distributed transaction blocked by another distributed transaction // Distributed transaction blocked by another distributed transaction blocked by another distributed transaction
permutation "s1-begin" "s2-begin" "s3-begin" "s1-update" "s2-update" "s3-update" "detector-dump-wait-edges" "s1-abort" "s2-abort" "s3-abort" permutation "s1-begin" "s2-begin" "s3-begin" "s1-update" "s2-update" "s3-update" "detector-dump-wait-edges" "s1-abort" "s2-abort" "s3-abort"

View File

@ -81,11 +81,11 @@ step "detector-dump-wait-edges"
blocking_transaction_waiting; blocking_transaction_waiting;
} }
# Distributed transaction blocked by another distributed transaction // Distributed transaction blocked by another distributed transaction
permutation "dist11-begin" "dist13-begin" "dist11-update" "dist13-update" "detector-dump-wait-edges" "dist11-abort" "dist13-abort" permutation "dist11-begin" "dist13-begin" "dist11-update" "dist13-update" "detector-dump-wait-edges" "dist11-abort" "dist13-abort"
# Distributed transaction blocked by a regular transaction // Distributed transaction blocked by a regular transaction
permutation "local-begin" "dist13-begin" "local-update" "dist13-update" "detector-dump-wait-edges" "local-abort" "dist13-abort" permutation "local-begin" "dist13-begin" "local-update" "dist13-update" "detector-dump-wait-edges" "local-abort" "dist13-abort"
# Distributed transaction blocked by a regular transaction blocked by a distributed transaction // Distributed transaction blocked by a regular transaction blocked by a distributed transaction
permutation "dist11-begin" "local-begin" "dist13-begin" "dist11-update" "local-update" "dist13-update" "detector-dump-wait-edges" "dist11-abort" "local-abort" "dist13-abort" permutation "dist11-begin" "local-begin" "dist13-begin" "dist11-update" "local-update" "dist13-update" "detector-dump-wait-edges" "dist11-abort" "local-abort" "dist13-abort"

View File

@ -1,5 +1,5 @@
# the test expects to have zero nodes in pg_dist_node at the beginning // the test expects to have zero nodes in pg_dist_node at the beginning
# add single one of the nodes for the purpose of the test // add single one of the nodes for the purpose of the test
setup setup
{ {
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER) CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER)
@ -11,7 +11,7 @@ setup
SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57637);
} }
# ensure that both nodes exists for the remaining of the isolation tests // ensure that both nodes exists for the remaining of the isolation tests
teardown teardown
{ {
SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57637);
@ -50,8 +50,8 @@ step "s1-commit"
COMMIT; COMMIT;
} }
# printing in session 1 adds the worker node, this makes we are sure we count the objects // printing in session 1 adds the worker node, this makes we are sure we count the objects
# on that node as well. After counting objects is done we remove the node again. // on that node as well. After counting objects is done we remove the node again.
step "s1-print-distributed-objects" step "s1-print-distributed-objects"
{ {
SELECT 1 FROM master_add_node('localhost', 57638); SELECT 1 FROM master_add_node('localhost', 57638);
@ -126,8 +126,8 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# prints from session 2 are run at the end when the worker has already been added by the // prints from session 2 are run at the end when the worker has already been added by the
# test // test
step "s2-print-distributed-objects" step "s2-print-distributed-objects"
{ {
-- print an overview of all distributed objects -- print an overview of all distributed objects
@ -195,7 +195,7 @@ step "s3-commit"
} }
# schema only tests // schema only tests
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects"
@ -203,25 +203,25 @@ permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects"
# concurrency tests with multi schema distribution // concurrency tests with multi schema distribution
permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-table" "s3-use-schema" "s3-create-table" "s1-commit" "s2-commit" "s3-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-table" "s3-use-schema" "s3-create-table" "s1-commit" "s2-commit" "s3-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s2-commit" "s3-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s2-commit" "s3-commit" "s2-print-distributed-objects"
# type and schema tests // type and schema tests
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects"
# distributed function tests // distributed function tests
# isolation tests are not very simple psql, so trigger NOTIFY reliably for // isolation tests are not very simple psql, so trigger NOTIFY reliably for
# s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by // s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by
# "s2-commit", because "COMMIT" syncs the messages // "s2-commit", because "COMMIT" syncs the messages
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects"
permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects"
# we cannot run the following operations concurrently // we cannot run the following operations concurrently
# the problem is that NOTIFY event doesn't (reliably) happen before COMMIT // the problem is that NOTIFY event doesn't (reliably) happen before COMMIT
# so we have to commit s2 before s1 starts // so we have to commit s2 before s1 starts
permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects"

View File

@ -105,7 +105,7 @@ step "s2-remove-node-1"
SELECT 1 FROM master_remove_node('localhost', 57637); SELECT 1 FROM master_remove_node('localhost', 57637);
} }
# master_#_node vs extension command	// master_#_node vs extension command
permutation "s1-begin" "s1-add-node-1" "s2-create-extension-version-11" "s1-commit" "s1-print" permutation "s1-begin" "s1-add-node-1" "s2-create-extension-version-11" "s1-commit" "s1-print"
permutation "s1-begin" "s1-add-node-1" "s2-alter-extension-update-to-version-12" "s1-commit" "s1-print" permutation "s1-begin" "s1-add-node-1" "s2-alter-extension-update-to-version-12" "s1-commit" "s1-print"
permutation "s1-add-node-1" "s1-begin" "s1-remove-node-1" "s2-drop-extension" "s1-commit" "s1-print" permutation "s1-add-node-1" "s1-begin" "s1-remove-node-1" "s2-drop-extension" "s1-commit" "s1-print"
@ -114,7 +114,7 @@ permutation "s1-begin" "s1-add-node-1" "s2-drop-extension" "s1-commit" "s1-print
permutation "s1-add-node-1" "s1-create-extension-with-schema2" "s1-begin" "s1-remove-node-1" "s2-alter-extension-set-schema3" "s1-commit" "s1-print" permutation "s1-add-node-1" "s1-create-extension-with-schema2" "s1-begin" "s1-remove-node-1" "s2-alter-extension-set-schema3" "s1-commit" "s1-print"
permutation "s1-add-node-1" "s2-drop-extension" "s1-begin" "s1-remove-node-1" "s2-create-extension-with-schema1" "s1-commit" "s1-print" permutation "s1-add-node-1" "s2-drop-extension" "s1-begin" "s1-remove-node-1" "s2-create-extension-with-schema1" "s1-commit" "s1-print"
# extension command vs master_#_node // extension command vs master_#_node
permutation "s2-add-node-1" "s2-drop-extension" "s2-remove-node-1" "s2-begin" "s2-create-extension-version-11" "s1-add-node-1" "s2-commit" "s1-print" permutation "s2-add-node-1" "s2-drop-extension" "s2-remove-node-1" "s2-begin" "s2-create-extension-version-11" "s1-add-node-1" "s2-commit" "s1-print"
permutation "s2-drop-extension" "s2-add-node-1" "s2-create-extension-version-11" "s2-remove-node-1" "s2-begin" "s2-alter-extension-update-to-version-12" "s1-add-node-1" "s2-commit" "s1-print" permutation "s2-drop-extension" "s2-add-node-1" "s2-create-extension-version-11" "s2-remove-node-1" "s2-begin" "s2-alter-extension-update-to-version-12" "s1-add-node-1" "s2-commit" "s1-print"
permutation "s2-add-node-1" "s2-begin" "s2-drop-extension" "s1-remove-node-1" "s2-commit" "s1-print" permutation "s2-add-node-1" "s2-begin" "s2-drop-extension" "s1-remove-node-1" "s2-commit" "s1-print"

View File

@ -30,7 +30,7 @@ teardown
session "s1" session "s1"
# run_command_on_placements is done in a separate step because the setup is executed as a single transaction // run_command_on_placements is done in a separate step because the setup is executed as a single transaction
step "s1-grant" step "s1-grant"
{ {
GRANT ALL ON test_table TO test_user_1; GRANT ALL ON test_table TO test_user_1;

View File

@ -1,38 +1,6 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup
{
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model to streaming;
setup {
CREATE TABLE ref_table(user_id int, value_1 int); CREATE TABLE ref_table(user_id int, value_1 int);
SELECT create_reference_table('ref_table'); SELECT create_reference_table('ref_table');
INSERT INTO ref_table VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71); INSERT INTO ref_table VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71);
@ -42,8 +10,8 @@ setup
INSERT INTO tt1 VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71); INSERT INTO tt1 VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71);
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE ref_table; DROP TABLE ref_table;
@ -63,7 +31,7 @@ step "s1-update-ref-table-from-coordinator"
UPDATE ref_table SET value_1 = 15; UPDATE ref_table SET value_1 = 15;
} }
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -199,7 +167,7 @@ step "s3-select-distributed-waiting-queries"
SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits;
} }
# session s1 and s4 executes the commands on the same worker node // session s1 and s4 executes the commands on the same worker node
session "s4" session "s4"
step "s4-start-session-level-connection" step "s4-start-session-level-connection"
@ -238,8 +206,8 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy-to
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select-for-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-ref-table" "s3-select-distributed-waiting-queries" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select-for-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-ref-table" "s3-select-distributed-waiting-queries" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection"
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-into-ref-table" "s1-begin" "s1-alter-table" "s3-select-distributed-waiting-queries" "s2-commit-worker" "s1-commit" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-into-ref-table" "s1-begin" "s1-alter-table" "s3-select-distributed-waiting-queries" "s2-commit-worker" "s1-commit" "s2-stop-connection"
# make sure that multi-shard modification queries // make sure that multi-shard modification queries
# show up in the waiting processes even if they are // show up in the waiting processes even if they are
# blocked on the same node // blocked on the same node
permutation "s1-begin" "s1-update-on-the-coordinator" "s2-update-on-the-coordinator" "s3-select-distributed-waiting-queries" "s1-commit" permutation "s1-begin" "s1-update-on-the-coordinator" "s2-update-on-the-coordinator" "s3-select-distributed-waiting-queries" "s1-commit"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update-dist-table" "s4-start-session-level-connection" "s4-begin-on-worker" "s4-update-dist-table" "s3-select-distributed-waiting-queries" "s1-commit-worker" "s4-commit-worker" "s1-stop-connection" "s4-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update-dist-table" "s4-start-session-level-connection" "s4-begin-on-worker" "s4-update-dist-table" "s3-select-distributed-waiting-queries" "s1-commit-worker" "s4-commit-worker" "s1-stop-connection" "s4-stop-connection"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create append distributed table to test behavior of COPY in concurrent operations // create append distributed table to test behavior of COPY in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -10,13 +10,13 @@ setup
SELECT create_distributed_table('hash_copy', 'id'); SELECT create_distributed_table('hash_copy', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS hash_copy CASCADE; DROP TABLE IF EXISTS hash_copy CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-initialize" { COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -58,7 +58,7 @@ step "s1-recreate-with-replication-2"
SELECT create_distributed_table('hash_copy', 'id'); SELECT create_distributed_table('hash_copy', 'id');
} }
# session 2 // session 2
session "s2" session "s2"
step "s2-copy" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
step "s2-copy-additional-column" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } step "s2-copy-additional-column" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; }
@ -86,10 +86,10 @@ step "s2-master-modify-multiple-shards" { DELETE FROM hash_copy; }
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('hash_copy', 'id'); } step "s2-distribute-table" { SELECT create_distributed_table('hash_copy', 'id'); }
# permutations - COPY vs COPY // permutations - COPY vs COPY
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"
# permutations - COPY first // permutations - COPY first
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count"
@ -110,13 +110,13 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shar
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - COPY first (replication factor 2) // permutations - COPY first (replication factor 2)
permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count" permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count"
permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count"
permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-recreate-with-replication-2" "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count"
# permutations - COPY second // permutations - COPY second
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of INSERT/SELECT in concurrent operations // create range distributed table to test behavior of INSERT/SELECT in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -12,14 +12,14 @@ setup
SELECT create_distributed_table('select_of_insert_select_hash', 'id'); SELECT create_distributed_table('select_of_insert_select_hash', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS insert_of_insert_select_hash CASCADE; DROP TABLE IF EXISTS insert_of_insert_select_hash CASCADE;
DROP TABLE IF EXISTS select_of_insert_select_hash CASCADE; DROP TABLE IF EXISTS select_of_insert_select_hash CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" step "s1-initialize"
{ {
@ -65,7 +65,7 @@ step "s1-show-columns-selected" { SELECT run_command_on_workers('SELECT column_n
step "s1-select-count" { SELECT COUNT(*) FROM select_of_insert_select_hash; } step "s1-select-count" { SELECT COUNT(*) FROM select_of_insert_select_hash; }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-insert-select" { INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; } step "s2-insert-select" { INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; }
step "s2-update-on-inserted" { UPDATE insert_of_insert_select_hash SET data = 'l' WHERE id = 4; } step "s2-update-on-inserted" { UPDATE insert_of_insert_select_hash SET data = 'l' WHERE id = 4; }
@ -99,10 +99,10 @@ step "s2-master-drop-all-shards-on-selected" { SELECT master_drop_all_shards('se
step "s2-create-non-distributed-table-on-selected" { CREATE TABLE select_of_insert_select_hash(id integer, data text); } step "s2-create-non-distributed-table-on-selected" { CREATE TABLE select_of_insert_select_hash(id integer, data text); }
step "s2-distribute-table-on-selected" { SELECT create_distributed_table('select_of_insert_select_hash', 'id'); } step "s2-distribute-table-on-selected" { SELECT create_distributed_table('select_of_insert_select_hash', 'id'); }
# permutations - INSERT/SELECT vs INSERT/SELECT // permutations - INSERT/SELECT vs INSERT/SELECT
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert-select" "s1-commit" "s1-select-count"
# permutations - INSERT/SELECT first operation on INSERT side // permutations - INSERT/SELECT first operation on INSERT side
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-update-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-update-on-inserted" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-delete-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-delete-on-inserted" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-truncate-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-truncate-on-inserted" "s1-commit" "s1-select-count"
@ -118,7 +118,7 @@ permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-modify-mult
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-drop-all-shards-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-drop-all-shards-on-inserted" "s1-commit" "s1-select-count"
permutation "s1-drop-on-inserted" "s1-create-non-distributed-table-on-inserted" "s1-initialize" "s1-begin" "s1-insert-select" "s2-distribute-table-on-inserted" "s1-commit" "s1-select-count" permutation "s1-drop-on-inserted" "s1-create-non-distributed-table-on-inserted" "s1-initialize" "s1-begin" "s1-insert-select" "s2-distribute-table-on-inserted" "s1-commit" "s1-select-count"
# permutations - INSERT/SELECT first operation on SELECT side // permutations - INSERT/SELECT first operation on SELECT side
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-update-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-update-on-selected" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-delete-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-delete-on-selected" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-truncate-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-truncate-on-selected" "s1-commit" "s1-select-count"
@ -134,7 +134,7 @@ permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-modify-mult
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-drop-all-shards-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-drop-all-shards-on-selected" "s1-commit" "s1-select-count"
permutation "s1-drop-on-selected" "s1-create-non-distributed-table-on-selected" "s1-initialize" "s1-begin" "s1-insert-select" "s2-distribute-table-on-selected" "s1-commit" "s1-select-count" permutation "s1-drop-on-selected" "s1-create-non-distributed-table-on-selected" "s1-initialize" "s1-begin" "s1-insert-select" "s2-distribute-table-on-selected" "s1-commit" "s1-select-count"
# permutations - INSERT/SELECT second on INSERT side // permutations - INSERT/SELECT second on INSERT side
permutation "s1-initialize" "s1-begin" "s1-update-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-delete-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-truncate-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count"
@ -149,7 +149,7 @@ permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards-on-inse
permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-drop-on-inserted" "s1-create-non-distributed-table-on-inserted" "s1-initialize" "s1-begin" "s1-distribute-table-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-drop-on-inserted" "s1-create-non-distributed-table-on-inserted" "s1-initialize" "s1-begin" "s1-distribute-table-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count"
# permutations - INSERT/SELECT second on SELECT side // permutations - INSERT/SELECT second on SELECT side
permutation "s1-initialize" "s1-begin" "s1-update-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update-on-selected" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-delete-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete-on-selected" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-truncate-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate-on-selected" "s2-insert-select" "s1-commit" "s1-select-count"

View File

@ -1,45 +1,14 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE dist_table(id integer, value integer); CREATE TABLE dist_table(id integer, value integer);
SELECT create_distributed_table('dist_table', 'id'); SELECT create_distributed_table('dist_table', 'id');
COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; COPY dist_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS dist_table CASCADE; DROP TABLE IF EXISTS dist_table CASCADE;
@ -48,7 +17,7 @@ teardown
session "s1" session "s1"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -88,7 +57,7 @@ step "s2-begin"
BEGIN; BEGIN;
} }
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {
@ -142,7 +111,7 @@ step "s2-select-for-update"
step "s2-coordinator-create-index-concurrently" step "s2-coordinator-create-index-concurrently"
{ {
CREATE INDEX CONCURRENTLY dist_table_index ON dist_table(id); CREATE INDEX CONCURRENTLY dist_table_index ON dist_table(id);
} }
step "s2-commit-worker" step "s2-commit-worker"
@ -186,6 +155,6 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocat
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-begin" "s2-coordinator-drop" "s1-commit-worker" "s2-commit" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-begin" "s2-coordinator-drop" "s1-commit-worker" "s2-commit" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocated-insert-select" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocated-insert-select" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
#Not able to test the next permutations, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 //Not able to test the next permutations, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966
#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocated-insert-select" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" //permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-colocated-insert-select" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection"
#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" //permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-select-via-coordinator" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of INSERT in concurrent operations // create range distributed table to test behavior of INSERT in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -10,13 +10,13 @@ setup
SELECT create_distributed_table('insert_hash', 'id'); SELECT create_distributed_table('insert_hash', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS insert_hash CASCADE; DROP TABLE IF EXISTS insert_hash CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-initialize" { COPY insert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -41,7 +41,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-insert" { INSERT INTO insert_hash VALUES(7, 'k'); } step "s2-insert" { INSERT INTO insert_hash VALUES(7, 'k'); }
step "s2-insert-multi-row" { INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); } step "s2-insert-multi-row" { INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); }
@ -60,13 +60,13 @@ step "s2-table-size" { SELECT citus_total_relation_size('insert_hash'); }
step "s2-master-modify-multiple-shards" { DELETE FROM insert_hash; } step "s2-master-modify-multiple-shards" { DELETE FROM insert_hash; }
step "s2-distribute-table" { SELECT create_distributed_table('insert_hash', 'id'); } step "s2-distribute-table" { SELECT create_distributed_table('insert_hash', 'id'); }
# permutations - INSERT vs INSERT // permutations - INSERT vs INSERT
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert-multi-row" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert-multi-row" "s1-commit" "s1-select-count"
# permutations - INSERT first // permutations - INSERT first
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-update" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-delete" "s1-commit" "s1-select-count"
@ -82,7 +82,7 @@ permutation "s1-initialize" "s1-begin" "s1-insert" "s2-table-size" "s1-commit" "
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-insert" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-insert" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - INSERT second // permutations - INSERT second
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-update" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-delete" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-insert" "s1-commit" "s1-select-count"
@ -97,7 +97,7 @@ permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-insert" "s1-commit" "
permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-insert" "s1-commit" "s1-select-count"
# permutations - multi row INSERT first // permutations - multi row INSERT first
permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-update" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-delete" "s1-commit" "s1-select-count"
@ -113,7 +113,7 @@ permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-table-size" "s1
permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - multi row INSERT second // permutations - multi row INSERT second
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert-multi-row" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-update" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-insert-multi-row" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-delete" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-insert-multi-row" "s1-commit" "s1-select-count"

View File

@ -1,54 +1,24 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE insert_table(id integer, value integer); CREATE TABLE insert_table(id integer, value integer);
SELECT create_distributed_table('insert_table', 'id'); SELECT create_distributed_table('insert_table', 'id');
COPY insert_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; COPY insert_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS insert_table CASCADE; DROP TABLE IF EXISTS insert_table CASCADE;
SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_internal.restore_isolation_tester_func();
} }
session "s1" session "s1"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -83,7 +53,7 @@ step "s1-stop-connection"
session "s2" session "s2"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {
@ -177,5 +147,5 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert-
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker""s3-select-count" "s1-stop-connection" "s2-stop-connection"
#Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 //Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966
#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" //permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection"

View File

@ -39,9 +39,9 @@ step "s2-vacuum-full"
VACUUM FULL test_insert_vacuum; VACUUM FULL test_insert_vacuum;
} }
# INSERT and VACUUM ANALYZE should not block each other. // INSERT and VACUUM ANALYZE should not block each other.
permutation "s1-begin" "s1-insert" "s2-vacuum-analyze" "s1-commit" permutation "s1-begin" "s1-insert" "s2-vacuum-analyze" "s1-commit"
# INSERT and VACUUM FULL should block each other. // INSERT and VACUUM FULL should block each other.
permutation "s1-begin" "s1-insert" "s2-vacuum-full" "s1-commit" permutation "s1-begin" "s1-insert" "s2-vacuum-full" "s1-commit"

View File

@ -60,5 +60,5 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# concurrent master_append_table_to_shard tests // concurrent master_append_table_to_shard tests
permutation "s1-begin" "s2-begin" "s1-master_append_table_to_shard" "s2-master_append_table_to_shard" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-master_append_table_to_shard" "s2-master_append_table_to_shard" "s1-commit" "s2-commit"

View File

@ -56,7 +56,7 @@ step "s2-commit"
COMMIT; COMMIT;
} }
#concurrent master_apply_delete_command vs master_apply_delete_command //concurrent master_apply_delete_command vs master_apply_delete_command
permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_all_shard" "s2-master_apply_delete_command_all_shard" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_all_shard" "s2-master_apply_delete_command_all_shard" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_all_shard" "s2-master_apply_delete_command_row" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_all_shard" "s2-master_apply_delete_command_row" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_row" "s2-master_apply_delete_command_all_shard" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-master_apply_delete_command_row" "s2-master_apply_delete_command_all_shard" "s1-commit" "s2-commit"

View File

@ -81,7 +81,7 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# tests to check locks on subqueries are taken // tests to check locks on subqueries are taken
permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-insert_to_events_test_table" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-insert_to_events_test_table" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-update_events_test_table" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-update_events_test_table" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-delete_events_test_table" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-modify_with_subquery_v1" "s1-delete_events_test_table" "s2-commit" "s1-commit"

View File

@ -136,34 +136,34 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# test with parallel connections // test with parallel connections
permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-select" "s1-commit" "s2-select" "s2-commit" permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-select" "s1-commit" "s2-select" "s2-commit"
permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-update_all_value_1" "s1-commit" "s2-commit" permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-update_all_value_1" "s1-commit" "s2-commit"
# test without deadlock prevention (first does not conflict, second does) // test without deadlock prevention (first does not conflict, second does)
permutation "s1-begin" "s1-update_even_concurrently" "s2-begin" "s2-update_odd_concurrently" "s1-commit" "s2-commit" permutation "s1-begin" "s1-update_even_concurrently" "s2-begin" "s2-update_odd_concurrently" "s1-commit" "s2-commit"
permutation "s1-begin" "s1-update_even_concurrently" "s2-begin" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit" permutation "s1-begin" "s1-update_even_concurrently" "s2-begin" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit"
# test with shard pruning (should not conflict) // test with shard pruning (should not conflict)
permutation "s1-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit" "s2-select"
permutation "s1-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" "s2-select"
# test with inserts // test with inserts
permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-insert-to-table" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-insert-to-table" "s1-commit" "s2-commit" "s2-select"
permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-insert-into-select" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-update_all_value_1" "s2-begin" "s2-insert-into-select" "s1-commit" "s2-commit" "s2-select"
# multi-shard update affecting the same rows // multi-shard update affecting the same rows
permutation "s1-begin" "s2-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-update_value_1_of_1_or_3_to_5" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit"
# multi-shard update affecting the different rows // multi-shard update affecting the different rows
permutation "s1-begin" "s2-begin" "s2-update_value_1_of_1_or_3_to_8" "s1-update_value_1_of_2_or_4_to_5" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-update_value_1_of_1_or_3_to_8" "s1-update_value_1_of_2_or_4_to_5" "s2-commit" "s1-commit"
# test with sequential connections, sequential tests should not block each other // test with sequential connections, sequential tests should not block each other
# if they are targeting different shards. If multiple connections updating the same // if they are targeting different shards. If multiple connections updating the same
# row, second one must wait for the first one. // row, second one must wait for the first one.
permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_all_value_1" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_all_value_1" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_all_value_1" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_all_value_1" "s1-commit" "s2-commit" "s2-select"
permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" "s2-select"
permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit" "s2-select" permutation "s1-begin" "s1-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-begin" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_4_or_6_to_4" "s1-commit" "s2-commit" "s2-select"
# multi-shard update affecting the same rows // multi-shard update affecting the same rows
permutation "s1-begin" "s2-begin" "s1-change_connection_mode_to_sequential" "s2-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-change_connection_mode_to_sequential" "s2-change_connection_mode_to_sequential" "s1-update_value_1_of_1_or_3_to_5" "s2-update_value_1_of_1_or_3_to_8" "s1-commit" "s2-commit"
# multi-shard update affecting the different rows // multi-shard update affecting the different rows
permutation "s1-begin" "s2-begin" "s1-change_connection_mode_to_sequential" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_1_or_3_to_8" "s1-update_value_1_of_2_or_4_to_5" "s1-commit" "s2-commit" permutation "s1-begin" "s2-begin" "s1-change_connection_mode_to_sequential" "s2-change_connection_mode_to_sequential" "s2-update_value_1_of_1_or_3_to_8" "s1-update_value_1_of_2_or_4_to_5" "s1-commit" "s2-commit"

View File

@ -25,7 +25,7 @@ teardown
session "s1" session "s1"
# run_command_on_placements is done in a separate step because the setup is executed as a single transaction // run_command_on_placements is done in a separate step because the setup is executed as a single transaction
step "s1-grant" step "s1-grant"
{ {
SET ROLE test_user_1; SET ROLE test_user_1;
@ -117,17 +117,17 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# REINDEX // REINDEX
permutation "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-commit" "s1-commit"
permutation "s1-grant" "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-insert" "s2-commit" "s1-commit" permutation "s1-grant" "s1-begin" "s2-begin" "s2-reindex" "s1-insert" "s2-insert" "s2-commit" "s1-commit"
permutation "s1-grant" "s1-begin" "s2-begin" "s1-reindex" "s2-insert" "s1-insert" "s1-commit" "s2-commit" permutation "s1-grant" "s1-begin" "s2-begin" "s1-reindex" "s2-insert" "s1-insert" "s1-commit" "s2-commit"
# CREATE INDEX // CREATE INDEX
permutation "s1-begin" "s2-begin" "s2-index" "s1-insert" "s2-commit" "s1-commit" "s2-drop-index" permutation "s1-begin" "s2-begin" "s2-index" "s1-insert" "s2-commit" "s1-commit" "s2-drop-index"
permutation "s1-grant" "s1-begin" "s2-begin" "s2-insert" "s1-index" "s2-insert" "s2-commit" "s1-commit" "s1-drop-index" permutation "s1-grant" "s1-begin" "s2-begin" "s2-insert" "s1-index" "s2-insert" "s2-commit" "s1-commit" "s1-drop-index"
permutation "s1-grant" "s1-begin" "s2-begin" "s1-index" "s2-index" "s1-insert" "s1-commit" "s2-commit" "s1-drop-index" "s2-drop-index" permutation "s1-grant" "s1-begin" "s2-begin" "s1-index" "s2-index" "s1-insert" "s1-commit" "s2-commit" "s1-drop-index" "s2-drop-index"
# TRUNCATE // TRUNCATE
permutation "s1-begin" "s2-begin" "s2-truncate" "s1-insert" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-truncate" "s1-insert" "s2-commit" "s1-commit"
permutation "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-insert" "s1-insert" "s1-commit" "s2-commit" permutation "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-insert" "s1-insert" "s1-commit" "s2-commit"
permutation "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" permutation "s1-grant" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit"

View File

@ -0,0 +1,35 @@
// Create and use UDF to send commands from the same connection. Also make the cluster
// ready for testing MX functionalities.
setup
{
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
}

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create append distributed table to test behavior of COPY in concurrent operations // create append distributed table to test behavior of COPY in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -13,13 +13,13 @@ setup
SELECT create_distributed_table('partitioned_copy', 'id'); SELECT create_distributed_table('partitioned_copy', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS partitioned_copy CASCADE; DROP TABLE IF EXISTS partitioned_copy CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-initialize" { COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -54,7 +54,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-copy" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
step "s2-copy-additional-column" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } step "s2-copy-additional-column" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; }
@ -82,10 +82,10 @@ step "s2-master-modify-multiple-shards" { DELETE FROM partitioned_copy; }
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('partitioned_copy', 'id'); } step "s2-distribute-table" { SELECT create_distributed_table('partitioned_copy', 'id'); }
# permutations - COPY vs COPY // permutations - COPY vs COPY
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"
# permutations - COPY first // permutations - COPY first
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count"
@ -103,7 +103,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shar
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - COPY second // permutations - COPY second
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count"

View File

@ -1,8 +1,8 @@
# Isolation tests for checking the progress monitoring infrastructure // Isolation tests for checking the progress monitoring infrastructure
# We create three different processes, two of the type "1337" and one of type "3778" // We create three different processes, two of the type "1337" and one of type "3778"
# We utilize advisory locks to control steps of the processes // We utilize advisory locks to control steps of the processes
# Different locks are held for each step so that the processes stop at each step and // Different locks are held for each step so that the processes stop at each step and
# we can see their progress. // we can see their progress.
setup setup
{ {

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create append distributed table to test behavior of COPY in concurrent operations // create append distributed table to test behavior of COPY in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -10,13 +10,13 @@ setup
SELECT create_distributed_table('range_copy', 'id', 'append'); SELECT create_distributed_table('range_copy', 'id', 'append');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS range_copy CASCADE; DROP TABLE IF EXISTS range_copy CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-initialize" { COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -53,7 +53,7 @@ step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-copy" { COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy" { COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
step "s2-copy-additional-column" { COPY range_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } step "s2-copy-additional-column" { COPY range_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; }
@ -82,10 +82,10 @@ step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELE
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); } step "s2-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); }
# permutations - COPY vs COPY // permutations - COPY vs COPY
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"
# permutations - COPY first // permutations - COPY first
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count"
@ -107,7 +107,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-apply-delete-command
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - COPY second // permutations - COPY second
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count"

View File

@ -151,49 +151,49 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# Check that we get necessary resource locks // Check that we get necessary resource locks
# Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode. // Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode.
permutation "s2-begin" "s2-update-table-1" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-update-table-1" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks"
permutation "s2-begin" "s2-delete-table-1" "s1-view-locks" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-delete-table-1" "s1-view-locks" "s2-rollback" "s1-view-locks"
# Case 2. Modifying ref_table_2 should also lock ref_table_1 shard in Exclusive mode. // Case 2. Modifying ref_table_2 should also lock ref_table_1 shard in Exclusive mode.
permutation "s2-begin" "s2-update-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-update-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks"
permutation "s2-begin" "s2-delete-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-delete-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks"
# Case 3. Modifying ref_table_3 should also lock ref_table_1 and ref_table_2 shards in Exclusive mode. // Case 3. Modifying ref_table_3 should also lock ref_table_1 and ref_table_2 shards in Exclusive mode.
permutation "s2-begin" "s2-update-table-3" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-update-table-3" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks"
permutation "s2-begin" "s2-delete-table-3" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-delete-table-3" "s1-begin" "s1-view-locks" "s1-rollback" "s2-rollback" "s1-view-locks"
# Case 4. Inserting into ref_table_1 should only lock its own shard in RowExclusive mode. // Case 4. Inserting into ref_table_1 should only lock its own shard in RowExclusive mode.
permutation "s2-begin" "s2-insert-table-1" "s1-view-locks" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-insert-table-1" "s1-view-locks" "s2-rollback" "s1-view-locks"
# Case 5. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. // Case 5. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode.
permutation "s2-begin" "s2-insert-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-insert-table-2" "s1-view-locks" "s2-rollback" "s1-view-locks"
# Case 6. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. // Case 6. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode.
permutation "s2-begin" "s2-insert-table-3" "s1-view-locks" "s2-rollback" "s1-view-locks" permutation "s2-begin" "s2-insert-table-3" "s1-view-locks" "s2-rollback" "s1-view-locks"
# Now some concurrent operations // Now some concurrent operations
# Updates/Deletes from ref_table_1 cascade to ref_table_2, so DML on ref_table_2 should block // Updates/Deletes from ref_table_1 cascade to ref_table_2, so DML on ref_table_2 should block
# Case 1. UPDATE -> DELETE // Case 1. UPDATE -> DELETE
permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-delete-table-2" "s2-commit" "s1-commit" "s1-select-table-2" permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-delete-table-2" "s2-commit" "s1-commit" "s1-select-table-2"
# Case 2. UPDATE -> INSERT // Case 2. UPDATE -> INSERT
permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-insert-table-2" "s2-commit" "s1-commit" "s1-select-table-2" permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-insert-table-2" "s2-commit" "s1-commit" "s1-select-table-2"
# Case 3. UPDATE -> UPDATE // Case 3. UPDATE -> UPDATE
permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-2" permutation "s1-begin" "s2-begin" "s2-update-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-2"
# Case 4. DELETE -> DELETE // Case 4. DELETE -> DELETE
permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-delete-table-2" "s2-commit" "s1-commit" "s1-select-table-2" permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-delete-table-2" "s2-commit" "s1-commit" "s1-select-table-2"
# Case 5. DELETE -> INSERT // Case 5. DELETE -> INSERT
permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-insert-table-2" "s2-commit" "s1-commit" "s1-select-table-2" permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-insert-table-2" "s2-commit" "s1-commit" "s1-select-table-2"
# Case 6. DELETE -> UPDATE // Case 6. DELETE -> UPDATE
permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-2" permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-2"
# Deletes from ref_table_1 can transitively cascade to ref_table_3, so DML on ref_table_3 should block // Deletes from ref_table_1 can transitively cascade to ref_table_3, so DML on ref_table_3 should block
# Case 1. DELETE -> DELETE // Case 1. DELETE -> DELETE
permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-delete-table-3" "s2-commit" "s1-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-delete-table-3" "s2-commit" "s1-commit" "s1-select-table-3"
# Case 2. DELETE -> INSERT, should error out // Case 2. DELETE -> INSERT, should error out
permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-insert-table-3" "s2-commit" "s1-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-insert-table-3" "s2-commit" "s1-commit" "s1-select-table-3"
# Case 3. DELETE -> UPDATE // Case 3. DELETE -> UPDATE
permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s2-delete-table-1" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3"
# Any DML on any of ref_table_{1,2,3} should block others from DML in the foreign constraint graph ... // Any DML on any of ref_table_{1,2,3} should block others from DML in the foreign constraint graph ...
permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3"
permutation "s1-begin" "s2-begin" "s1-update-table-3" "s2-insert-table-1" "s1-commit" "s2-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s1-update-table-3" "s2-insert-table-1" "s1-commit" "s2-commit" "s1-select-table-3"
permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-update-table-2" "s2-commit" "s1-commit" "s1-select-table-3"
@ -201,7 +201,7 @@ permutation "s1-begin" "s2-begin" "s1-update-table-2" "s2-insert-table-1" "s1-co
permutation "s1-begin" "s2-begin" "s2-insert-table-2" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s2-insert-table-2" "s1-update-table-3" "s2-commit" "s1-commit" "s1-select-table-3"
permutation "s1-begin" "s2-begin" "s1-update-table-3" "s2-insert-table-2" "s1-commit" "s2-commit" "s1-select-table-3" permutation "s1-begin" "s2-begin" "s1-update-table-3" "s2-insert-table-2" "s1-commit" "s2-commit" "s1-select-table-3"
# DMLs shouldn't block select on tables in the same foreign constraint graph // DMLs shouldn't block select on tables in the same foreign constraint graph
permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-1" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-1" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-2" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-2" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-3" "s2-commit" "s1-commit" permutation "s1-begin" "s2-begin" "s2-insert-table-1" "s1-select-table-3" "s2-commit" "s1-commit"

View File

@ -1,20 +1,7 @@
#include "isolation_mx_common.include.spec"
setup setup
{ {
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block.
-- Following is a workaround to overcome that. Port numbers are hard coded
-- at the moment.
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
CREATE TABLE ref_table_1(id int PRIMARY KEY, value int); CREATE TABLE ref_table_1(id int PRIMARY KEY, value int);
SELECT create_reference_table('ref_table_1'); SELECT create_reference_table('ref_table_1');
@ -136,18 +123,18 @@ step "s2-stop-connection"
SELECT stop_session_level_connection_to_node(); SELECT stop_session_level_connection_to_node();
} }
# Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode. // Case 1. UPDATE/DELETE ref_table_1 should only lock its own shard in Exclusive mode.
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"
# Case 2. Modifying ref_table_2 should also lock ref_table_1 shard in Exclusive mode. // Case 2. Modifying ref_table_2 should also lock ref_table_1 shard in Exclusive mode.
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"
# Case 3. Modifying ref_table_3 should also lock ref_table_1 and ref_table_2 shards in Exclusive mode. // Case 3. Modifying ref_table_3 should also lock ref_table_1 and ref_table_2 shards in Exclusive mode.
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-update-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-delete-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"
# Case 4. Inserting into ref_table_1 should only lock its own shard in RowExclusive mode. // Case 4. Inserting into ref_table_1 should only lock its own shard in RowExclusive mode.
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-1" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"
# Case 5. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. // Case 5. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode.
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-2" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"
# Case 6. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode. // Case 6. Modifying ref_table_2 should also lock ref_table_1 in RowExclusive mode.
permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection" permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-table-3" "s1-start-session-level-connection" "s1-view-locks" "s2-rollback-worker" "s1-view-locks" "s1-stop-connection" "s2-stop-connection"

View File

@ -1,44 +1,13 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE ref_table(id integer, value integer); CREATE TABLE ref_table(id integer, value integer);
SELECT create_reference_table('ref_table'); SELECT create_reference_table('ref_table');
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS ref_table CASCADE; DROP TABLE IF EXISTS ref_table CASCADE;
@ -47,7 +16,7 @@ teardown
session "s1" session "s1"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -77,7 +46,7 @@ step "s1-stop-connection"
session "s2" session "s2"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {

View File

@ -1,44 +1,13 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE ref_table(id integer, value integer); CREATE TABLE ref_table(id integer, value integer);
SELECT create_reference_table('ref_table'); SELECT create_reference_table('ref_table');
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS ref_table CASCADE; DROP TABLE IF EXISTS ref_table CASCADE;
@ -52,7 +21,7 @@ step "s1-add-primary-key"
ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id); ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id);
} }
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -92,7 +61,7 @@ step "s1-stop-connection"
session "s2" session "s2"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {
@ -153,5 +122,5 @@ permutation "s1-add-primary-key""s1-start-session-level-connection" "s1-begin-on
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-select-ref-table" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-select-ref-table" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-drop" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-drop" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
#Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 //Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966
#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" //permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create append distributed table to test behavior of COPY in concurrent operations // create append distributed table to test behavior of COPY in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -10,13 +10,13 @@ setup
SELECT create_reference_table('reference_copy'); SELECT create_reference_table('reference_copy');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS reference_copy CASCADE; DROP TABLE IF EXISTS reference_copy CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-initialize" { COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -51,7 +51,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-copy" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } step "s2-copy" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
step "s2-copy-additional-column" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } step "s2-copy-additional-column" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; }
@ -81,10 +81,10 @@ step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('reference_copy
step "s2-create-non-distributed-table" { CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s2-create-non-distributed-table" { CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s2-distribute-table" { SELECT create_reference_table('reference_copy'); } step "s2-distribute-table" { SELECT create_reference_table('reference_copy'); }
# permutations - COPY vs COPY // permutations - COPY vs COPY
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"
# permutations - COPY first // permutations - COPY first
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count"
@ -104,7 +104,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - COPY second // permutations - COPY second
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count"

View File

@ -1,44 +1,14 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
CREATE TABLE ref_table(user_id int, value_1 int); CREATE TABLE ref_table(user_id int, value_1 int);
SELECT create_reference_table('ref_table'); SELECT create_reference_table('ref_table');
INSERT INTO ref_table VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71); INSERT INTO ref_table VALUES (1, 11), (2, 21), (3, 31), (4, 41), (5, 51), (6, 61), (7, 71);
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE ref_table; DROP TABLE ref_table;
@ -52,7 +22,7 @@ step "s1-begin"
BEGIN; BEGIN;
} }
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {

View File

@ -1,7 +1,7 @@
# check that replace_isolation_tester_func correctly replaces the functions isolation // check that replace_isolation_tester_func correctly replaces the functions isolation
# tester uses while searching for locks. If those functions aren't correctly replaced // tester uses while searching for locks. If those functions aren't correctly replaced
# this test will timeout, since isolation tester will never notice that s2 is blocked // this test will timeout, since isolation tester will never notice that s2 is blocked
# by s1 on a lock it's taken out on one of the workers // by s1 on a lock it's taken out on one of the workers
setup setup
{ {

View File

@ -110,25 +110,25 @@ step "s2-active-transactions"
SELECT count(*) FROM get_global_active_transactions(); SELECT count(*) FROM get_global_active_transactions();
} }
# we disable the daemon during the regression tests in order to get consistent results // we disable the daemon during the regression tests in order to get consistent results
# thus we manually issue the deadlock detection // thus we manually issue the deadlock detection
session "deadlock-checker" session "deadlock-checker"
# we issue the checker not only when there are deadlocks to ensure that we never cancel // we issue the checker not only when there are deadlocks to ensure that we never cancel
# backend inappropriately // backend inappropriately
step "deadlock-checker-call" step "deadlock-checker-call"
{ {
SELECT check_distributed_deadlocks(); SELECT check_distributed_deadlocks();
} }
# verify that locks on the placement of the reference table on the coordinator is // verify that locks on the placement of the reference table on the coordinator is
# taken into account when looking for distributed deadlocks // taken into account when looking for distributed deadlocks
permutation "s1-begin" "s2-begin" "s1-update-dist-table" "s2-lock-ref-table-placement-on-coordinator" "s1-lock-ref-table-placement-on-coordinator" "s2-update-dist-table" "deadlock-checker-call" "s1-end" "s2-end" permutation "s1-begin" "s2-begin" "s1-update-dist-table" "s2-lock-ref-table-placement-on-coordinator" "s1-lock-ref-table-placement-on-coordinator" "s2-update-dist-table" "deadlock-checker-call" "s1-end" "s2-end"
# verify that *_dist_stat_activity() functions return the correct result when query // verify that *_dist_stat_activity() functions return the correct result when query
# has a task on the coordinator. // has a task on the coordinator.
permutation "s1-begin" "s2-begin" "s1-update-ref-table" "s2-sleep" "s2-view-dist" "s2-view-worker" "s2-end" "s1-end" permutation "s1-begin" "s2-begin" "s1-update-ref-table" "s2-sleep" "s2-view-dist" "s2-view-worker" "s2-end" "s1-end"
# verify that get_*_active_transactions() functions return the correct result when // verify that get_*_active_transactions() functions return the correct result when
# the query has a task on the coordinator. // the query has a task on the coordinator.
permutation "s1-begin" "s2-begin" "s1-update-ref-table" "s2-active-transactions" "s1-end" "s2-end" permutation "s1-begin" "s2-begin" "s1-update-ref-table" "s2-active-transactions" "s1-end" "s2-end"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of SELECT in concurrent operations // create range distributed table to test behavior of SELECT in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -10,13 +10,13 @@ setup
SELECT create_distributed_table('select_append', 'id', 'append'); SELECT create_distributed_table('select_append', 'id', 'append');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS select_append CASCADE; DROP TABLE IF EXISTS select_append CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -49,7 +49,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-router-select" { SELECT * FROM select_append WHERE id = 1; } step "s2-router-select" { SELECT * FROM select_append WHERE id = 1; }
step "s2-real-time-select" { SELECT * FROM select_append ORDER BY 1, 2; } step "s2-real-time-select" { SELECT * FROM select_append ORDER BY 1, 2; }
@ -76,7 +76,7 @@ step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELE
step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('select_append', 'id', 'append'); } step "s2-distribute-table" { SELECT create_distributed_table('select_append', 'id', 'append'); }
# permutations - SELECT vs SELECT // permutations - SELECT vs SELECT
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-task-tracker-select" "s1-commit" "s1-select-count"
@ -87,7 +87,7 @@ permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-router-selec
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-task-tracker-select" "s1-commit" "s1-select-count"
# permutations - router SELECT first // permutations - router SELECT first
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-update" "s1-commit" "s1-select-count"
@ -106,7 +106,7 @@ permutation "s1-initialize" "s1-begin" "s2-master-apply-delete-command" "s1-comm
permutation "s1-initialize" "s1-begin" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-master-drop-all-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-router-select" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-router-select" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - router SELECT second // permutations - router SELECT second
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-update" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-router-select" "s1-commit" "s1-select-count"
@ -124,7 +124,7 @@ permutation "s1-initialize" "s1-begin" "s1-master-apply-delete-command" "s1-comm
permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-router-select" "s1-commit" "s1-select-count"
# permutations - real-time SELECT first // permutations - real-time SELECT first
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-update" "s1-commit" "s1-select-count"
@ -141,7 +141,7 @@ permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-table-size" "s1
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-real-time-select" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-real-time-select" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - real-time SELECT second // permutations - real-time SELECT second
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-update" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-real-time-select" "s1-commit" "s1-select-count"
@ -157,7 +157,7 @@ permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-real-time-select" "s1
permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-real-time-select" "s1-commit" "s1-select-count"
# permutations - task-tracker SELECT first // permutations - task-tracker SELECT first
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-update" "s1-commit" "s1-select-count"
@ -174,7 +174,7 @@ permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-table-size"
permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-task-tracker-select" "s2-distribute-table" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-task-tracker-select" "s2-distribute-table" "s1-commit" "s1-select-count"
# permutations - task-tracker SELECT second // permutations - task-tracker SELECT second
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-task-tracker-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-task-tracker-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-update" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-task-tracker-select" "s1-commit" "s1-select-count"

View File

@ -1,45 +1,14 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE select_table(id integer, value integer); CREATE TABLE select_table(id integer, value integer);
SELECT create_distributed_table('select_table', 'id'); SELECT create_distributed_table('select_table', 'id');
COPY select_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; COPY select_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS select_table CASCADE; DROP TABLE IF EXISTS select_table CASCADE;
@ -48,7 +17,7 @@ teardown
session "s1" session "s1"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -83,7 +52,7 @@ step "s2-begin"
BEGIN; BEGIN;
} }
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {

View File

@ -1,5 +1,5 @@
# the test expects to have zero nodes in pg_dist_node at the beginning // the test expects to have zero nodes in pg_dist_node at the beginning
# add single one of the nodes for the purpose of the test // add single one of the nodes for the purpose of the test
setup setup
{ {
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;

View File

@ -38,8 +38,8 @@ step "s2-recover"
SELECT recover_prepared_transactions(); SELECT recover_prepared_transactions();
} }
# Recovery and 2PCs should not block each other // Recovery and 2PCs should not block each other
permutation "s1-begin" "s1-recover" "s2-insert" "s1-commit" permutation "s1-begin" "s1-recover" "s2-insert" "s1-commit"
# Recovery should not run concurrently // Recovery should not run concurrently
permutation "s1-begin" "s1-recover" "s2-recover" "s1-commit" permutation "s1-begin" "s1-recover" "s2-recover" "s1-commit"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of TRUNCATE in concurrent operations // create range distributed table to test behavior of TRUNCATE in concurrent operations
setup setup
{ {
SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.replace_isolation_tester_func();
@ -13,7 +13,7 @@ setup
SELECT create_distributed_table('truncate_append', 'id', 'append'); SELECT create_distributed_table('truncate_append', 'id', 'append');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS truncate_append CASCADE; DROP TABLE IF EXISTS truncate_append CASCADE;
@ -21,7 +21,7 @@ teardown
SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_internal.restore_isolation_tester_func();
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -43,7 +43,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-begin" { BEGIN; } step "s2-begin" { BEGIN; }
step "s2-truncate" { TRUNCATE truncate_append; } step "s2-truncate" { TRUNCATE truncate_append; }
@ -63,10 +63,10 @@ step "s2-distribute-table" { SELECT create_distributed_table('truncate_append',
step "s2-select" { SELECT * FROM truncate_append ORDER BY 1, 2; } step "s2-select" { SELECT * FROM truncate_append ORDER BY 1, 2; }
step "s2-commit" { COMMIT; } step "s2-commit" { COMMIT; }
# permutations - TRUNCATE vs TRUNCATE // permutations - TRUNCATE vs TRUNCATE
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count"
# permutations - TRUNCATE first // permutations - TRUNCATE first
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-drop" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes"
@ -81,7 +81,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-master-apply
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-master-drop-all-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-master-drop-all-shards" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s2-begin" "s1-truncate" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s2-begin" "s1-truncate" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count"
# permutations - TRUNCATE second // permutations - TRUNCATE second
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes"

View File

@ -1,45 +1,14 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE truncate_table(id integer, value integer); CREATE TABLE truncate_table(id integer, value integer);
SELECT create_distributed_table('truncate_table', 'id'); SELECT create_distributed_table('truncate_table', 'id');
COPY truncate_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; COPY truncate_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV;
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS truncate_table CASCADE; DROP TABLE IF EXISTS truncate_table CASCADE;
@ -53,7 +22,7 @@ step "s1-begin"
BEGIN; BEGIN;
} }
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -118,7 +87,7 @@ step "s1-commit"
session "s2" session "s2"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {

View File

@ -1,44 +1,13 @@
# Create and use UDF to send commands from the same connection. Also make the cluster #include "isolation_mx_common.include.spec"
# ready for testing MX functionalities.
setup setup
{ {
CREATE OR REPLACE FUNCTION start_session_level_connection_to_node(text, integer)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$start_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text)
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$run_commands_on_session_level_connection_to_node$$;
CREATE OR REPLACE FUNCTION stop_session_level_connection_to_node()
RETURNS void
LANGUAGE C STRICT VOLATILE
AS 'citus', $$stop_session_level_connection_to_node$$;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57636]::int[],
ARRAY[format('SELECT start_metadata_sync_to_node(''%s'', %s)', nodename, nodeport)]::text[],
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE dist_table(id integer, value integer); CREATE TABLE dist_table(id integer, value integer);
SELECT create_distributed_table('dist_table', 'id'); SELECT create_distributed_table('dist_table', 'id');
} }
# Create and use UDF to close the connection opened in the setup step. Also return the cluster // Create and use UDF to close the connection opened in the setup step. Also return the cluster
# back to the initial state. // back to the initial state.
teardown teardown
{ {
DROP TABLE IF EXISTS dist_table CASCADE; DROP TABLE IF EXISTS dist_table CASCADE;
@ -47,7 +16,7 @@ teardown
session "s1" session "s1"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s1-start-session-level-connection" step "s1-start-session-level-connection"
{ {
@ -82,7 +51,7 @@ step "s1-stop-connection"
session "s2" session "s2"
# We do not need to begin a transaction on coordinator, since it will be open on workers. // We do not need to begin a transaction on coordinator, since it will be open on workers.
step "s2-start-session-level-connection" step "s2-start-session-level-connection"
{ {
@ -142,5 +111,5 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-alter-table" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-alter-table" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection"
#Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966 //Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966
#permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection" //permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection"

View File

@ -94,13 +94,13 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# session 1 updates node 1, session 2 updates node 2, should be ok // session 1 updates node 1, session 2 updates node 2, should be ok
permutation "s1-begin" "s1-update-node-1" "s2-update-node-2" "s1-commit" "s1-show-nodes" permutation "s1-begin" "s1-update-node-1" "s2-update-node-2" "s1-commit" "s1-show-nodes"
# sessions 1 updates node 1, session 2 tries to do the same // sessions 1 updates node 1, session 2 tries to do the same
permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-update-node-1" "s1-commit" "s2-abort" "s1-show-nodes" permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-update-node-1" "s1-commit" "s2-abort" "s1-show-nodes"
# master_update_node should block start_metadata_sync_to_node. Note that we // master_update_node should block start_metadata_sync_to_node. Note that we
# cannot run start_metadata_sync_to_node in a transaction, so we're not // cannot run start_metadata_sync_to_node in a transaction, so we're not
# testing the reverse order here. // testing the reverse order here.
permutation "s1-begin" "s1-update-node-1" "s2-start-metadata-sync-node-2" "s1-commit" "s2-verify-metadata" permutation "s1-begin" "s1-update-node-1" "s2-start-metadata-sync-node-2" "s1-commit" "s2-verify-metadata"

View File

@ -65,7 +65,7 @@ step "s2-commit"
COMMIT; COMMIT;
} }
# session 1 updates node 1, session 2 writes should be blocked // session 1 updates node 1, session 2 writes should be blocked
permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-insert" "s1-commit" "s2-abort" permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-insert" "s1-commit" "s2-abort"
permutation "s2-begin" "s2-insert" "s1-update-node-1" "s2-commit" permutation "s2-begin" "s2-insert" "s1-update-node-1" "s2-commit"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of UPDATE in concurrent operations // create range distributed table to test behavior of UPDATE in concurrent operations
setup setup
{ {
SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.replace_isolation_tester_func();
@ -13,7 +13,7 @@ setup
SELECT create_distributed_table('update_hash', 'id'); SELECT create_distributed_table('update_hash', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS update_hash CASCADE; DROP TABLE IF EXISTS update_hash CASCADE;
@ -21,7 +21,7 @@ teardown
SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_internal.restore_isolation_tester_func();
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-initialize" { COPY update_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -43,7 +43,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-begin" { BEGIN; } step "s2-begin" { BEGIN; }
step "s2-update" { UPDATE update_hash SET data = 'l' WHERE id = 4; } step "s2-update" { UPDATE update_hash SET data = 'l' WHERE id = 4; }
@ -61,10 +61,10 @@ step "s2-master-modify-multiple-shards" { DELETE FROM update_hash; }
step "s2-distribute-table" { SELECT create_distributed_table('update_hash', 'id'); } step "s2-distribute-table" { SELECT create_distributed_table('update_hash', 'id'); }
step "s2-commit" { COMMIT; } step "s2-commit" { COMMIT; }
# permutations - UPDATE vs UPDATE // permutations - UPDATE vs UPDATE
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-update" "s1-commit" "s2-commit" "s1-select-count"
# permutations - UPDATE first // permutations - UPDATE first
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-delete" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-drop" "s1-commit" "s2-commit" "s1-select-count"
@ -78,7 +78,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-table-size" "s
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count"
# permutations - UPDATE second // permutations - UPDATE second
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-update" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-update" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-update" "s1-commit" "s2-commit" "s1-select-count"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create range distributed table to test behavior of UPSERT in concurrent operations // create range distributed table to test behavior of UPSERT in concurrent operations
setup setup
{ {
SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.replace_isolation_tester_func();
@ -13,7 +13,7 @@ setup
SELECT create_distributed_table('upsert_hash', 'id'); SELECT create_distributed_table('upsert_hash', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS upsert_hash CASCADE; DROP TABLE IF EXISTS upsert_hash CASCADE;
@ -21,7 +21,7 @@ teardown
SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_internal.restore_isolation_tester_func();
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } step "s1-initialize" { COPY upsert_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -44,7 +44,7 @@ step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-begin" { BEGIN; } step "s2-begin" { BEGIN; }
step "s2-upsert" { INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; } step "s2-upsert" { INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; }
@ -63,10 +63,10 @@ step "s2-master-modify-multiple-shards" { DELETE FROM upsert_hash; }
step "s2-distribute-table" { SELECT create_distributed_table('upsert_hash', 'id'); } step "s2-distribute-table" { SELECT create_distributed_table('upsert_hash', 'id'); }
step "s2-commit" { COMMIT; } step "s2-commit" { COMMIT; }
# permutations - UPSERT vs UPSERT // permutations - UPSERT vs UPSERT
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count"
# permutations - UPSERT first // permutations - UPSERT first
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-update" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-delete" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count"
@ -81,7 +81,7 @@ permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-table-size" "s
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count"
# permutations - UPSERT second // permutations - UPSERT second
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count"

View File

@ -1,8 +1,8 @@
# //
# How we organize this isolation test spec, is explained at README.md file in this directory. // How we organize this isolation test spec, is explained at README.md file in this directory.
# //
# create distributed table to test behavior of VALIDATE in concurrent operations // create distributed table to test behavior of VALIDATE in concurrent operations
setup setup
{ {
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -10,13 +10,13 @@ setup
SELECT create_distributed_table('constrained_table', 'id'); SELECT create_distributed_table('constrained_table', 'id');
} }
# drop distributed table // drop distributed table
teardown teardown
{ {
DROP TABLE IF EXISTS constrained_table CASCADE; DROP TABLE IF EXISTS constrained_table CASCADE;
} }
# session 1 // session 1
session "s1" session "s1"
step "s1-initialize" { INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); } step "s1-initialize" { INSERT INTO constrained_table VALUES (0, 0), (1, 1), (2, 2), (3, 4); }
step "s1-begin" { BEGIN; } step "s1-begin" { BEGIN; }
@ -24,7 +24,7 @@ step "s1-add-constraint" { ALTER TABLE constrained_table ADD CONSTRAINT check_co
step "s1-validate" { ALTER TABLE constrained_table VALIDATE CONSTRAINT check_constraint; } step "s1-validate" { ALTER TABLE constrained_table VALIDATE CONSTRAINT check_constraint; }
step "s1-commit" { COMMIT; } step "s1-commit" { COMMIT; }
# session 2 // session 2
session "s2" session "s2"
step "s2-begin" { BEGIN; } step "s2-begin" { BEGIN; }
step "s2-insert" { INSERT INTO constrained_table VALUES(10, 10); } step "s2-insert" { INSERT INTO constrained_table VALUES(10, 10); }
@ -32,7 +32,7 @@ step "s2-insert-invalid" { INSERT INTO constrained_table VALUES(100, 100); }
step "s2-select" { SELECT sum(int_data) FROM constrained_table; } step "s2-select" { SELECT sum(int_data) FROM constrained_table; }
step "s2-commit" { COMMIT; } step "s2-commit" { COMMIT; }
# permutations - check read and write are not blocked during validate queries // permutations - check read and write are not blocked during validate queries
permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s1-validate" "s2-insert" "s1-commit" "s2-commit" permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s1-validate" "s2-insert" "s1-commit" "s2-commit"
permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s1-validate" "s2-select" "s1-commit" "s2-commit" permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s1-validate" "s2-select" "s1-commit" "s2-commit"
permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s2-insert" "s1-validate" "s1-commit" "s2-commit" permutation "s1-initialize" "s1-add-constraint" "s1-begin" "s2-begin" "s2-insert" "s1-validate" "s1-commit" "s2-commit"