From 4c94bf3eaea73c737f72e25f0ce741d10fe543cd Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Wed, 15 Aug 2018 16:55:21 +0300 Subject: [PATCH] Make sure that modifying CTEs always use the correct execution mode --- .../distributed/executor/citus_custom_scan.c | 60 +- .../executor/multi_router_executor.c | 172 ++- .../distributed/multi_router_executor.h | 3 +- .../foreign_key_restriction_enforcement.out | 147 +- .../foreign_key_restriction_enforcement_0.out | 1297 +++++++++++++++++ .../foreign_key_restriction_enforcement.sql | 63 + 6 files changed, 1620 insertions(+), 122 deletions(-) create mode 100644 src/test/regress/expected/foreign_key_restriction_enforcement_0.out diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c index 97c8e15dc..ec3278c52 100644 --- a/src/backend/distributed/executor/citus_custom_scan.c +++ b/src/backend/distributed/executor/citus_custom_scan.c @@ -88,19 +88,10 @@ static CustomExecMethods TaskTrackerCustomExecMethods = { .ExplainCustomScan = CitusExplainScan }; -static CustomExecMethods RouterSequentialModifyCustomExecMethods = { - .CustomName = "RouterSequentialModifyScan", +static CustomExecMethods RouterModifyCustomExecMethods = { + .CustomName = "RouterModifyScan", .BeginCustomScan = CitusModifyBeginScan, - .ExecCustomScan = RouterSequentialModifyExecScan, - .EndCustomScan = CitusEndScan, - .ReScanCustomScan = CitusReScan, - .ExplainCustomScan = CitusExplainScan -}; - -static CustomExecMethods RouterMultiModifyCustomExecMethods = { - .CustomName = "RouterMultiModifyScan", - .BeginCustomScan = CitusModifyBeginScan, - .ExecCustomScan = RouterMultiModifyExecScan, + .ExecCustomScan = RouterModifyExecScan, .EndCustomScan = CitusEndScan, .ReScanCustomScan = CitusReScan, .ExplainCustomScan = CitusExplainScan @@ -187,6 +178,8 @@ RouterCreateScan(CustomScan *scan) List *taskList = NIL; bool isModificationQuery = false; + List *relationRowLockList = NIL; + scanState->executorType = MULTI_EXECUTOR_ROUTER; scanState->customScanState.ss.ps.type = T_CustomScanState; scanState->distributedPlan = GetDistributedPlan(scan); @@ -194,47 +187,22 @@ RouterCreateScan(CustomScan *scan) distributedPlan = scanState->distributedPlan; workerJob = distributedPlan->workerJob; taskList = workerJob->taskList; - isModificationQuery = IsModifyDistributedPlan(distributedPlan); - /* check whether query has at most one shard */ - if (list_length(taskList) <= 1) + if (list_length(taskList) == 1) { - List *relationRowLockList = NIL; - if (list_length(taskList) == 1) - { - Task *task = (Task *) linitial(taskList); - relationRowLockList = task->relationRowLockList; - } + Task *task = (Task *) linitial(taskList); + relationRowLockList = task->relationRowLockList; + } - /* if query is SELECT ... FOR UPDATE query, use modify logic */ - if (isModificationQuery || relationRowLockList != NIL) - { - scanState->customScanState.methods = &RouterSequentialModifyCustomExecMethods; - } - else - { - scanState->customScanState.methods = &RouterSelectCustomExecMethods; - } + /* if query is SELECT ... 
FOR UPDATE query, use modify logic */
+	if (isModificationQuery || relationRowLockList != NIL)
+	{
+		scanState->customScanState.methods = &RouterModifyCustomExecMethods;
 	}
 	else
 	{
-		Assert(isModificationQuery);
-
-		if (IsMultiRowInsert(workerJob->jobQuery) ||
-			MultiShardConnectionType == SEQUENTIAL_CONNECTION)
-		{
-			/*
-			 * Multi shard modifications while multi_shard_modify_mode equals
-			 * to 'sequential' or Multi-row INSERT are executed sequentially
-			 * instead of using parallel connections.
-			 */
-			scanState->customScanState.methods = &RouterSequentialModifyCustomExecMethods;
-		}
-		else
-		{
-			scanState->customScanState.methods = &RouterMultiModifyCustomExecMethods;
-		}
+		scanState->customScanState.methods = &RouterSelectCustomExecMethods;
 	}
 
 	return (Node *) scanState;
diff --git a/src/backend/distributed/executor/multi_router_executor.c b/src/backend/distributed/executor/multi_router_executor.c
index 820a11d09..d95df4a6d 100644
--- a/src/backend/distributed/executor/multi_router_executor.c
+++ b/src/backend/distributed/executor/multi_router_executor.c
@@ -96,6 +96,8 @@ static int64 ExecuteModifyTasks(List *taskList, bool expectResults,
 static void AcquireExecutorShardLock(Task *task, CmdType commandType);
 static void AcquireExecutorMultiShardLocks(List *taskList);
 static bool RequiresConsistentSnapshot(Task *task);
+static void RouterMultiModifyExecScan(CustomScanState *node);
+static void RouterSequentialModifyExecScan(CustomScanState *node);
 static void ExtractParametersFromParamListInfo(ParamListInfo paramListInfo,
 											   Oid **parameterTypes,
 											   const char ***parameterValues);
@@ -534,12 +536,15 @@ CitusModifyBeginScan(CustomScanState *node, EState *estate, int eflags)
 
 
 /*
- * RouterSequentialModifyExecScan executes 0 or more modifications on a
- * distributed table sequentially and returns results if there are any.
- * Note that we also use this path for SELECT ... FOR UPDATE queries.
+ * RouterModifyExecScan executes a list of tasks on remote nodes, retrieves
+ * the results and, if RETURNING is used or a SELECT ... FOR UPDATE is
+ * executed, returns the results in a TupleTableSlot.
+ *
+ * The function can handle single-task query executions as well as
+ * sequential and parallel multi-task query executions.
  */
 TupleTableSlot *
-RouterSequentialModifyExecScan(CustomScanState *node)
+RouterModifyExecScan(CustomScanState *node)
 {
 	CitusScanState *scanState = (CitusScanState *) node;
 	TupleTableSlot *resultSlot = NULL;
 
 	if (!scanState->finishedRemoteScan)
 	{
 		DistributedPlan *distributedPlan = scanState->distributedPlan;
-		bool hasReturning = distributedPlan->hasReturning;
 		Job *workerJob = distributedPlan->workerJob;
 		List *taskList = workerJob->taskList;
-		ListCell *taskCell = NULL;
-		bool multipleTasks = list_length(taskList) > 1;
-		EState *executorState = scanState->customScanState.ss.ps.state;
-		bool taskListRequires2PC = TaskListRequires2PC(taskList);
-		bool alwaysThrowErrorOnFailure = false;
-		CmdType operation = scanState->distributedPlan->operation;
-
-		/*
-		 * We could naturally handle function-based transactions (i.e. those using
-		 * PL/pgSQL or similar) by checking the type of queryDesc->dest, but some
-		 * customers already use functions that touch multiple shards from within
-		 * a function, so we'll ignore functions for now. 
- */
-		if (IsTransactionBlock() || multipleTasks || taskListRequires2PC)
-		{
-			BeginOrContinueCoordinatedTransaction();
-
-			/*
-			 * Although using two phase commit protocol is an independent decision than
-			 * failing on any error, we prefer to couple them. Our motivation is that
-			 * the failures are rare, and we prefer to avoid marking placements invalid
-			 * in case of failures.
-			 *
-			 * For reference tables, we always set alwaysThrowErrorOnFailure since we
-			 * absolutely want to avoid marking any placements invalid.
-			 *
-			 * We also cannot handle failures when there is RETURNING and there are more
-			 * than one task to execute.
-			 */
-			if (taskListRequires2PC)
-			{
-				CoordinatedTransactionUse2PC();
-
-				alwaysThrowErrorOnFailure = true;
-			}
-			else if (multipleTasks && hasReturning)
-			{
-				alwaysThrowErrorOnFailure = true;
-			}
-		}
+		bool parallelExecution = true;
 
 		ExecuteSubPlans(distributedPlan);
 
-		foreach(taskCell, taskList)
+		if (list_length(taskList) <= 1 ||
+			IsMultiRowInsert(workerJob->jobQuery) ||
+			MultiShardConnectionType == SEQUENTIAL_CONNECTION)
 		{
-			Task *task = (Task *) lfirst(taskCell);
+			parallelExecution = false;
+		}
 
-			/*
-			 * Result is expected for SELECT ... FOR UPDATE queries as well.
-			 */
-			executorState->es_processed +=
-				ExecuteSingleModifyTask(scanState, task, operation,
-										alwaysThrowErrorOnFailure,
-										hasReturning || task->relationRowLockList != NIL);
+		if (parallelExecution)
+		{
+			RouterMultiModifyExecScan(node);
+		}
+		else
+		{
+			RouterSequentialModifyExecScan(node);
 		}
 
 		scanState->finishedRemoteScan = true;
@@ -615,6 +583,75 @@
 
 
+/*
+ * RouterSequentialModifyExecScan executes 0 or more modifications on a
+ * distributed table sequentially and stores any results in the custom
+ * scan's tuple store. Note that we also use this path for
+ * SELECT ... FOR UPDATE queries.
+ */
+static void
+RouterSequentialModifyExecScan(CustomScanState *node)
+{
+	CitusScanState *scanState = (CitusScanState *) node;
+	DistributedPlan *distributedPlan = scanState->distributedPlan;
+	bool hasReturning = distributedPlan->hasReturning;
+	Job *workerJob = distributedPlan->workerJob;
+	List *taskList = workerJob->taskList;
+	ListCell *taskCell = NULL;
+	bool multipleTasks = list_length(taskList) > 1;
+	EState *executorState = scanState->customScanState.ss.ps.state;
+	bool taskListRequires2PC = TaskListRequires2PC(taskList);
+	bool alwaysThrowErrorOnFailure = false;
+	CmdType operation = scanState->distributedPlan->operation;
+
+	Assert(!scanState->finishedRemoteScan);
+
+	/*
+	 * We could naturally handle function-based transactions (i.e. those using
+	 * PL/pgSQL or similar) by checking the type of queryDesc->dest, but some
+	 * customers already use functions that touch multiple shards from within
+	 * a function, so we'll ignore functions for now.
+	 */
+	if (IsTransactionBlock() || multipleTasks || taskListRequires2PC)
+	{
+		BeginOrContinueCoordinatedTransaction();
+
+		/*
+		 * Although using the two phase commit protocol is an independent decision
+		 * from failing on any error, we prefer to couple them. Our motivation is
+		 * that the failures are rare, and we prefer to avoid marking placements
+		 * invalid in case of failures.
+		 *
+		 * For reference tables, we always set alwaysThrowErrorOnFailure since we
+		 * absolutely want to avoid marking any placements invalid.
+		 *
+		 * We also cannot handle failures when there is RETURNING and there is more
+		 * than one task to execute. 
+ */ + if (taskListRequires2PC) + { + CoordinatedTransactionUse2PC(); + + alwaysThrowErrorOnFailure = true; + } + else if (multipleTasks && hasReturning) + { + alwaysThrowErrorOnFailure = true; + } + } + + + foreach(taskCell, taskList) + { + Task *task = (Task *) lfirst(taskCell); + bool expectResults = (hasReturning || task->relationRowLockList != NIL); + + executorState->es_processed += + ExecuteSingleModifyTask(scanState, task, operation, + alwaysThrowErrorOnFailure, expectResults); + } +} + + /* * TaskListRequires2PC determines whether the given task list requires 2PC * because the tasks provided operates on a reference table or there are multiple @@ -667,31 +704,20 @@ TaskListRequires2PC(List *taskList) /* * RouterMultiModifyExecScan executes a list of tasks on remote nodes, retrieves * the results and, if RETURNING is used, stores them in custom scan's tuple store. - * Then, it returns tuples one by one from this tuple store. */ -TupleTableSlot * +static void RouterMultiModifyExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; - TupleTableSlot *resultSlot = NULL; + DistributedPlan *distributedPlan = scanState->distributedPlan; + Job *workerJob = distributedPlan->workerJob; + List *taskList = workerJob->taskList; + bool hasReturning = distributedPlan->hasReturning; + bool isModificationQuery = true; - if (!scanState->finishedRemoteScan) - { - DistributedPlan *distributedPlan = scanState->distributedPlan; - Job *workerJob = distributedPlan->workerJob; - List *taskList = workerJob->taskList; - bool hasReturning = distributedPlan->hasReturning; - bool isModificationQuery = true; + Assert(!scanState->finishedRemoteScan); - ExecuteSubPlans(distributedPlan); - ExecuteMultipleTasks(scanState, taskList, isModificationQuery, hasReturning); - - scanState->finishedRemoteScan = true; - } - - resultSlot = ReturnTupleFromTuplestore(scanState); - - return resultSlot; + ExecuteMultipleTasks(scanState, taskList, isModificationQuery, hasReturning); } diff --git a/src/include/distributed/multi_router_executor.h b/src/include/distributed/multi_router_executor.h index b6482f24d..10b54e733 100644 --- a/src/include/distributed/multi_router_executor.h +++ b/src/include/distributed/multi_router_executor.h @@ -37,9 +37,8 @@ extern bool AllModificationsCommutative; extern bool EnableDeadlockPrevention; extern void CitusModifyBeginScan(CustomScanState *node, EState *estate, int eflags); -extern TupleTableSlot * RouterSequentialModifyExecScan(CustomScanState *node); extern TupleTableSlot * RouterSelectExecScan(CustomScanState *node); -extern TupleTableSlot * RouterMultiModifyExecScan(CustomScanState *node); +extern TupleTableSlot * RouterModifyExecScan(CustomScanState *node); extern int64 ExecuteModifyTasksWithoutResults(List *taskList); extern int64 ExecuteModifyTasksSequentiallyWithoutResults(List *taskList, diff --git a/src/test/regress/expected/foreign_key_restriction_enforcement.out b/src/test/regress/expected/foreign_key_restriction_enforcement.out index f1f4a40f6..c2647eb21 100644 --- a/src/test/regress/expected/foreign_key_restriction_enforcement.out +++ b/src/test/regress/expected/foreign_key_restriction_enforcement.out @@ -1136,10 +1136,155 @@ DEBUG: verifying table "test_table_2" SET LOCAL client_min_messages TO ERROR; DROP TABLE test_table_2, test_table_1; COMMIT; +-- make sure that modifications to reference tables in a CTE can +-- set the mode to sequential for the next operations +CREATE TABLE reference_table(id int PRIMARY KEY); +DEBUG: CREATE TABLE / PRIMARY KEY 
will create implicit index "reference_table_pkey" for table "reference_table" +DEBUG: building index "reference_table_pkey" on table "reference_table" +SELECT create_reference_table('reference_table'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 + create_reference_table +------------------------ + +(1 row) + +CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table" +DEBUG: building index "distributed_table_pkey" on table "distributed_table" +SELECT create_distributed_table('distributed_table', 'id'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 + create_distributed_table +-------------------------- + +(1 row) + +ALTER TABLE + distributed_table +ADD CONSTRAINT + fkey_delete FOREIGN KEY(value_1) +REFERENCES + reference_table(id) ON DELETE CASCADE; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode +INSERT INTO reference_table SELECT i FROM generate_series(0, 10) i; +DEBUG: distributed INSERT ... SELECT can only select from distributed tables +DEBUG: Collecting INSERT ... SELECT results on coordinator +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode +INSERT INTO distributed_table SELECT i, i % 10 FROM generate_series(0, 100) i; +DEBUG: distributed INSERT ... SELECT can only select from distributed tables +DEBUG: Collecting INSERT ... 
SELECT results on coordinator +-- this query returns 100 rows in Postgres, but not in Citus +-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion +WITH t1 AS (DELETE FROM reference_table RETURNING id) + DELETE FROM distributed_table USING t1 WHERE value_1 = t1.id RETURNING *; +DEBUG: common table expressions are not supported in distributed modifications +DEBUG: generating subplan 92_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id +DEBUG: Plan 92 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('92_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + id | value_1 | id +----+---------+---- +(0 rows) + +-- load some more data for one more test with real-time selects +INSERT INTO reference_table SELECT i FROM generate_series(0, 10) i; +DEBUG: distributed INSERT ... SELECT can only select from distributed tables +DEBUG: Collecting INSERT ... SELECT results on coordinator +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode +INSERT INTO distributed_table SELECT i, i % 10 FROM generate_series(0, 100) i; +DEBUG: distributed INSERT ... SELECT can only select from distributed tables +DEBUG: Collecting INSERT ... SELECT results on coordinator +-- this query returns 100 rows in Postgres, but not in Citus +-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion +WITH t1 AS (DELETE FROM reference_table RETURNING id) + SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id; +DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries +DEBUG: generating subplan 96_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id +DEBUG: Plan 96 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('96_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
+ count
+-------
+ 0
+(1 row)
+
+-- this query should fail since we first do a parallel access to a distributed table
+-- with t1, and then access t2
+WITH t1 AS (DELETE FROM distributed_table RETURNING id),
+	t2 AS (DELETE FROM reference_table RETURNING id)
+	SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
+DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
+DEBUG: generating subplan 98_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
+DEBUG: generating subplan 98_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('98_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('98_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))
+ERROR: cannot execute DML on reference relation "reference_table" because there was a parallel DML access to distributed relation "distributed_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+-- similarly this should fail since we first access a distributed
+-- table via t1, and then access the reference table in the main query
+WITH t1 AS (DELETE FROM distributed_table RETURNING id)
+	DELETE FROM reference_table RETURNING id;
+DEBUG: common table expressions are not supported in distributed modifications
+DEBUG: generating subplan 101_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
+DEBUG: Plan 101 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+ERROR: cannot execute DML on reference relation "reference_table" because there was a parallel DML access to distributed relation "distributed_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+-- finally, make sure that we can execute the same queries
+-- in the sequential mode
+BEGIN;
+
+	SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+	WITH t1 AS (DELETE FROM distributed_table RETURNING id),
+		t2 AS (DELETE FROM reference_table RETURNING id)
+		SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
+DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
+DEBUG: generating subplan 103_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
+DEBUG: generating subplan 103_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+DEBUG: Plan 103 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('103_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('103_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 
WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))
+ count
+-------
+ 0
+(1 row)
+
+ROLLBACK;
+BEGIN;
+
+	SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+	WITH t1 AS (DELETE FROM distributed_table RETURNING id)
+		DELETE FROM reference_table RETURNING id;
+DEBUG: common table expressions are not supported in distributed modifications
+DEBUG: generating subplan 106_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
+DEBUG: Plan 106 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+ id
+----
+(0 rows)
+
+ROLLBACK;
 RESET client_min_messages;
 DROP SCHEMA test_fkey_to_ref_in_tx CASCADE;
-NOTICE: drop cascades to 3 other objects
+NOTICE: drop cascades to 5 other objects
 DETAIL: drop cascades to table referece_table
 drop cascades to table on_update_fkey_table
 drop cascades to table unrelated_dist_table
+drop cascades to table reference_table
+drop cascades to table distributed_table
 SET search_path TO public;
diff --git a/src/test/regress/expected/foreign_key_restriction_enforcement_0.out b/src/test/regress/expected/foreign_key_restriction_enforcement_0.out
new file mode 100644
index 000000000..f247906dc
--- /dev/null
+++ b/src/test/regress/expected/foreign_key_restriction_enforcement_0.out
@@ -0,0 +1,1297 @@
+--
+-- Tests multiple commands in transactions where
+-- there is a foreign key relation between reference
+-- tables and distributed tables
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten;
+ version_above_ten
+-------------------
+ t
+(1 row)
+
+CREATE SCHEMA test_fkey_to_ref_in_tx;
+SET search_path TO 'test_fkey_to_ref_in_tx';
+SET citus.next_shard_id TO 2380000;
+SET citus.next_placement_id TO 2380000;
+SET citus.shard_replication_factor TO 1;
+CREATE TABLE referece_table(id int PRIMARY KEY);
+SELECT create_reference_table('referece_table');
+ create_reference_table
+------------------------
+
+(1 row)
+
+CREATE TABLE on_update_fkey_table(id int PRIMARY KEY, value_1 int);
+SELECT create_distributed_table('on_update_fkey_table', 'id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE TABLE unrelated_dist_table(id int PRIMARY KEY, value_1 int);
+SELECT create_distributed_table('unrelated_dist_table', 'id');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ALTER TABLE on_update_fkey_table ADD CONSTRAINT fkey FOREIGN KEY(value_1) REFERENCES referece_table(id) ON UPDATE CASCADE;
+INSERT INTO referece_table SELECT i FROM generate_series(0, 100) i;
+INSERT INTO on_update_fkey_table SELECT i, i % 100 FROM generate_series(0, 1000) i;
+INSERT INTO unrelated_dist_table SELECT i, i % 100 FROM generate_series(0, 1000) i;
+-- in order to see when the mode automatically switches to sequential execution
+SET client_min_messages TO DEBUG1;
+-- case 1.1: SELECT to a reference table is followed by a parallel SELECT to a distributed table
+BEGIN;
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+	SELECT count(*) FROM on_update_fkey_table;
+ count
+-------
+ 1001
+(1 row)
+
+ROLLBACK;
+-- case 1.2: SELECT to a reference table is followed by multiple router SELECTs to a distributed table
+BEGIN;
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+	SELECT count(*) FROM on_update_fkey_table WHERE id = 15;
+ count
+-------
+ 1
+(1 row)
+
+	SELECT count(*) FROM on_update_fkey_table WHERE id = 16;
+ count
+-------
+ 1
+(1 row)
+
+	SELECT count(*) FROM on_update_fkey_table WHERE id = 17;
+ count
+-------
+ 1
+(1 row)
+
+	SELECT count(*) FROM on_update_fkey_table WHERE id = 18;
+ count
+-------
+ 1
+(1 row)
+
+
+ROLLBACK;
+-- case 1.3: SELECT to a reference table is followed by a multi-shard UPDATE to a distributed table
+BEGIN;
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
+ROLLBACK;
+-- case 1.4: SELECT to a reference table is followed by multiple single-shard UPDATEs to a distributed table
+BEGIN;
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE id = 15;
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE id = 16;
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE id = 17;
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE id = 18;
+ROLLBACK;
+-- case 1.5: SELECT to a reference table is followed by a DDL that touches fkey column
+BEGIN;
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+	ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint;
+DEBUG: rewriting table "on_update_fkey_table"
+DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
+DEBUG: validating foreign key constraint "fkey"
+ROLLBACK;
+-- case 1.6: SELECT to a reference table is followed by an unrelated DDL
+BEGIN;
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+	ALTER TABLE on_update_fkey_table ADD COLUMN X INT;
+DEBUG: switching to sequential query execution mode
+DETAIL: cannot execute parallel DDL on relation "on_update_fkey_table" after SELECT command on reference relation "referece_table" because there is a foreign key between them and "referece_table" has been accessed in this transaction
+ROLLBACK;
+-- case 1.7.1: SELECT to a reference table is followed by a DDL that is on
+-- the foreign key column
+BEGIN;
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+	-- make sure that the output isn't too verbose
+	SET LOCAL client_min_messages TO ERROR;
+	ALTER TABLE on_update_fkey_table DROP COLUMN value_1 CASCADE;
+ROLLBACK;
+-- case 1.7.2: SELECT to a reference table is followed by a DDL that is on
+-- the foreign key column after a parallel query has been executed
+BEGIN;
+	SELECT count(*) FROM unrelated_dist_table;
+ count
+-------
+ 1001
+(1 row)
+
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+	ALTER TABLE on_update_fkey_table DROP COLUMN value_1 CASCADE;
+ERROR: cannot modify table "on_update_fkey_table" because there was a parallel operation on a distributed table in the transaction
+DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency. 
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ROLLBACK; +-- case 1.7.3: SELECT to a reference table is followed by a DDL that is not on +-- the foreign key column, and a parallel query has already been executed +BEGIN; + SELECT count(*) FROM unrelated_dist_table; + count +------- + 1001 +(1 row) + + SELECT count(*) FROM referece_table; + count +------- + 101 +(1 row) + + ALTER TABLE on_update_fkey_table ADD COLUMN X INT; +ERROR: cannot execute parallel DDL on relation "on_update_fkey_table" after SELECT command on reference relation "referece_table" because there is a foreign key between them and "referece_table" has been accessed in this transaction +DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency. +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ROLLBACK; +-- case 1.8: SELECT to a reference table is followed by a COPY +BEGIN; + SELECT count(*) FROM referece_table; + count +------- + 101 +(1 row) + + COPY on_update_fkey_table FROM STDIN WITH CSV; +ROLLBACK; +-- case 2.1: UPDATE to a reference table is followed by a multi-shard SELECT +BEGIN; + UPDATE referece_table SET id = 101 WHERE id = 99; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99; + count +------- + 0 +(1 row) + + SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101; + count +------- + 10 +(1 row) + +ROLLBACK; +-- case 2.2: UPDATE to a reference table is followed by multiple router SELECT +BEGIN; + UPDATE referece_table SET id = 101 WHERE id = 99; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99; + count +------- + 1 +(1 row) + + SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199; + count +------- + 1 +(1 row) + + SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299; + count +------- + 1 +(1 row) + + SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399; + count +------- + 1 +(1 row) + +ROLLBACK; +-- case 2.3: UPDATE to a reference table is followed by a multi-shard UPDATE +BEGIN; + UPDATE referece_table SET id = 101 WHERE id = 99; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + UPDATE on_update_fkey_table SET value_1 = 15; +ROLLBACK; +-- case 2.4: UPDATE to a reference table is followed by multiple router UPDATEs +BEGIN; + UPDATE referece_table SET id = 101 WHERE id = 99; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 1; + UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 2; + UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 3; + UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 4; +ROLLBACK; +-- case 2.5: UPDATE to a reference table is followed by a DDL that touches fkey column +BEGIN; + UPDATE referece_table SET id = 101 WHERE id = 99; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE bigint; +DEBUG: rewriting table "on_update_fkey_table" +DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially +DEBUG: validating foreign key constraint "fkey" +ROLLBACK; +-- case 2.6: UPDATE to a reference table is followed by an unrelated DDL +BEGIN; + UPDATE referece_table SET id = 101 WHERE id = 99; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + ALTER TABLE on_update_fkey_table ADD COLUMN value_1_X INT; +ROLLBACK; +-- case 2.7: UPDATE to a reference table is followed by COPY +BEGIN; + UPDATE referece_table SET id = 101 WHERE id = 99; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + COPY on_update_fkey_table FROM STDIN WITH CSV; +ERROR: cannot execute parallel COPY on relation "on_update_fkey_table" after DML command on reference relation "referece_table" because there is a foreign key between them and "referece_table" has been modified in this transaction +DETAIL: COPY to a distributed table uses a separate set of connections which will not be able to see the uncommitted changes to the reference table. +HINT: Perform the COPY in a separate transaction. 
+CONTEXT: COPY on_update_fkey_table, line 2: "1002,99" +ROLLBACK; +-- case 2.8: UPDATE to a reference table is followed by TRUNCATE +BEGIN; + UPDATE referece_table SET id = 101 WHERE id = 99; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + TRUNCATE on_update_fkey_table; +DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially +ROLLBACK; +-- case 3.1: an unrelated DDL to a reference table is followed by a real-time SELECT +BEGIN; + ALTER TABLE referece_table ALTER COLUMN id SET DEFAULT 1001; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + SELECT count(*) FROM on_update_fkey_table; + count +------- + 1001 +(1 row) + +ROLLBACK; +-- case 3.2: DDL that touches fkey column to a reference table is followed by a real-time SELECT +BEGIN; + ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE int; + SELECT count(*) FROM on_update_fkey_table; + count +------- + 1001 +(1 row) + +ROLLBACK; +-- case 3.3: DDL to a reference table followed by a multi shard UPDATE +BEGIN; + ALTER TABLE referece_table ALTER COLUMN id SET DEFAULT 1001; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + UPDATE on_update_fkey_table SET value_1 = 5 WHERE id != 11; +ROLLBACK; +-- case 3.4: DDL to a reference table followed by multiple router UPDATEs +BEGIN; + ALTER TABLE referece_table ALTER COLUMN id SET DEFAULT 1001; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + UPDATE on_update_fkey_table SET value_1 = 98 WHERE id = 1; + UPDATE on_update_fkey_table SET value_1 = 98 WHERE id = 2; + UPDATE on_update_fkey_table SET value_1 = 98 WHERE id = 3; + UPDATE on_update_fkey_table SET value_1 = 98 WHERE id = 4; +ROLLBACK; +-- case 3.5: DDL to reference table followed by a DDL to dist table +BEGIN; + ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint; +DEBUG: rewriting table "referece_table" +DEBUG: building index "referece_table_pkey" on table "referece_table" serially +DEBUG: validating foreign key constraint "fkey" + CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1); +DEBUG: building index "fkey_test_index_1" on table "on_update_fkey_table" serially +ROLLBACK; +-- case 4.6: DDL to reference table followed by a DDL to dist table, both touching fkey columns +BEGIN; + ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint; +DEBUG: rewriting table "referece_table" +DEBUG: building index "referece_table_pkey" on table "referece_table" serially +DEBUG: validating foreign key constraint "fkey" + ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint; +DEBUG: rewriting table "on_update_fkey_table" +DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially +DEBUG: validating foreign key constraint "fkey" +ROLLBACK; +-- case 3.7: DDL to a reference table is followed by COPY +BEGIN; + ALTER TABLE referece_table ADD COLUMN X int; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode + COPY on_update_fkey_table FROM STDIN WITH CSV; +ERROR: cannot execute parallel COPY on relation "on_update_fkey_table" after DDL command on reference relation "referece_table" because there is a foreign key between them and "referece_table" has been modified in this transaction +DETAIL: COPY to a distributed table uses a separate set of connections which will not be able to see the uncommitted changes to the reference table. +HINT: Perform the COPY in a separate transaction. +CONTEXT: COPY on_update_fkey_table, line 2: "1002,99" +ROLLBACK; +-- case 3.8: DDL to a reference table is followed by TRUNCATE +BEGIN; + ALTER TABLE referece_table ADD COLUMN X int; +DEBUG: switching to sequential query execution mode +DETAIL: Reference relation "referece_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. 
Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
+	TRUNCATE on_update_fkey_table;
+DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
+ROLLBACK;
+-- case 3.9: DDL to a reference table is followed by TRUNCATE
+BEGIN;
+	ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
+DEBUG: rewriting table "referece_table"
+DEBUG: building index "referece_table_pkey" on table "referece_table" serially
+DEBUG: validating foreign key constraint "fkey"
+	TRUNCATE on_update_fkey_table;
+DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
+ROLLBACK;
+-----
+--- Now, start testing the other way around
+-----
+-- case 4.1: SELECT to a dist table is followed by a SELECT to a reference table
+BEGIN;
+	SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
+ count
+-------
+ 10
+(1 row)
+
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+ROLLBACK;
+-- case 4.2: SELECT to a dist table is followed by a DML to a reference table
+BEGIN;
+	SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
+ count
+-------
+ 10
+(1 row)
+
+	UPDATE referece_table SET id = 101 WHERE id = 99;
+ERROR: cannot modify reference table "referece_table" because there was a parallel operation on a distributed table
+DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency.
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- case 4.3: SELECT to a dist table is followed by an unrelated DDL to a reference table
+BEGIN;
+	SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
+ count
+-------
+ 10
+(1 row)
+
+	ALTER TABLE referece_table ADD COLUMN X INT;
+ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- case 4.4: SELECT to a dist table is followed by a DDL to a reference table
+BEGIN;
+	SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
+ count
+-------
+ 10
+(1 row)
+
+	ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
+DEBUG: rewriting table "referece_table"
+DEBUG: building index "referece_table_pkey" on table "referece_table" serially
+DEBUG: validating foreign key constraint "fkey"
+ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- case 4.5: SELECT to a dist table is followed by a TRUNCATE
+BEGIN;
+	SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
+ count
+-------
+ 10
+(1 row)
+
+	TRUNCATE referece_table CASCADE;
+NOTICE: truncate cascades to table "on_update_fkey_table"
+ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel SELECT access to distributed relation "on_update_fkey_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- case 4.6: Router SELECT to a dist table is followed by a TRUNCATE
+BEGIN;
+	SELECT count(*) FROM on_update_fkey_table WHERE id = 9;
+ count
+-------
+ 1
+(1 row)
+
+	TRUNCATE referece_table CASCADE;
+NOTICE: truncate cascades to table "on_update_fkey_table"
+DEBUG: truncate cascades to table "on_update_fkey_table_2380002"
+DETAIL: NOTICE from localhost:57638
+DEBUG: truncate cascades to table "on_update_fkey_table_2380004"
+DETAIL: NOTICE from localhost:57638
+DEBUG: truncate cascades to table "on_update_fkey_table_2380001"
+DETAIL: NOTICE from localhost:57637
+DEBUG: truncate cascades to table "on_update_fkey_table_2380003"
+DETAIL: NOTICE from localhost:57637
+DEBUG: building index "referece_table_pkey" on table "referece_table" serially
+DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
+ROLLBACK;
+-- case 5.1: Parallel UPDATE on distributed table followed by a SELECT
+BEGIN;
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
+	SELECT count(*) FROM referece_table;
+ count
+-------
+ 101
+(1 row)
+
+ROLLBACK;
+-- case 5.2: Parallel UPDATE on distributed table followed by an UPDATE
+BEGIN;
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
+	UPDATE referece_table SET id = 160 WHERE id = 15;
+ERROR: cannot execute DML on reference relation "referece_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- case 5.3: Parallel UPDATE on distributed table followed by an unrelated DDL on reference table
+BEGIN;
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
+	ALTER TABLE referece_table ADD COLUMN X INT;
+ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- case 5.4: Parallel UPDATE on distributed table followed by a related DDL on reference table
+-- FIXME: Can we do better?
+BEGIN;
+	UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
+	ALTER TABLE referece_table ALTER COLUMN id SET DATA TYPE smallint;
+DEBUG: rewriting table "referece_table"
+DEBUG: building index "referece_table_pkey" on table "referece_table" serially
+DEBUG: validating foreign key constraint "fkey"
+ERROR: cannot perform DDL on placement 2380001, which has been read over multiple connections
+ROLLBACK;
+-- case 6:1: Unrelated parallel DDL on distributed table followed by SELECT on ref. table
+BEGIN;
+	ALTER TABLE on_update_fkey_table ADD COLUMN X int;
+	SELECT count(*) FROM referece_table;
+ERROR: cannot execute SELECT on reference relation "referece_table" because there was a parallel DDL access to distributed relation "on_update_fkey_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- case 6:2: Related parallel DDL on distributed table followed by UPDATE on ref. table
+BEGIN;
+	ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
+DEBUG: rewriting table "on_update_fkey_table"
+DEBUG: building index "on_update_fkey_table_pkey" on table "on_update_fkey_table" serially
+DEBUG: validating foreign key constraint "fkey"
+	UPDATE referece_table SET id = 160 WHERE id = 15;
+ROLLBACK;
+-- case 6:3: Unrelated parallel DDL on distributed table followed by UPDATE on ref. 
table +BEGIN; + ALTER TABLE on_update_fkey_table ADD COLUMN X int; + SELECT count(*) FROM referece_table; +ERROR: cannot execute SELECT on reference relation "referece_table" because there was a parallel DDL access to distributed relation "on_update_fkey_table" in the same transaction +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ROLLBACK; +-- case 6:4: Related parallel DDL on distributed table followed by SELECT on ref. table +BEGIN; + ALTER TABLE on_update_fkey_table ADD COLUMN X int; + UPDATE referece_table SET id = 160 WHERE id = 15; +ERROR: cannot execute SELECT on reference relation "referece_table" because there was a parallel DDL access to distributed relation "on_update_fkey_table" in the same transaction +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ROLLBACK; +-- case 6:5: Unrelated parallel DDL on distributed table followed by unrelated DDL on ref. table +BEGIN; + ALTER TABLE on_update_fkey_table ADD COLUMN X int; + ALTER TABLE referece_table ADD COLUMN X int; +ERROR: cannot execute DDL on reference relation "referece_table" because there was a parallel DDL access to distributed relation "on_update_fkey_table" in the same transaction +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ROLLBACK; +-- case 6:6: Unrelated parallel DDL on distributed table followed by related DDL on ref. table +BEGIN; + ALTER TABLE on_update_fkey_table ADD COLUMN X int; + ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint; +ERROR: cannot modify table "on_update_fkey_table" because there was a parallel operation on a distributed table in the transaction +DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency. 
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- some more extensive tests
+-- UPDATE on dist table is followed by DELETE to reference table
+BEGIN;
+	UPDATE on_update_fkey_table SET value_1 = 5 WHERE id != 11;
+	DELETE FROM referece_table WHERE id = 99;
+ERROR: cannot execute DML on reference relation "referece_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- an unrelated update followed by update on dist table and update
+-- on reference table
+BEGIN;
+	UPDATE unrelated_dist_table SET value_1 = 15;
+	UPDATE on_update_fkey_table SET value_1 = 5 WHERE id != 11;
+	UPDATE referece_table SET id = 101 WHERE id = 99;
+ERROR: cannot execute DML on reference relation "referece_table" because there was a parallel DML access to distributed relation "on_update_fkey_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ROLLBACK;
+-- an unrelated update followed by update on the reference table and update
+-- on the cascading distributed table
+-- note that the UPDATE on the reference table will try to set the execution
+-- mode to sequential, which will fail since there are already opened
+-- parallel connections
+BEGIN;
+	UPDATE unrelated_dist_table SET value_1 = 15;
+	UPDATE referece_table SET id = 101 WHERE id = 99;
+ERROR: cannot modify reference table "referece_table" because there was a parallel operation on a distributed table
+DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency. 
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+	UPDATE on_update_fkey_table SET value_1 = 5 WHERE id != 11;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ROLLBACK;
+BEGIN;
+	CREATE TABLE test_table_1(id int PRIMARY KEY);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
+DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
+	SELECT create_reference_table('test_table_1');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_reference_table
+------------------------
+
+(1 row)
+
+	CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
+DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
+	SELECT create_distributed_table('test_table_2', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+	-- make sure that the output isn't too verbose
+	SET LOCAL client_min_messages TO ERROR;
+	DROP TABLE test_table_1 CASCADE;
+ROLLBACK;
+-- this fails since we're trying to switch to sequential mode after
+-- having already executed a parallel query
+BEGIN;
+	CREATE TABLE test_table_1(id int PRIMARY KEY);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
+DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
+	SELECT create_reference_table('test_table_1');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_reference_table
+------------------------
+
+(1 row)
+
+	CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4"
+DEBUG: building index "tt4_pkey" on table "tt4" serially
+	SELECT create_distributed_table('tt4', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: 
schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 + create_distributed_table +-------------------------- + +(1 row) + + CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id)); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2" +DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially + SELECT create_distributed_table('test_table_2', 'id'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table +DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" + -- make sure that the output isn't too verbose + SET LOCAL client_min_messages TO ERROR; +ERROR: current transaction is aborted, commands ignored until end of transaction block + DROP TABLE test_table_1 CASCADE; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- same test with the above, but this time using +-- sequential mode, succeeds +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + CREATE TABLE test_table_1(id int PRIMARY KEY); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1" +DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially + SELECT create_reference_table('test_table_1'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 + create_reference_table +------------------------ + +(1 row) + + CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id)); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "tt4_pkey" for table "tt4" +DEBUG: building index "tt4_pkey" on table "tt4" serially + SELECT create_distributed_table('tt4', 'id'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: 
+DETAIL: NOTICE from localhost:57638
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
+DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
+ SELECT create_distributed_table('test_table_2', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ -- make sure that the output isn't too verbose
+ SET LOCAL client_min_messages TO ERROR;
+ DROP TABLE test_table_1 CASCADE;
+ROLLBACK;
+-- another test with ALTER TABLE fails since we've already opened a
+-- parallel connection via create_distributed_table(); later,
+-- adding a foreign key to the reference table fails
+BEGIN;
+
+ CREATE TABLE test_table_1(id int PRIMARY KEY);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
+DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
+ SELECT create_reference_table('test_table_1');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_reference_table
+------------------------
+
+(1 row)
+
+ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
+DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
+ SELECT create_distributed_table('test_table_2', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id);
+ERROR: cannot modify table "test_table_2" because there was a parallel operation on a distributed table in the transaction
+DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency.
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ -- make sure that the output isn't too verbose
+ SET LOCAL client_min_messages TO ERROR;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ DROP TABLE test_table_1, test_table_2;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+COMMIT;
+-- the same test as above in sequential mode should work fine
+BEGIN;
+
+ SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+ CREATE TABLE test_table_1(id int PRIMARY KEY);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
+DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
+ SELECT create_reference_table('test_table_1');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_reference_table
+------------------------
+
+(1 row)
+
+ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
+DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
+ SELECT create_distributed_table('test_table_2', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id);
+ -- make sure that the output isn't too verbose
+ SET LOCAL client_min_messages TO ERROR;
+ DROP TABLE test_table_1, test_table_2;
+COMMIT;
+-- a similar test to the above, but this time the order of
+-- create_distributed_table and create_reference_table is
+-- changed
+BEGIN;
+ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
+DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
+ SELECT create_distributed_table('test_table_2', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
"test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 + create_distributed_table +-------------------------- + +(1 row) + + CREATE TABLE test_table_1(id int PRIMARY KEY); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1" +DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially + SELECT create_reference_table('test_table_1'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 + create_reference_table +------------------------ + +(1 row) + + ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id); +ERROR: cannot modify table "test_table_2" because there was a parallel operation on a distributed table in the transaction +DETAIL: When there is a foreign key to a reference table, Citus needs to perform all operations over a single connection per node to ensure consistency. +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" + -- make sure that the output isn't too verbose + SET LOCAL client_min_messages TO ERROR; +ERROR: current transaction is aborted, commands ignored until end of transaction block + DROP TABLE test_table_1 CASCADE; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- same test in sequential mode should succeed +BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2" +DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially + SELECT create_distributed_table('test_table_2', 'id'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 + create_distributed_table +-------------------------- + +(1 row) + + CREATE TABLE test_table_1(id int PRIMARY KEY); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1" +DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially + SELECT create_reference_table('test_table_1'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 + create_reference_table +------------------------ + +(1 row) + + 
+ ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id);
+ -- make sure that the output isn't too verbose
+ SET LOCAL client_min_messages TO ERROR;
+ DROP TABLE test_table_1 CASCADE;
+ROLLBACK;
+-- again a very similar test, but this time a parallel
+-- SELECT is executed before switching the mode to
+-- sequential, so it should fail
+BEGIN;
+ SELECT count(*) FROM on_update_fkey_table;
+ count
+-------
+ 1001
+(1 row)
+
+ SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
+DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
+ SELECT create_distributed_table('test_table_2', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+ERROR: cannot distribute relation "test_table_2" in this transaction because it has a foreign key to a reference table
+DETAIL: If a hash distributed table has a foreign key to a reference table, it has to be created in sequential mode before any parallel commands have been executed in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+ CREATE TABLE test_table_1(id int PRIMARY KEY);
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ SELECT create_reference_table('test_table_1');
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id);
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ -- make sure that the output isn't too verbose
+ SET LOCAL client_min_messages TO ERROR;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ DROP TABLE test_table_1 CASCADE;
+ERROR: current transaction is aborted, commands ignored until end of transaction block
+ROLLBACK;
+-- make sure that we cannot create hash distributed tables with
+-- foreign keys to reference tables when they have data in them
+BEGIN;
+
+ CREATE TABLE test_table_1(id int PRIMARY KEY);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
+DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
+ INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i;
+ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
+DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
+ INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
+ SELECT create_reference_table('test_table_1');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: switching to sequential query execution mode
relation "test_table_1" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode +NOTICE: Copying data from local table... +DEBUG: Copied 101 rows + create_reference_table +------------------------ + +(1 row) + + SELECT create_distributed_table('test_table_2', 'id'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty +HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty. + -- make sure that the output isn't too verbose + SET LOCAL client_min_messages TO ERROR; +ERROR: current transaction is aborted, commands ignored until end of transaction block + DROP TABLE test_table_2, test_table_1; +ERROR: current transaction is aborted, commands ignored until end of transaction block +COMMIT; +-- the same test with above in sequential mode would still not work +-- since COPY cannot be executed in sequential mode +BEGIN; + + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + CREATE TABLE test_table_1(id int PRIMARY KEY); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1" +DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially + INSERT INTO test_table_1 SELECT i FROM generate_series(0,100) i; + CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id)); +DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2" +DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially + INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i; + SELECT create_reference_table('test_table_1'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +NOTICE: Copying data from local table... +DEBUG: Copied 101 rows + create_reference_table +------------------------ + +(1 row) + + SELECT create_distributed_table('test_table_2', 'id'); +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57638 +DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping +DETAIL: NOTICE from localhost:57637 +ERROR: cannot distribute "test_table_2" in sequential mode because it is not empty +HINT: If you have manually set citus.multi_shard_modify_mode to 'sequential', try with 'parallel' option. If that is not the case, try distributing local tables when they are empty. 
+-- we should be able to execute any DML/DDL/SELECT after we've
+-- switched to sequential via create_distributed_table
+BEGIN;
+
+ CREATE TABLE test_table_1(id int PRIMARY KEY);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_1_pkey" for table "test_table_1"
+DEBUG: building index "test_table_1_pkey" on table "test_table_1" serially
+ CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "test_table_2_pkey" for table "test_table_2"
+DEBUG: building index "test_table_2_pkey" on table "test_table_2" serially
+ SELECT create_reference_table('test_table_1');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: switching to sequential query execution mode
+DETAIL: Reference relation "test_table_1" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_reference_table
+------------------------
+
+(1 row)
+
+ SELECT create_distributed_table('test_table_2', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ -- and maybe some other test
+ CREATE INDEX i1 ON test_table_1(id);
+DEBUG: building index "i1" on table "test_table_1" serially
+ ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0);
+DEBUG: verifying table "test_table_2"
+ SELECT count(*) FROM test_table_2;
+ count
+-------
+ 0
+(1 row)
+
+ SELECT count(*) FROM test_table_1;
+ count
+-------
+ 0
+(1 row)
+
+ UPDATE test_table_2 SET value_1 = 15;
+ -- make sure that the output isn't too verbose
+ SET LOCAL client_min_messages TO ERROR;
+ DROP TABLE test_table_2, test_table_1;
+COMMIT;
+-- make sure that modifications to reference tables in a CTE can
+-- set the mode to sequential for the next operations
+CREATE TABLE reference_table(id int PRIMARY KEY);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table"
+DEBUG: building index "reference_table_pkey" on table "reference_table" serially
+SELECT create_reference_table('reference_table');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_reference_table
+------------------------
+
+(1 row)
+
+CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
+DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table"
+DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially
+SELECT create_distributed_table('distributed_table', 'id');
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57637
+DEBUG: schema "test_fkey_to_ref_in_tx" already exists, skipping
+DETAIL: NOTICE from localhost:57638
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+ALTER TABLE
+ distributed_table
+ADD CONSTRAINT
+ fkey_delete FOREIGN KEY(value_1)
+REFERENCES
+ reference_table(id) ON DELETE CASCADE;
+DEBUG: switching to sequential query execution mode
+DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
+INSERT INTO reference_table SELECT i FROM generate_series(0, 10) i;
+DEBUG: distributed INSERT ... SELECT can only select from distributed tables
+DEBUG: Collecting INSERT ... SELECT results on coordinator
+DEBUG: switching to sequential query execution mode
+DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
+INSERT INTO distributed_table SELECT i, i % 10 FROM generate_series(0, 100) i;
+DEBUG: distributed INSERT ... SELECT can only select from distributed tables
+DEBUG: Collecting INSERT ... SELECT results on coordinator
+-- this query returns 100 rows in Postgres, but not in Citus
+-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
+WITH t1 AS (DELETE FROM reference_table RETURNING id)
+ DELETE FROM distributed_table USING t1 WHERE value_1 = t1.id RETURNING *;
+DEBUG: common table expressions are not supported in distributed modifications
+DEBUG: generating subplan 92_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+DEBUG: Plan 92 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('92_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id
+DEBUG: switching to sequential query execution mode
+DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
+ id | value_1 | id
+----+---------+----
+(0 rows)
+
+-- load some more data for one more test with real-time selects
+INSERT INTO reference_table SELECT i FROM generate_series(0, 10) i;
+DEBUG: distributed INSERT ... SELECT can only select from distributed tables
+DEBUG: Collecting INSERT ... SELECT results on coordinator
+DEBUG: switching to sequential query execution mode
+DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
+INSERT INTO distributed_table SELECT i, i % 10 FROM generate_series(0, 100) i;
+DEBUG: distributed INSERT ... SELECT can only select from distributed tables
+DEBUG: Collecting INSERT ... SELECT results on coordinator
+-- this query returns 100 rows in Postgres, but not in Citus
+-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
+WITH t1 AS (DELETE FROM reference_table RETURNING id)
+ SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id;
+DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
+DEBUG: generating subplan 96_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+DEBUG: Plan 96 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('96_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id)
+DEBUG: switching to sequential query execution mode
+DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
+ count
+-------
+ 0
+(1 row)
+
+-- this query should fail since we first do a parallel access to a distributed table
+-- with t1, and then access t2
+WITH t1 AS (DELETE FROM distributed_table RETURNING id),
+ t2 AS (DELETE FROM reference_table RETURNING id)
+ SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
+DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
+DEBUG: generating subplan 98_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
+DEBUG: generating subplan 98_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('98_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('98_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))
+ERROR: cannot execute DML on reference relation "reference_table" because there was a parallel DML access to distributed relation "distributed_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+-- similarly this should fail since we first access a distributed
+-- table via t1, and then access the reference table in the main query
+WITH t1 AS (DELETE FROM distributed_table RETURNING id)
+ DELETE FROM reference_table RETURNING id;
+DEBUG: common table expressions are not supported in distributed modifications
+DEBUG: generating subplan 101_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
+DEBUG: Plan 101 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+ERROR: cannot execute DML on reference relation "reference_table" because there was a parallel DML access to distributed relation "distributed_table" in the same transaction
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
+-- finally, make sure that we can execute the same queries
+-- in sequential mode
+BEGIN;
+
+ SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+ WITH t1 AS (DELETE FROM distributed_table RETURNING id),
+ t2 AS (DELETE FROM reference_table RETURNING id)
+ SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
+DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries
+DEBUG: generating subplan 103_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
+DEBUG: generating subplan 103_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+DEBUG: Plan 103 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('103_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('103_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))
+ count
+-------
+ 0
+(1 row)
+
+ROLLBACK;
+BEGIN;
+
+ SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+ WITH t1 AS (DELETE FROM distributed_table RETURNING id)
+ DELETE FROM reference_table RETURNING id;
+DEBUG: common table expressions are not supported in distributed modifications
+DEBUG: generating subplan 106_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
+DEBUG: Plan 106 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
+ id
+----
+(0 rows)
+
+ROLLBACK;
+RESET client_min_messages;
+DROP SCHEMA test_fkey_to_ref_in_tx CASCADE;
+NOTICE: drop cascades to 5 other objects
+DETAIL: drop cascades to table referece_table
+drop cascades to table on_update_fkey_table
+drop cascades to table unrelated_dist_table
+drop cascades to table reference_table
+drop cascades to table distributed_table
+SET search_path TO public;
diff --git a/src/test/regress/sql/foreign_key_restriction_enforcement.sql b/src/test/regress/sql/foreign_key_restriction_enforcement.sql
index 20d13b282..e323b6a44 100644
--- a/src/test/regress/sql/foreign_key_restriction_enforcement.sql
+++ b/src/test/regress/sql/foreign_key_restriction_enforcement.sql
@@ -570,6 +570,69 @@ BEGIN;
 DROP TABLE test_table_2, test_table_1;
 COMMIT;
+
+-- make sure that modifications to reference tables in a CTE can
+-- set the mode to sequential for the next operations
+CREATE TABLE reference_table(id int PRIMARY KEY);
+SELECT create_reference_table('reference_table');
+
+CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
+SELECT create_distributed_table('distributed_table', 'id');
+
+ALTER TABLE
+ distributed_table
+ADD CONSTRAINT
+ fkey_delete FOREIGN KEY(value_1)
+REFERENCES
+ reference_table(id) ON DELETE CASCADE;
+
+INSERT INTO reference_table SELECT i FROM generate_series(0, 10) i;
+INSERT INTO distributed_table SELECT i, i % 10 FROM generate_series(0, 100) i;
+
+-- this query returns 100 rows in Postgres, but not in Citus
+-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
+WITH t1 AS (DELETE FROM reference_table RETURNING id)
+ DELETE FROM distributed_table USING t1 WHERE value_1 = t1.id RETURNING *;
+
+-- load some more data for one more test with real-time selects
+INSERT INTO reference_table SELECT i FROM generate_series(0, 10) i;
+INSERT INTO distributed_table SELECT i, i % 10 FROM generate_series(0, 100) i;
+
+-- this query returns 100 rows in Postgres, but not in Citus
+-- see https://github.com/citusdata/citus_docs/issues/664 for the discussion
+WITH t1 AS (DELETE FROM reference_table RETURNING id)
+ SELECT count(*) FROM distributed_table, t1 WHERE value_1 = t1.id;
+
+-- this query should fail since we first do a parallel access to a distributed table
+-- with t1, and then access t2
+WITH t1 AS (DELETE FROM distributed_table RETURNING id),
+ t2 AS (DELETE FROM reference_table RETURNING id)
+ SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
+
+-- similarly this should fail since we first access a distributed
+-- table via t1, and then access the reference table in the main query
+WITH t1 AS (DELETE FROM distributed_table RETURNING id)
+ DELETE FROM reference_table RETURNING id;
+
+
+-- finally, make sure that we can execute the same queries
+-- in sequential mode
+BEGIN;
+
+ SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+
+ WITH t1 AS (DELETE FROM distributed_table RETURNING id),
+ t2 AS (DELETE FROM reference_table RETURNING id)
+ SELECT count(*) FROM distributed_table, t1, t2 WHERE value_1 = t1.id AND value_1 = t2.id;
+ROLLBACK;
+
+BEGIN;
+
+ SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
+
+ WITH t1 AS (DELETE FROM distributed_table RETURNING id)
+ DELETE FROM reference_table RETURNING id;
+ROLLBACK;
+
 RESET client_min_messages;
 DROP SCHEMA test_fkey_to_ref_in_tx CASCADE;
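Editor's note, not part of this patch: the two ROLLBACK'd blocks above make the failing CTE queries work by forcing sequential mode explicitly. A hedged alternative sketch: assuming CTEs are planned in the order they are written, which the 98_1/98_2 and 103_1/103_2 subplan numbering in the expected output suggests, putting the reference table modification in the first CTE should let Citus switch itself to sequential mode, as the "modifications to reference tables in a CTE can set the mode to sequential" comment earlier in the file describes, so no SET LOCAL would be needed:

-- illustrative only, not taken from the test: the reference table CTE runs
-- first, so the executor is already in sequential mode when the
-- distributed table is modified
WITH t2 AS (DELETE FROM reference_table RETURNING id),
     t1 AS (DELETE FROM distributed_table RETURNING id)
SELECT count(*) FROM distributed_table, t1, t2
WHERE value_1 = t1.id AND value_1 = t2.id;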