When replication factor > 1, all modifications are done via 2PC (#5379)

With Citus 9.0, we introduced `citus.single_shard_commit_protocol`, which
defaults to 2PC.

With this commit, we prevent users from setting it to 1PC and drop support
for `citus.single_shard_commit_protocol` entirely.

Although this might add some overhead for users, 2PC is already the default
behaviour (so the change is unlikely to affect most users), and marking
placements as INVALID is much worse.
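
As a minimal sketch of the new behaviour (not part of this diff; the table
name and two-worker layout below are assumptions for illustration):

-- placements of each shard are replicated to two nodes
SET citus.shard_replication_factor TO 2;
CREATE TABLE replicated_example (key int, value text);
SELECT create_distributed_table('replicated_example', 'key');

-- even a plain single-row INSERT now commits via 2PC on both placements,
-- leaving commit records behind rather than ever marking a placement INVALID
INSERT INTO replicated_example VALUES (1, 'hello');
SELECT count(*) FROM pg_dist_transaction;  -- expect one record per worker

-- leftover 2PC records are cleaned up by recovery
SELECT recover_prepared_transactions();
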
Önder Kalacı 2021-10-20 10:39:03 +02:00 committed by GitHub
parent a851211dbc
commit 3f726c72e0
34 changed files with 631 additions and 614 deletions


@ -1416,34 +1416,12 @@ DistributedExecutionRequiresRollback(List *taskList)
if (list_length(task->taskPlacementList) > 1)
{
if (SingleShardCommitProtocol == COMMIT_PROTOCOL_2PC)
{
/*
* Adaptive executor opts to error out on queries if a placement is unhealthy,
* not marking the placement itself unhealthy in the process.
* Use 2PC to rollback placements before the unhealthy replica failed.
*/
return true;
}
/*
* Some tasks don't set replicationModel thus we only
* rely on the anchorShardId, not replicationModel.
*
* TODO: Do we ever need replicationModel in the Task structure?
* Can't we always rely on anchorShardId?
* Single DML/DDL tasks with replicated tables (including
* reference and non-reference tables) should require
* BEGIN/COMMIT/ROLLBACK.
*/
if (task->anchorShardId != INVALID_SHARD_ID && ReferenceTableShardId(
task->anchorShardId))
{
return true;
}
/*
* Single DML/DDL tasks with replicated tables (non-reference)
* should not require BEGIN/COMMIT/ROLLBACK.
*/
return false;
return true;
}
return false;
@ -1467,21 +1445,13 @@ TaskListRequires2PC(List *taskList)
}
Task *task = (Task *) linitial(taskList);
if (task->replicationModel == REPLICATION_MODEL_2PC)
{
return true;
}
/*
* Some tasks don't set replicationModel thus we rely on
* the anchorShardId as well replicationModel.
*
* TODO: Do we ever need replicationModel in the Task structure?
* Can't we always rely on anchorShardId?
*/
uint64 anchorShardId = task->anchorShardId;
if (anchorShardId != INVALID_SHARD_ID && ReferenceTableShardId(anchorShardId))
if (list_length(task->taskPlacementList) > 1)
{
/*
* Even single DML/DDL tasks with replicated tables
* (including reference and non-reference tables)
* should require BEGIN/COMMIT/ROLLBACK.
*/
return true;
}


@ -298,6 +298,47 @@ EnsureModificationsCanRun(void)
}
/*
* EnsureModificationsCanRunOnRelation first calls EnsureModificationsCanRun() and
* then performs one additional check: it gives a proper error message if the
* modified relation is replicated, as replicated tables use 2PC and 2PC cannot
* happen while recovery is in progress.
*/
void
EnsureModificationsCanRunOnRelation(Oid relationId)
{
EnsureModificationsCanRun();
/*
* Even if the user allows writes from the standby, we should not allow them
* for replicated tables as they require 2PC, and 2PC needs to write a log
* record on the coordinator.
*/
if (!(RecoveryInProgress() && WritableStandbyCoordinator))
{
return;
}
if (!IsCitusTable(relationId))
{
/* we are not interested in PG tables */
return;
}
if (IsCitusTableType(relationId, REFERENCE_TABLE) ||
!SingleReplicatedTable(relationId))
{
ereport(ERROR, (errmsg("writing to worker nodes is not currently "
"allowed for replicated tables such as reference "
"tables or hash distributed tables with replication "
"factor greater than 1."),
errhint("All modifications to replicated tables happen via 2PC, "
"and 2PC requires the database to be in a writable state."),
errdetail("the database is read-only")));
}
}
/*
* IsCitusTableType returns true if the given table with relationId
* belongs to a citus table that matches the given table type. If cache


@ -847,9 +847,10 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi
if (IsModifyCommand(originalQuery))
{
EnsureModificationsCanRun();
Oid targetRelationId = ModifyQueryResultRelationId(query);
EnsureModificationsCanRunOnRelation(targetRelationId);
EnsurePartitionTableNotReplicated(targetRelationId);
if (InsertSelectIntoCitusTable(originalQuery))


@ -1643,21 +1643,6 @@ RegisterCitusConfigVariables(void)
GUC_STANDARD,
NULL, NULL, NULL);
DefineCustomEnumVariable(
"citus.single_shard_commit_protocol",
gettext_noop(
"Sets the commit protocol for commands modifying a single shards with multiple replicas."),
gettext_noop("When a failure occurs during commands that modify multiple "
"replicas, two-phase commit is required to ensure data is never lost "
"and this is the default. However, changing to 1pc may give small "
"performance benefits."),
&SingleShardCommitProtocol,
COMMIT_PROTOCOL_2PC,
shard_commit_protocol_options,
PGC_USERSET,
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citus.sort_returning",
gettext_noop("Sorts the RETURNING clause to get consistent test output"),


@ -48,7 +48,6 @@ CoordinatedTransactionState CurrentCoordinatedTransactionState = COORD_TRANS_NON
/* GUC, the commit protocol to use for commands affecting more than one connection */
int MultiShardCommitProtocol = COMMIT_PROTOCOL_2PC;
int SingleShardCommitProtocol = COMMIT_PROTOCOL_2PC;
int SavedMultiShardCommitProtocol = COMMIT_PROTOCOL_BARE;
/*


@ -196,6 +196,7 @@ extern bool InstalledAndAvailableVersionsSame(void);
extern bool MajorVersionsCompatible(char *leftVersion, char *rightVersion);
extern void ErrorIfInconsistentShardIntervals(CitusTableCacheEntry *cacheEntry);
extern void EnsureModificationsCanRun(void);
extern void EnsureModificationsCanRunOnRelation(Oid relationId);
extern char LookupDistributionMethod(Oid distributionMethodOid);
extern bool RelationExists(Oid relationId);
extern ShardInterval * TupleToShardInterval(HeapTuple heapTuple,


@ -119,6 +119,8 @@ s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\
# assign_distributed_transaction id params
s/(NOTICE.*)assign_distributed_transaction_id\([0-9]+, [0-9]+, '.*'\)/\1assign_distributed_transaction_id\(xx, xx, 'xxxxxxx'\)/g
s/(NOTICE.*)PREPARE TRANSACTION 'citus_[0-9]+_[0-9]+_[0-9]+_[0-9]+'/\1PREPARE TRANSACTION 'citus_xx_xx_xx_xx'/g
s/(NOTICE.*)COMMIT PREPARED 'citus_[0-9]+_[0-9]+_[0-9]+_[0-9]+'/\1COMMIT PREPARED 'citus_xx_xx_xx_xx'/g
# toast tables
s/pg_toast_[0-9]+/pg_toast_xxxxx/g


@ -174,9 +174,9 @@ SELECT citus.mitmproxy('conn.delay(500)');
SELECT count(*) FROM single_replicatated;
ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms
SET citus.force_max_query_parallelization TO OFF;
-- one similar test, but this time on modification queries
-- one similar test, and this time on modification queries
-- to see that connection establishment failures could
-- mark placement INVALID
-- fail the transaction (but not mark any placements as INVALID)
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -203,6 +203,7 @@ SELECT citus.mitmproxy('conn.delay(500)');
(1 row)
INSERT INTO products VALUES (100, '100', 100);
ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms
COMMIT;
SELECT
count(*) as invalid_placement_count
@ -213,14 +214,14 @@ WHERE
shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass);
invalid_placement_count
---------------------------------------------------------------------
1
0
(1 row)
-- show that INSERT went through
-- show that INSERT failed
SELECT count(*) FROM products WHERE product_no = 100;
count
---------------------------------------------------------------------
1
0
(1 row)
RESET client_min_messages;


@ -374,7 +374,7 @@ SELECT create_distributed_table('dml_test', 'id');
COPY dml_test FROM STDIN WITH CSV;
---- test multiple statements against a single shard, but with two placements
-- fail at COMMIT (actually COMMIT this time, as no 2pc in use)
-- fail at COMMIT PREPARED as we use 2PC
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
mitmproxy
---------------------------------------------------------------------
@ -400,7 +400,27 @@ WARNING: server closed the connection unexpectedly
connection not open
connection not open
CONTEXT: while executing command on localhost:xxxxx
--- should see all changes, but they only went to one placement (other is unhealthy)
-- all changes should be committed because we injected
-- the failure at COMMIT time, and we should not
-- mark any placements as INVALID
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT recover_prepared_transactions();
recover_prepared_transactions
---------------------------------------------------------------------
1
(1 row)
SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
shardid
---------------------------------------------------------------------
(0 rows)
SET citus.task_assignment_policy TO "round-robin";
SELECT * FROM dml_test ORDER BY id ASC;
id | name
---------------------------------------------------------------------
@ -409,18 +429,15 @@ SELECT * FROM dml_test ORDER BY id ASC;
5 | Epsilon
(3 rows)
SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
shardid
SELECT * FROM dml_test ORDER BY id ASC;
id | name
---------------------------------------------------------------------
103402
(1 row)
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
3 | gamma
4 | Delta
5 | Epsilon
(3 rows)
RESET citus.task_assignment_policy;
-- drop table and recreate as reference table
DROP TABLE dml_test;
SET citus.shard_count = 2;


@ -28,7 +28,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO partitioned_table VALUES (0, 0);
WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
-- use both placements
@ -37,25 +37,25 @@ SET citus.task_assignment_policy TO "round-robin";
SELECT count(*) FROM partitioned_table_0;
count
---------------------------------------------------------------------
2
1
(1 row)
SELECT count(*) FROM partitioned_table_0;
count
---------------------------------------------------------------------
2
1
(1 row)
SELECT count(*) FROM partitioned_table;
count
---------------------------------------------------------------------
2
1
(1 row)
SELECT count(*) FROM partitioned_table;
count
---------------------------------------------------------------------
2
1
(1 row)
-- ==== Clean up, we're done here ====


@ -27,27 +27,26 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO mod_test VALUES (2, 6);
WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT COUNT(*) FROM mod_test WHERE key=2;
count
---------------------------------------------------------------------
1
0
(1 row)
-- some clean up
-- none of the placements are marked as INACTIVE
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass
) AND shardstate = 3 RETURNING placementid;
placementid
---------------------------------------------------------------------
125
(1 row)
(0 rows)
TRUNCATE mod_test;
-- verify behavior of UPDATE ... RETURNING; should mark as failed
-- verify behavior of UPDATE ... RETURNING; should fail the transaction
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -62,33 +61,27 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
(1 row)
UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key;
WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
key
---------------------------------------------------------------------
2
(1 row)
SELECT COUNT(*) FROM mod_test WHERE value='ok';
count
---------------------------------------------------------------------
1
0
(1 row)
-- some clean up
-- none of the placements are marked as INACTIVE
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass
) AND shardstate = 3 RETURNING placementid;
placementid
---------------------------------------------------------------------
125
(1 row)
(0 rows)
TRUNCATE mod_test;
-- verify behavior of multi-statement modifications to a single shard
-- should succeed but mark a placement as failed
-- should fail the transaction and never mark placements inactive
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
mitmproxy
---------------------------------------------------------------------
@ -100,25 +93,24 @@ INSERT INTO mod_test VALUES (2, 6);
INSERT INTO mod_test VALUES (2, 7);
DELETE FROM mod_test WHERE key=2 AND value = '7';
UPDATE mod_test SET value='ok' WHERE key=2;
WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
COMMIT;
SELECT COUNT(*) FROM mod_test WHERE key=2;
count
---------------------------------------------------------------------
1
0
(1 row)
-- some clean up
-- none of the placements are marked as INACTIVE
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass
) AND shardstate = 3 RETURNING placementid;
placementid
---------------------------------------------------------------------
125
(1 row)
(0 rows)
TRUNCATE mod_test;
-- ==== Clean up, we're done here ====


@ -45,7 +45,8 @@ WARNING: connection to the remote node localhost:xxxxx failed with the followin
3 | test data
(1 row)
-- kill after first SELECT; txn should work (though placement marked bad)
-- kill after first SELECT; txn should fail as INSERT triggers
-- 2PC (and placement is not marked bad)
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
mitmproxy
---------------------------------------------------------------------
@ -55,33 +56,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
BEGIN;
INSERT INTO select_test VALUES (3, 'more data');
SELECT * FROM select_test WHERE key = 3;
WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
key | value
---------------------------------------------------------------------
3 | test data
3 | more data
(2 rows)
INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 3;
WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
key | value
---------------------------------------------------------------------
3 | test data
3 | more data
3 | even more data
(3 rows)
COMMIT;
-- some clean up
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'select_test'::regclass
);
TRUNCATE select_test;
-- now the same tests with query cancellation
-- put data in shard for which mitm node is first placement
@ -139,7 +117,7 @@ INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 3;
ERROR: canceling statement due to user request
COMMIT;
-- error after second SELECT; txn should work (though placement marked bad)
-- error after second SELECT; txn should fail
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).reset()');
mitmproxy
---------------------------------------------------------------------
@ -156,15 +134,9 @@ SELECT * FROM select_test WHERE key = 3;
INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 3;
WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
key | value
---------------------------------------------------------------------
3 | more data
3 | even more data
(2 rows)
COMMIT;
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()');
mitmproxy


@ -41,7 +41,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
(1 row)
ANALYZE vacuum_test;
WARNING: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
ERROR: connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
@ -51,19 +51,38 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
(1 row)
ANALYZE vacuum_test;
-- ANALYZE transactions being critical is an open question, see #2430
-- show that we marked as INVALID on COMMIT FAILURE
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass);
shardid | shardstate
WARNING: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
WARNING: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
connection not open
connection not open
CONTEXT: while executing command on localhost:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
12000000 | 3
(1 row)
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass
);
SELECT recover_prepared_transactions();
recover_prepared_transactions
---------------------------------------------------------------------
1
(1 row)
-- ANALYZE transactions being critical is an open question, see #2430
-- show that we never mark as INVALID on COMMIT FAILURE
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass);
shardid | shardstate
---------------------------------------------------------------------
(0 rows)
-- the same tests with cancel
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')');
mitmproxy


@ -1,4 +1,13 @@
\c - - - :master_port
SET citus.shard_replication_factor TO 2;
CREATE TABLE the_replicated_table (a int, b int, z bigserial);
SELECT create_distributed_table('the_replicated_table', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET citus.shard_replication_factor TO 1;
CREATE TABLE the_table (a int, b int, z bigserial);
SELECT create_distributed_table('the_table', 'a');
create_distributed_table
@ -39,10 +48,14 @@ INSERT INTO citus_local_table (a, b, z) VALUES (1, 2, 2);
ERROR: writing to worker nodes is not currently allowed
DETAIL: the database is read-only
-- We can allow DML on a writable standby coordinator.
-- Note that it doesn't help to enable writes for citus local tables
-- and coordinator replicated reference tables. This is because, the
-- data is in the coordinator and will hit read-only tranaction checks
-- on Postgres
-- Note that it doesn't help to enable writes for
-- (a) citus local tables,
-- (b) coordinator replicated reference tables,
-- (c) reference tables or replication > 1 distributed tables.
-- (a) and (b) are because the data is on the coordinator and will hit
-- read-only transaction checks on Postgres.
-- (c) is because citus uses 2PC, where a transaction record should
-- be inserted into pg_dist_transaction, which is not allowed.
SET citus.writable_standby_coordinator TO on;
INSERT INTO the_table (a, b, z) VALUES (1, 2, 2);
SELECT * FROM the_table;
@ -51,8 +64,19 @@ SELECT * FROM the_table;
1 | 2 | 2
(1 row)
INSERT INTO the_replicated_table (a, b, z) VALUES (1, 2, 2);
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
SELECT * FROM the_replicated_table;
a | b | z
---------------------------------------------------------------------
(0 rows)
INSERT INTO reference_table (a, b, z) VALUES (1, 2, 2);
ERROR: cannot execute INSERT in a read-only transaction
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
SELECT * FROM reference_table;
a | b | z
---------------------------------------------------------------------
@ -66,8 +90,14 @@ SELECT * FROM citus_local_table;
(0 rows)
UPDATE the_table SET z = 3 WHERE a = 1;
UPDATE the_replicated_table SET z = 3 WHERE a = 1;
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
UPDATE reference_table SET z = 3 WHERE a = 1;
ERROR: cannot execute UPDATE in a read-only transaction
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
UPDATE citus_local_table SET z = 3 WHERE a = 1;
ERROR: cannot execute UPDATE in a read-only transaction
SELECT * FROM the_table;
@ -87,8 +117,14 @@ SELECT * FROM citus_local_table;
(0 rows)
DELETE FROM the_table WHERE a = 1;
DELETE FROM the_replicated_table WHERE a = 1;
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
DELETE FROM reference_table WHERE a = 1;
ERROR: cannot execute DELETE in a read-only transaction
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
DELETE FROM citus_local_table WHERE a = 1;
ERROR: cannot execute DELETE in a read-only transaction
SELECT * FROM the_table;
@ -109,20 +145,35 @@ SELECT * FROM citus_local_table;
-- drawing from a sequence is not possible
INSERT INTO the_table (a, b) VALUES (1, 2);
ERROR: cannot assign TransactionIds during recovery
INSERT INTO the_replicated_table (a, b) VALUES (1, 2);
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
INSERT INTO reference_table (a, b) VALUES (1, 2);
ERROR: cannot assign TransactionIds during recovery
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
INSERT INTO citus_local_table (a, b) VALUES (1, 2);
ERROR: cannot assign TransactionIds during recovery
-- 2PC is not possible
INSERT INTO the_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
ERROR: cannot assign TransactionIds during recovery
INSERT INTO the_replicated_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
INSERT INTO reference_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
ERROR: cannot execute INSERT in a read-only transaction
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
INSERT INTO citus_local_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
ERROR: cannot execute INSERT in a read-only transaction
-- COPY is not possible in 2PC mode
COPY the_table (a, b, z) FROM STDIN WITH CSV;
ERROR: cannot assign TransactionIds during recovery
-- COPY is not possible in 2PC mode
COPY the_replicated_table (a, b, z) FROM STDIN WITH CSV;
ERROR: cannot assign TransactionIds during recovery
COPY reference_table (a, b, z) FROM STDIN WITH CSV;
ERROR: cannot assign TransactionIds during recovery
COPY citus_local_table (a, b, z) FROM STDIN WITH CSV;
@ -138,7 +189,9 @@ SELECT * FROM the_table ORDER BY a;
(2 rows)
INSERT INTO reference_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
ERROR: cannot execute INSERT in a read-only transaction
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
SELECT * FROM reference_table ORDER BY a;
a | b | z
---------------------------------------------------------------------
@ -162,7 +215,14 @@ SELECT * FROM del ORDER BY a;
WITH del AS (DELETE FROM reference_table RETURNING *)
SELECT * FROM del ORDER BY a;
ERROR: cannot execute DELETE in a read-only transaction
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
WITH del AS (DELETE FROM the_replicated_table RETURNING *)
SELECT * FROM del ORDER BY a;
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
WITH del AS (DELETE FROM citus_local_table RETURNING *)
SELECT * FROM del ORDER BY a;
ERROR: cannot execute DELETE in a read-only transaction
@ -192,7 +252,9 @@ SELECT * FROM citus_local_table ORDER BY a;
DELETE FROM the_table;
DELETE FROM reference_table;
ERROR: cannot execute DELETE in a read-only transaction
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
DELETE FROM citus_local_table;
ERROR: cannot execute DELETE in a read-only transaction
-- DDL is not possible
@ -217,7 +279,9 @@ INSERT INTO the_table (a, b, z) VALUES (1, 2, 2);
ROLLBACK;
BEGIN;
INSERT INTO reference_table (a, b, z) VALUES (1, 2, 2);
ERROR: cannot execute INSERT in a read-only transaction
ERROR: writing to worker nodes is not currently allowed for replicated tables such as reference tables or hash distributed tables with replication factor greater than 1.
DETAIL: the database is read-only
HINT: All modifications to replicated tables happen via 2PC, and 2PC requires the database to be in a writable state.
ROLLBACK;
BEGIN;
INSERT INTO citus_local_table (a, b, z) VALUES (1, 2, 2);


@ -369,7 +369,8 @@ ORDER BY nodeport, shardid;
-- hide postgresql version dependent messages for next test only
\set VERBOSITY terse
-- deferred check should abort the transaction
-- for replicated tables, use 2PC even if the multi-shard commit protocol
-- is set to 1PC
BEGIN;
SET LOCAL citus.multi_shard_commit_protocol TO '1pc';
DELETE FROM researchers WHERE lab_id = 6;
@ -377,11 +378,8 @@ DELETE FROM researchers WHERE lab_id = 6;
\copy researchers FROM STDIN delimiter ','
COMMIT;
WARNING: illegal value
WARNING: failed to commit transaction on localhost:xxxxx
WARNING: illegal value
WARNING: failed to commit transaction on localhost:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
ERROR: could not commit transaction on any active node
WARNING: connection to the remote node localhost:xxxxx failed with the following error: another command is already in progress
ERROR: illegal value
\unset VERBOSITY
-- verify everything including delete is rolled back
SELECT * FROM researchers WHERE lab_id = 6;
@ -614,21 +612,20 @@ DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE reject_bad();
\c - - - :master_port
-- should be the same story as before, just at COMMIT time
-- as we use 2PC, the transaction is rolled back
BEGIN;
INSERT INTO objects VALUES (1, 'apple');
INSERT INTO objects VALUES (2, 'BAD');
INSERT INTO labs VALUES (9, 'Umbrella Corporation');
COMMIT;
WARNING: illegal value
WARNING: failed to commit transaction on localhost:xxxxx
-- data should be persisted
ERROR: illegal value
-- data should not be persisted
SELECT * FROM objects WHERE id = 2;
id | name
---------------------------------------------------------------------
2 | BAD
(1 row)
(0 rows)
-- but one placement should be bad
-- and none of the placements should be bad
SELECT count(*)
FROM pg_dist_shard_placement AS sp,
pg_dist_shard AS s
@ -639,7 +636,7 @@ AND sp.shardstate = 3
AND s.logicalrelid = 'objects'::regclass;
count
---------------------------------------------------------------------
1
0
(1 row)
DELETE FROM objects;
@ -663,12 +660,8 @@ INSERT INTO labs VALUES (8, 'Aperture Science');
INSERT INTO labs VALUES (9, 'BAD');
COMMIT;
WARNING: illegal value
WARNING: failed to commit transaction on localhost:xxxxx
WARNING: illegal value
WARNING: failed to commit transaction on localhost:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
WARNING: could not commit transaction for shard xxxxx on any active node
ERROR: could not commit transaction on any active node
WARNING: connection to the remote node localhost:xxxxx failed with the following error: another command is already in progress
ERROR: illegal value
-- data should NOT be persisted
SELECT * FROM objects WHERE id = 1;
id | name
@ -703,23 +696,20 @@ INSERT INTO objects VALUES (1, 'apple');
INSERT INTO labs VALUES (8, 'Aperture Science');
INSERT INTO labs VALUES (9, 'BAD');
COMMIT;
WARNING: illegal value
WARNING: failed to commit transaction on localhost:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
ERROR: illegal value
\set VERBOSITY default
-- data to objects should be persisted, but labs should not...
-- none of the changes should be persisted
SELECT * FROM objects WHERE id = 1;
id | name
---------------------------------------------------------------------
1 | apple
(1 row)
(0 rows)
SELECT * FROM labs WHERE id = 8;
id | name
---------------------------------------------------------------------
(0 rows)
-- labs should be healthy, but one object placement shouldn't be
-- all placements should be healthy
SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*)
FROM pg_dist_shard_placement AS sp,
pg_dist_shard AS s
@ -731,9 +721,8 @@ ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count
---------------------------------------------------------------------
labs | 1 | 1
objects | 1 | 1
objects | 3 | 1
(3 rows)
objects | 1 | 2
(2 rows)
-- some append-partitioned tests for good measure
CREATE TABLE append_researchers ( LIKE researchers );


@ -2448,28 +2448,14 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user;
\c - router_user - :master_port
-- first test that it is marked invalid inside a transaction block
-- we will fail to connect to worker 2, since the user does not exist
-- still, we never mark placements inactive; instead, we fail the transaction
BEGIN;
INSERT INTO failure_test VALUES (1, 1);
WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard
WHERE logicalrelid = 'failure_test'::regclass
)
ORDER BY placementid;
shardid | shardstate | nodename | nodeport
---------------------------------------------------------------------
840017 | 1 | localhost | 57637
840017 | 3 | localhost | 57638
840018 | 1 | localhost | 57638
840018 | 1 | localhost | 57637
(4 rows)
ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist
ROLLBACK;
INSERT INTO failure_test VALUES (2, 1);
WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist
ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard
@ -2480,7 +2466,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement
---------------------------------------------------------------------
840017 | 1 | localhost | 57637
840017 | 1 | localhost | 57638
840018 | 3 | localhost | 57638
840018 | 1 | localhost | 57638
840018 | 1 | localhost | 57637
(4 rows)


@ -136,12 +136,18 @@ SELECT recover_prepared_transactions();
0
(1 row)
-- plain INSERT does not use 2PC
-- plain INSERT uses 2PC
INSERT INTO test_recovery VALUES ('hello');
SELECT count(*) FROM pg_dist_transaction;
count
---------------------------------------------------------------------
0
2
(1 row)
SELECT recover_prepared_transactions();
recover_prepared_transactions
---------------------------------------------------------------------
0
(1 row)
-- Aborted DDL commands should not write transaction recovery records


@ -196,36 +196,11 @@ SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
-- run VACUUM and ANALYZE against the table on the master
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
VACUUM dustbunnies;
NOTICE: issuing VACUUM public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
ANALYZE dustbunnies;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- send a VACUUM FULL and a VACUUM ANALYZE
VACUUM (FULL) dustbunnies;
NOTICE: issuing VACUUM (FULL) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM ANALYZE dustbunnies;
NOTICE: issuing VACUUM (ANALYZE) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
\c - - :public_worker_1_host :worker_1_port
-- disable auto-VACUUM for next test
ALTER TABLE dustbunnies_990002 SET (autovacuum_enabled = false);
@ -233,25 +208,8 @@ SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::r
\gset
-- send a VACUUM FREEZE after adding a new row
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
INSERT INTO dustbunnies VALUES (5, 'peter');
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing INSERT INTO public.dustbunnies_990002 (id, name) VALUES (5, 'peter'::text)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing INSERT INTO public.dustbunnies_990002 (id, name) VALUES (5, 'peter'::text)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FREEZE) dustbunnies;
NOTICE: issuing VACUUM (FREEZE) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FREEZE) public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- verify that relfrozenxid increased
\c - - :public_worker_1_host :worker_1_port
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
@ -273,33 +231,8 @@ WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
-- add NULL values, then perform column-specific ANALYZE
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
INSERT INTO dustbunnies VALUES (6, NULL, NULL);
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing INSERT INTO public.dustbunnies_990002 (id, name, age) VALUES (6, NULL::text, NULL::integer)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing INSERT INTO public.dustbunnies_990002 (id, name, age) VALUES (6, NULL::text, NULL::integer)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
ANALYZE dustbunnies (name);
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.dustbunnies_990002 (name)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.dustbunnies_990002 (name)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- verify that name's NULL ratio is updated but age's is not
\c - - :public_worker_1_host :worker_1_port
SELECT attname, null_frac FROM pg_stats


@ -7,6 +7,7 @@ SELECT substring(:'server_version', '\d+')::int > 13 AS server_version_above_thi
\endif
create schema pg14;
set search_path to pg14;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 980000;
SET citus.shard_count TO 2;
-- test the new vacuum option, process_toast
@ -21,28 +22,16 @@ SET citus.log_remote_commands TO ON;
VACUUM (FULL) t1;
NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST true) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST false) t1;
@ -50,10 +39,6 @@ ERROR: PROCESS_TOAST required with VACUUM FULL
VACUUM (PROCESS_TOAST false) t1;
NOTICE: issuing VACUUM pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands TO OFF;
@ -74,17 +59,29 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET search_path TO pg14;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET search_path TO pg14;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing reindex(TABLESPACE test_tablespace) index idx;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing reindex(TABLESPACE test_tablespace) index idx;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
reindex(TABLESPACE test_tablespace, verbose) index idx;
INFO: index "idx" was reindexed
@ -92,34 +89,58 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET search_path TO pg14;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET search_path TO pg14;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing reindex(TABLESPACE test_tablespace, verbose) index idx;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing reindex(TABLESPACE test_tablespace, verbose) index idx;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
reindex(TABLESPACE test_tablespace, verbose false) index idx ;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET search_path TO pg14;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET search_path TO pg14;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing reindex(TABLESPACE test_tablespace, verbose false) index idx ;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing reindex(TABLESPACE test_tablespace, verbose false) index idx ;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
reindex(verbose, TABLESPACE test_tablespace) index idx ;
INFO: index "idx" was reindexed
@ -127,17 +148,29 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET search_path TO pg14;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET search_path TO pg14;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing reindex(verbose, TABLESPACE test_tablespace) index idx ;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing reindex(verbose, TABLESPACE test_tablespace) index idx ;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should error saying table space doesn't exist
reindex(TABLESPACE test_tablespace1) index idx;


@ -115,7 +115,8 @@ SELECT distributed_2PCs_are_equal_to_placement_count();
t
(1 row)
-- with 1PC, we should not see and distributed TXs in the pg_dist_transaction
-- even if 1PC is set, we use 2PC as we modify replicated tables,
-- so we see distributed TXs in pg_dist_transaction
SET citus.multi_shard_commit_protocol TO '1pc';
SET citus.multi_shard_modify_mode TO 'sequential';
SELECT recover_prepared_transactions();
@ -128,7 +129,7 @@ ALTER TABLE test_table ADD CONSTRAINT c_check CHECK(a > 0);
SELECT no_distributed_2PCs();
no_distributed_2pcs
---------------------------------------------------------------------
t
f
(1 row)
SET citus.multi_shard_commit_protocol TO '1pc';
@ -143,7 +144,7 @@ ALTER TABLE test_table ADD CONSTRAINT d_check CHECK(a > 0);
SELECT no_distributed_2PCs();
no_distributed_2pcs
---------------------------------------------------------------------
t
f
(1 row)
CREATE TABLE ref_test(a int);
@ -194,7 +195,7 @@ SELECT create_distributed_table('test_table_rep_2', 'a');
(1 row)
-- 1PC should never use 2PC with rep > 1
-- even if 1PC is set, we use 2PC with rep > 1
SET citus.multi_shard_commit_protocol TO '1pc';
SET citus.multi_shard_modify_mode TO 'sequential';
SELECT recover_prepared_transactions();
@ -207,7 +208,7 @@ CREATE INDEX test_table_rep_2_i_1 ON test_table_rep_2(a);
SELECT no_distributed_2PCs();
no_distributed_2pcs
---------------------------------------------------------------------
t
f
(1 row)
SET citus.multi_shard_modify_mode TO 'parallel';
@ -221,7 +222,7 @@ CREATE INDEX test_table_rep_2_i_2 ON test_table_rep_2(a);
SELECT no_distributed_2PCs();
no_distributed_2pcs
---------------------------------------------------------------------
t
f
(1 row)
-- 2PC should always use 2PC with rep > 1


@ -308,14 +308,15 @@ COMMIT;
-- Nothing from the block should have committed
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1;
-- Now try with 2pc off
-- Even if 1PC is picked for multi-shard commands
-- Citus always uses 2PC for replication > 1
SET citus.multi_shard_commit_protocol TO '1pc';
BEGIN;
CREATE INDEX single_index_2 ON single_shard_items(id);
CREATE INDEX single_index_3 ON single_shard_items(name);
COMMIT;
-- The block should have committed with a warning
-- Nothing from the block should have committed
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1;
\c - - - :worker_2_port

File diff suppressed because it is too large


@ -97,9 +97,9 @@ SELECT count(*) FROM single_replicatated;
SET citus.force_max_query_parallelization TO OFF;
-- one similar test, but this time on modification queries
-- one similar test, and this time on modification queries
-- to see that connection establishment failures could
-- mark placement INVALID
-- fail the transaction (but not mark any placements as INVALID)
SELECT citus.mitmproxy('conn.allow()');
BEGIN;
SELECT
@ -120,7 +120,7 @@ WHERE
shardstate = 3 AND
shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass);
-- show that INSERT went through
-- show that INSERT failed
SELECT count(*) FROM products WHERE product_no = 100;


@ -210,7 +210,7 @@ COPY dml_test FROM STDIN WITH CSV;
---- test multiple statements against a single shard, but with two placements
-- fail at COMMIT (actually COMMIT this time, as no 2pc in use)
-- fail at COMMIT PREPARED as we use 2PC
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
BEGIN;
@ -221,14 +221,19 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1;
UPDATE dml_test SET name = 'gamma' WHERE id = 3;
COMMIT;
--- should see all changes, but they only went to one placement (other is unhealthy)
SELECT * FROM dml_test ORDER BY id ASC;
-- all changes should be committed because we injected
-- the failure at COMMIT time, and we should not
-- mark any placements as INVALID
SELECT citus.mitmproxy('conn.allow()');
SELECT recover_prepared_transactions();
SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
SELECT citus.mitmproxy('conn.allow()');
SET citus.task_assignment_policy TO "round-robin";
SELECT * FROM dml_test ORDER BY id ASC;
SELECT * FROM dml_test ORDER BY id ASC;
RESET citus.task_assignment_policy;
-- drop table and recreate as reference table
DROP TABLE dml_test;
SET citus.shard_count = 2;
SET citus.shard_replication_factor = 1;


@ -13,14 +13,14 @@ INSERT INTO mod_test VALUES (2, 6);
SELECT COUNT(*) FROM mod_test WHERE key=2;
-- some clean up
-- none of the placements are marked as INACTIVE
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass
) AND shardstate = 3 RETURNING placementid;
TRUNCATE mod_test;
-- verify behavior of UPDATE ... RETURNING; should mark as failed
-- verify behavior of UPDATE ... RETURNING; should fail the transaction
SELECT citus.mitmproxy('conn.allow()');
INSERT INTO mod_test VALUES (2, 6);
@ -29,7 +29,7 @@ UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key;
SELECT COUNT(*) FROM mod_test WHERE value='ok';
-- some clean up
-- none of the placements are marked as INACTIVE
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass
@ -37,7 +37,7 @@ WHERE shardid IN (
TRUNCATE mod_test;
-- verify behavior of multi-statement modifications to a single shard
-- should succeed but mark a placement as failed
-- should fail the transaction and never mark placements inactive
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
BEGIN;
@ -49,7 +49,7 @@ COMMIT;
SELECT COUNT(*) FROM mod_test WHERE key=2;
-- some clean up
-- none of the placements are marked as INACTIVE
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass


@ -14,21 +14,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
SELECT * FROM select_test WHERE key = 3;
SELECT * FROM select_test WHERE key = 3;
-- kill after first SELECT; txn should work (though placement marked bad)
-- kill after first SELECT; txn should fail as INSERT triggers
-- 2PC (and placement is not marked bad)
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
BEGIN;
INSERT INTO select_test VALUES (3, 'more data');
SELECT * FROM select_test WHERE key = 3;
INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 3;
COMMIT;
-- some clean up
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'select_test'::regclass
);
TRUNCATE select_test;
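-- An illustrative check of why the cleanup UPDATE above could be dropped
-- (sketch only): the failed transaction rolls back instead of marking a
-- placement bad, so no select_test placement should be in shardstate 3.
SELECT count(*) = 0 AS no_invalid_placements
FROM pg_dist_shard_placement
WHERE shardstate = 3
  AND shardid IN (SELECT shardid FROM pg_dist_shard
                  WHERE logicalrelid = 'select_test'::regclass);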
-- now the same tests with query cancellation
@ -66,7 +60,7 @@ INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 3;
COMMIT;
-- error after second SELECT; txn should work (though placement marked bad)
-- error after second SELECT; txn should fail
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).reset()');
BEGIN;


@ -24,16 +24,14 @@ ANALYZE vacuum_test;
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
ANALYZE vacuum_test;
SELECT citus.mitmproxy('conn.allow()');
SELECT recover_prepared_transactions();
-- ANALYZE transactions being critical is an open question, see #2430
-- show that we marked as INVALID on COMMIT FAILURE
-- show that we never mark as INVALID on COMMIT FAILURE
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass);
UPDATE pg_dist_shard_placement SET shardstate = 1
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass
);
-- the same tests with cancel
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')');
VACUUM vacuum_test;


@ -1,5 +1,10 @@
\c - - - :master_port
SET citus.shard_replication_factor TO 2;
CREATE TABLE the_replicated_table (a int, b int, z bigserial);
SELECT create_distributed_table('the_replicated_table', 'a');
SET citus.shard_replication_factor TO 1;
CREATE TABLE the_table (a int, b int, z bigserial);
SELECT create_distributed_table('the_table', 'a');
@ -21,20 +26,27 @@ INSERT INTO reference_table (a, b, z) VALUES (1, 2, 2);
INSERT INTO citus_local_table (a, b, z) VALUES (1, 2, 2);
-- We can allow DML on a writable standby coordinator.
-- Note that it doesn't help to enable writes for citus local tables
-- and coordinator replicated reference tables. This is because, the
-- data is in the coordinator and will hit read-only tranaction checks
-- on Postgres
-- Note that it doesn't help to enable writes for
-- (a) citus local tables
-- (b) coordinator replicated reference tables
-- (c) reference tables or replication > 1 distributed tables
-- (a) and (b) are because the data is in the coordinator and will hit
-- read-only transaction checks on Postgres
-- (c) is because Citus uses 2PC, where a transaction record should
-- be inserted into pg_dist_transaction, which is not allowed
SET citus.writable_standby_coordinator TO on;
INSERT INTO the_table (a, b, z) VALUES (1, 2, 2);
SELECT * FROM the_table;
INSERT INTO the_replicated_table (a, b, z) VALUES (1, 2, 2);
SELECT * FROM the_replicated_table;
INSERT INTO reference_table (a, b, z) VALUES (1, 2, 2);
SELECT * FROM reference_table;
INSERT INTO citus_local_table (a, b, z) VALUES (1, 2, 2);
SELECT * FROM citus_local_table;
UPDATE the_table SET z = 3 WHERE a = 1;
UPDATE the_replicated_table SET z = 3 WHERE a = 1;
UPDATE reference_table SET z = 3 WHERE a = 1;
UPDATE citus_local_table SET z = 3 WHERE a = 1;
SELECT * FROM the_table;
@ -42,6 +54,7 @@ SELECT * FROM reference_table;
SELECT * FROM citus_local_table;
DELETE FROM the_table WHERE a = 1;
DELETE FROM the_replicated_table WHERE a = 1;
DELETE FROM reference_table WHERE a = 1;
DELETE FROM citus_local_table WHERE a = 1;
@ -51,11 +64,13 @@ SELECT * FROM citus_local_table;
-- drawing from a sequence is not possible
INSERT INTO the_table (a, b) VALUES (1, 2);
INSERT INTO the_replicated_table (a, b) VALUES (1, 2);
INSERT INTO reference_table (a, b) VALUES (1, 2);
INSERT INTO citus_local_table (a, b) VALUES (1, 2);
-- 2PC is not possible
INSERT INTO the_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
INSERT INTO the_replicated_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
INSERT INTO reference_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
INSERT INTO citus_local_table (a, b, z) VALUES (2, 3, 4), (5, 6, 7);
@ -64,6 +79,11 @@ COPY the_table (a, b, z) FROM STDIN WITH CSV;
10,10,10
11,11,11
\.
-- COPY is not possible in 2PC mode
COPY the_replicated_table (a, b, z) FROM STDIN WITH CSV;
10,10,10
11,11,11
\.
COPY reference_table (a, b, z) FROM STDIN WITH CSV;
10,10,10
11,11,11
@ -87,6 +107,8 @@ WITH del AS (DELETE FROM the_table RETURNING *)
SELECT * FROM del ORDER BY a;
WITH del AS (DELETE FROM reference_table RETURNING *)
SELECT * FROM del ORDER BY a;
WITH del AS (DELETE FROM the_replicated_table RETURNING *)
SELECT * FROM del ORDER BY a;
WITH del AS (DELETE FROM citus_local_table RETURNING *)
SELECT * FROM del ORDER BY a;
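-- An illustrative sketch of expectation (c) above, assuming the usual follower
-- test setup where :follower_master_port points at the standby coordinator:
-- the replicated table needs 2PC, and 2PC cannot write its transaction record
-- on a read-only coordinator, so the first INSERT is expected to error out,
-- while the same INSERT into the_table (replication factor 1) is still allowed.
\c - - - :follower_master_port
SET citus.writable_standby_coordinator TO on;
INSERT INTO the_replicated_table (a, b, z) VALUES (1, 2, 2);
INSERT INTO the_table (a, b, z) VALUES (1, 2, 2);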


@ -301,7 +301,8 @@ ORDER BY nodeport, shardid;
-- hide postgresql version dependent messages for next test only
\set VERBOSITY terse
-- deferred check should abort the transaction
-- for replicated tables we use 2PC even if the multi-shard commit protocol
-- is set to 1PC
BEGIN;
SET LOCAL citus.multi_shard_commit_protocol TO '1pc';
DELETE FROM researchers WHERE lab_id = 6;
@ -487,16 +488,17 @@ FOR EACH ROW EXECUTE PROCEDURE reject_bad();
\c - - - :master_port
-- should be the same story as before, just at COMMIT time
-- as we use 2PC, the transaction is rolled back
BEGIN;
INSERT INTO objects VALUES (1, 'apple');
INSERT INTO objects VALUES (2, 'BAD');
INSERT INTO labs VALUES (9, 'Umbrella Corporation');
COMMIT;
-- data should be persisted
-- data should not be persisted
SELECT * FROM objects WHERE id = 2;
-- but one placement should be bad
-- and none of the placements should be bad
SELECT count(*)
FROM pg_dist_shard_placement AS sp,
pg_dist_shard AS s
@ -560,11 +562,11 @@ INSERT INTO labs VALUES (9, 'BAD');
COMMIT;
\set VERBOSITY default
-- data to objects should be persisted, but labs should not...
-- none of the changes should be persisted
SELECT * FROM objects WHERE id = 1;
SELECT * FROM labs WHERE id = 8;
-- labs should be healthy, but one object placement shouldn't be
-- all placements should be healthy
SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*)
FROM pg_dist_shard_placement AS sp,
pg_dist_shard AS s


@ -1193,16 +1193,10 @@ GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user;
CREATE USER router_user;
GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user;
\c - router_user - :master_port
-- first test that it is marked invalid inside a transaction block
-- we will fail to connect to worker 2, since the user does not exist
-- still, we never mark placements inactive. Instead, we fail the transaction
BEGIN;
INSERT INTO failure_test VALUES (1, 1);
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard
WHERE logicalrelid = 'failure_test'::regclass
)
ORDER BY placementid;
ROLLBACK;
INSERT INTO failure_test VALUES (2, 1);
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement


@ -87,9 +87,10 @@ SELECT count(*) >= 4 FROM pg_dist_transaction;
SELECT recover_prepared_transactions();
-- plain INSERT does not use 2PC
-- plain INSERT uses 2PC
INSERT INTO test_recovery VALUES ('hello');
SELECT count(*) FROM pg_dist_transaction;
SELECT recover_prepared_transactions();
-- Aborted DDL commands should not write transaction recovery records
BEGIN;


@ -129,12 +129,9 @@ SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
-- run VACUUM and ANALYZE against the table on the master
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
VACUUM dustbunnies;
ANALYZE dustbunnies;
-- send a VACUUM FULL and a VACUUM ANALYZE
VACUUM (FULL) dustbunnies;
@ -148,8 +145,6 @@ SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::r
-- send a VACUUM FREEZE after adding a new row
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
INSERT INTO dustbunnies VALUES (5, 'peter');
VACUUM (FREEZE) dustbunnies;
@ -164,8 +159,6 @@ WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
-- add NULL values, then perform column-specific ANALYZE
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
INSERT INTO dustbunnies VALUES (6, NULL, NULL);
ANALYZE dustbunnies (name);


@ -8,7 +8,7 @@ SELECT substring(:'server_version', '\d+')::int > 13 AS server_version_above_thi
create schema pg14;
set search_path to pg14;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 980000;
SET citus.shard_count TO 2;


@ -94,7 +94,8 @@ SELECT recover_prepared_transactions();
ALTER TABLE test_table ADD CONSTRAINT b_check CHECK(b > 0);
SELECT distributed_2PCs_are_equal_to_placement_count();
-- with 1PC, we should not see and distributed TXs in the pg_dist_transaction
-- even if 1PC is used, we use 2PC as we modify replicated tables
-- so we see distributed TXs in pg_dist_transaction
SET citus.multi_shard_commit_protocol TO '1pc';
SET citus.multi_shard_modify_mode TO 'sequential';
SELECT recover_prepared_transactions();
@ -129,7 +130,7 @@ SET citus.shard_replication_factor TO 2;
CREATE TABLE test_table_rep_2 (a int);
SELECT create_distributed_table('test_table_rep_2', 'a');
-- 1PC should never use 2PC with rep > 1
-- even if 1PC is used, we use 2PC with rep > 1
SET citus.multi_shard_commit_protocol TO '1pc';
SET citus.multi_shard_modify_mode TO 'sequential';