Merge pull request #2927 from citusdata/fix_2909

ActivePrimaryNodeList: Lock DistNodeRelationId()
Philip Dubé 2019-09-13 18:22:23 +00:00 committed by GitHub
commit 012595da11
30 changed files with 357 additions and 612 deletions

View File

@@ -64,6 +64,7 @@
 #include "parser/parse_node.h"
 #include "parser/parse_relation.h"
 #include "parser/parser.h"
+#include "storage/lmgr.h"
 #include "tcop/pquery.h"
 #include "tcop/tcopprot.h"
 #include "utils/builtins.h"
@@ -308,7 +309,7 @@ create_reference_table(PG_FUNCTION_ARGS)
 */
 EnsureRelationKindSupported(relationId);
-workerNodeList = ActivePrimaryNodeList();
+workerNodeList = ActivePrimaryNodeList(ShareLock);
 workerCount = list_length(workerNodeList);
 /* if there are no workers, error out */
@@ -333,7 +334,7 @@ create_reference_table(PG_FUNCTION_ARGS)
 /*
 * CreateDistributedTable creates distributed table in the given configuration.
 * This functions contains all necessary logic to create distributed tables. It
-* perform necessary checks to ensure distributing the table is safe. If it is
+* performs necessary checks to ensure distributing the table is safe. If it is
 * safe to distribute the table, this function creates distributed table metadata,
 * creates shards and copies local data to shards. This function also handles
 * partitioned tables by distributing its partitions as well.

View File

@@ -89,7 +89,7 @@ EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target)
 * either get it now, or get it in master_add_node after this transaction finishes and
 * the pg_dist_object record becomes visible.
 */
-LockRelationOid(DistNodeRelationId(), RowShareLock);
+workerNodeList = ActivePrimaryNodeList(RowShareLock);
 /*
 * right after we acquired the lock we mark our objects as distributed, these changes
@@ -110,7 +110,6 @@ EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target)
 /*
 * collect and connect to all applicable nodes
 */
-workerNodeList = ActivePrimaryNodeList();
 if (list_length(workerNodeList) <= 0)
 {
 /* no nodes to execute on */

View File

@@ -229,7 +229,7 @@ static void
 AcquireDistributedLockOnRelations(List *relationIdList, LOCKMODE lockMode)
 {
 ListCell *relationIdCell = NULL;
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(NoLock);
 const char *lockModeText = LockModeToLockModeText(lockMode);
 /*

View File

@@ -1066,7 +1066,7 @@ DDLTaskList(Oid relationId, const char *commandString)
 List *
 NodeDDLTaskList(TargetWorkerSet targets, List *commands)
 {
-List *workerNodes = TargetWorkerSetNodeList(targets);
+List *workerNodes = TargetWorkerSetNodeList(targets, NoLock);
 char *concatenatedCommands = StringJoin(commands, ';');
 DDLJob *ddlJob = NULL;
 ListCell *workerNodeCell = NULL;

View File

@@ -979,7 +979,7 @@ CloseShardPlacementAssociation(struct MultiConnection *connection)
 /*
 * Note that we don't reset ConnectionPlacementHashEntry's
-* primaryConnection here, that'd more complicated than it seems
+* primaryConnection here, that'd be more complicated than it seems
 * worth. That means we'll error out spuriously if a DML/DDL
 * executing connection is closed earlier in a transaction.
 */

View File

@@ -127,7 +127,7 @@ broadcast_intermediate_result(PG_FUNCTION_ARGS)
 */
 BeginOrContinueCoordinatedTransaction();
-nodeList = ActivePrimaryNodeList();
+nodeList = ActivePrimaryNodeList(NoLock);
 estate = CreateExecutorState();
 resultDest = (RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString,
 estate, nodeList,

View File

@@ -209,7 +209,7 @@ MultiTaskTrackerExecute(Job *job)
 * assigning and checking the status of tasks. The second (temporary) hash
 * helps us in fetching results data from worker nodes to the master node.
 */
-workerNodeList = ActivePrimaryNodeList();
+workerNodeList = ActivePrimaryNodeList(NoLock);
 taskTrackerCount = (uint32) list_length(workerNodeList);
 /* connect as the current user for running queries */

View File

@@ -30,7 +30,7 @@
 /* local functions forward declarations */
-static List * OpenConnectionsToAllNodes(void);
+static List * OpenConnectionsToAllNodes(LOCKMODE lockMode);
 static void BlockDistributedTransactions(void);
 static void CreateRemoteRestorePoints(char *restoreName, List *connectionList);
@@ -83,8 +83,11 @@ citus_create_restore_point(PG_FUNCTION_ARGS)
 MAXFNAMELEN - 1)));
 }
-/* establish connections to all nodes before taking any locks */
-connectionList = OpenConnectionsToAllNodes();
+/*
+ * establish connections to all nodes before taking any locks
+ * ShareLock prevents new nodes being added, rendering connectionList incomplete
+ */
+connectionList = OpenConnectionsToAllNodes(ShareLock);
 /*
 * Send a BEGIN to bust through pgbouncer. We won't actually commit since
@@ -111,14 +114,14 @@ citus_create_restore_point(PG_FUNCTION_ARGS)
 * of connections.
 */
 static List *
-OpenConnectionsToAllNodes(void)
+OpenConnectionsToAllNodes(LOCKMODE lockMode)
 {
 List *connectionList = NIL;
 List *workerNodeList = NIL;
 ListCell *workerNodeCell = NULL;
 int connectionFlags = FORCE_NEW_CONNECTION;
-workerNodeList = ActivePrimaryNodeList();
+workerNodeList = ActivePrimaryNodeList(lockMode);
 foreach(workerNodeCell, workerNodeList)
 {

View File

@@ -177,7 +177,7 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
 LockRelationOid(DistNodeRelationId(), RowShareLock);
 /* load and sort the worker node list for deterministic placement */
-workerNodeList = ActivePrimaryNodeList();
+workerNodeList = ActivePrimaryNodeList(NoLock);
 workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
 /*
@@ -405,8 +405,11 @@ CreateReferenceTableShard(Oid distributedTableId)
 tableName)));
 }
-/* load and sort the worker node list for deterministic placement */
-workerNodeList = ActivePrimaryNodeList();
+/*
+ * load and sort the worker node list for deterministic placements
+ * create_reference_table has already acquired ActivePrimaryNodeList lock
+ */
+workerNodeList = ActivePrimaryNodeList(NoLock);
 workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
 /* get the next shard id */
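
Aside (editor's illustration, not part of this diff): the NoLock call above relies on the caller already holding the pg_dist_node lock. A minimal sketch of that convention, with a hypothetical helper name:

static List *
SortedPrimaryNodeListNoLock(void)
{
	/*
	 * Hypothetical helper: NoLock is only safe when the caller already holds
	 * a lock on pg_dist_node, as create_reference_table does here via its
	 * ActivePrimaryNodeList(ShareLock) call.
	 */
	List *workerNodeList = ActivePrimaryNodeList(NoLock);

	return SortList(workerNodeList, CompareWorkerNodes);
}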

View File

@@ -94,6 +94,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
 uint32 candidateNodeIndex = 0;
 List *candidateNodeList = NIL;
+List *workerNodeList = NIL;
 text *nullMinValue = NULL;
 text *nullMaxValue = NULL;
 char partitionMethod = 0;
@@ -167,14 +168,15 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
 shardId = GetNextShardId();
 /* if enough live groups, add an extra candidate node as backup */
-{
-uint32 primaryNodeCount = ActivePrimaryNodeCount();
-attemptableNodeCount = ShardReplicationFactor;
-if (primaryNodeCount > ShardReplicationFactor)
+workerNodeList = ActivePrimaryNodeList(NoLock);
+if (list_length(workerNodeList) > ShardReplicationFactor)
 {
 attemptableNodeCount = ShardReplicationFactor + 1;
 }
+else
+{
+attemptableNodeCount = ShardReplicationFactor;
 }
 /* first retrieve a list of random nodes for shard placements */
@@ -188,7 +190,6 @@
 }
 else if (ShardPlacementPolicy == SHARD_PLACEMENT_ROUND_ROBIN)
 {
-List *workerNodeList = ActivePrimaryNodeList();
 candidateNode = WorkerGetRoundRobinCandidateNode(workerNodeList, shardId,
 candidateNodeIndex);
 }

View File

@@ -24,6 +24,7 @@
 #include "postmaster/postmaster.h"
 #include "storage/fd.h"
 #include "storage/ipc.h"
+#include "storage/lmgr.h"
 #include "storage/shmem.h"
 #include "utils/guc.h"
 #include "utils/hsearch.h"
@@ -297,7 +298,7 @@ WorkerGetNodeWithName(const char *hostname)
 uint32
 ActivePrimaryNodeCount(void)
 {
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(NoLock);
 uint32 liveWorkerCount = list_length(workerNodeList);
 return liveWorkerCount;
@@ -319,17 +320,25 @@ ActiveReadableNodeCount(void)
 /*
 * ActivePrimaryNodeList returns a list of all the active primary nodes in workerNodeHash
+* lockMode specifies which lock to use on pg_dist_node, this is necessary when
+* the caller wouldn't want nodes to be added concurrent to their use of this list
 */
 List *
-ActivePrimaryNodeList(void)
+ActivePrimaryNodeList(LOCKMODE lockMode)
 {
 List *workerNodeList = NIL;
 WorkerNode *workerNode = NULL;
-HTAB *workerNodeHash = GetWorkerNodeHash();
+HTAB *workerNodeHash = NULL;
 HASH_SEQ_STATUS status;
 EnsureModificationsCanRun();
+if (lockMode != NoLock)
+{
+LockRelationOid(DistNodeRelationId(), lockMode);
+}
+workerNodeHash = GetWorkerNodeHash();
 hash_seq_init(&status, workerNodeHash);
 while ((workerNode = hash_seq_search(&status)) != NULL)
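
Aside (editor's sketch, not from the PR): the intended usage of the new lockMode argument. ShareLock conflicts with the RowExclusiveLock that an INSERT into pg_dist_node acquires, so a caller that must keep the node set fixed, for example while creating placements, blocks a concurrent master_add_node until its own transaction ends; read-only callers pass NoLock. A hypothetical caller:

static void
VisitAllPrimaries(void (*visit)(WorkerNode *workerNode))
{
	/* ShareLock: the node set cannot grow until this transaction finishes */
	List *workerNodeList = ActivePrimaryNodeList(ShareLock);
	ListCell *workerNodeCell = NULL;

	foreach(workerNodeCell, workerNodeList)
	{
		WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);

		visit(workerNode);
	}
}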

View File

@@ -102,13 +102,13 @@ List *
 OrderObjectAddressListInDependencyOrder(List *objectAddressList)
 {
 ObjectAddressCollector collector = { 0 };
-ListCell *ojectAddressCell = NULL;
+ListCell *objectAddressCell = NULL;
 InitObjectAddressCollector(&collector);
-foreach(ojectAddressCell, objectAddressList)
+foreach(objectAddressCell, objectAddressList)
 {
-ObjectAddress *objectAddress = (ObjectAddress *) lfirst(ojectAddressCell);
+ObjectAddress *objectAddress = (ObjectAddress *) lfirst(objectAddressCell);
 if (IsObjectAddressCollected(objectAddress, &collector))
 {

View File

@@ -1097,7 +1097,7 @@ SchemaOwnerName(Oid objectId)
 static bool
 HasMetadataWorkers(void)
 {
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(NoLock);
 ListCell *workerNodeCell = NULL;
 foreach(workerNodeCell, workerNodeList)

View File

@@ -209,14 +209,14 @@ get_current_transaction_id(PG_FUNCTION_ARGS)
 * the active backends from each node of the cluster. If you call that function from
 * the coordinator, it will returns back active transaction from the coordinator as
 * well. Yet, if you call it from the worker, result won't include the transactions
-* on the coordinator node, since worker nodes do not aware of the coordinator.
+* on the coordinator node, since worker nodes are not aware of the coordinator.
 */
 Datum
 get_global_active_transactions(PG_FUNCTION_ARGS)
 {
 TupleDesc tupleDescriptor = NULL;
 Tuplestorestate *tupleStore = NULL;
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(NoLock);
 ListCell *workerNodeCell = NULL;
 List *connectionList = NIL;
 ListCell *connectionCell = NULL;

View File

@@ -315,7 +315,7 @@ CitusStatActivity(const char *statQuery)
 {
 List *citusStatsList = NIL;
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(NoLock);
 ListCell *workerNodeCell = NULL;
 char *nodeUser = NULL;
 List *connectionList = NIL;
@@ -453,7 +453,7 @@ GetLocalNodeCitusDistStat(const char *statQuery)
 localGroupId = GetLocalGroupId();
 /* get the current worker's node stats */
-workerNodeList = ActivePrimaryNodeList();
+workerNodeList = ActivePrimaryNodeList(NoLock);
 foreach(workerNodeCell, workerNodeList)
 {
 WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);

View File

@@ -122,7 +122,7 @@ RecoverTwoPhaseCommits(void)
 ListCell *workerNodeCell = NULL;
 int recoveredTransactionCount = 0;
-workerList = ActivePrimaryNodeList();
+workerList = ActivePrimaryNodeList(NoLock);
 foreach(workerNodeCell, workerList)
 {

View File

@@ -75,7 +75,7 @@ SendCommandToWorkerAsUser(char *nodeName, int32 nodePort, const char *nodeUser,
 void
 SendCommandToFirstWorker(char *command)
 {
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(NoLock);
 WorkerNode *firstWorkerNode = NULL;
 workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
@@ -111,9 +111,9 @@ SendCommandToWorkers(TargetWorkerSet targetWorkerSet, const char *command)
 * TargetWorkerSet.
 */
 List *
-TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet)
+TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode)
 {
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(lockMode);
 ListCell *workerNodeCell = NULL;
 List *result = NIL;
@@ -148,7 +148,7 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet)
 void
 SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, List *commandList)
 {
-List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet);
+List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
 ListCell *workerNodeCell = NULL;
 char *nodeUser = CitusExtensionOwnerName();
 ListCell *commandCell = NULL;
@@ -187,7 +187,7 @@ int
 SendBareOptionalCommandListToWorkersAsUser(TargetWorkerSet targetWorkerSet,
 List *commandList, const char *user)
 {
-List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet);
+List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
 ListCell *workerNodeCell = NULL;
 ListCell *commandCell = NULL;
 int maxError = RESPONSE_OKAY;
@@ -239,7 +239,7 @@ SendCommandToWorkersParams(TargetWorkerSet targetWorkerSet, const char *command,
 {
 List *connectionList = NIL;
 ListCell *connectionCell = NULL;
-List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet);
+List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
 ListCell *workerNodeCell = NULL;
 BeginOrContinueCoordinatedTransaction();
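
Aside (editor's sketch, not from the PR): TargetWorkerSetNodeList now just forwards its lockMode to ActivePrimaryNodeList, so the command-broadcasting paths above pick ShareLock to keep the worker set stable while commands are sent, while other call sites in this diff pass NoLock. A hypothetical broadcast helper built on the new signature:

static void
SendCommandToTargetWorkers(TargetWorkerSet targetWorkerSet, const char *command)
{
	/* ShareLock: no new workers can be added while the command is being sent */
	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
	ListCell *workerNodeCell = NULL;

	foreach(workerNodeCell, workerNodeList)
	{
		WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);

		SendCommandToWorker(workerNode->workerName, workerNode->workerPort, command);
	}
}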

View File

@@ -967,7 +967,7 @@ CountPrimariesWithMetadata(void)
 * AddNodeMetadata checks the given node information and adds the specified node to the
 * pg_dist_node table of the master and workers with metadata.
 * If the node already exists, the function returns the id of the node.
-* If not, the following prodecure is followed while adding a node: If the groupId is not
+* If not, the following procedure is followed while adding a node: If the groupId is not
 * explicitly given by the user, the function picks the group that the new node should
 * be in with respect to GroupSize. Then, the new node is inserted into the local
 * pg_dist_node as well as the nodes with hasmetadata=true.

View File

@@ -253,13 +253,11 @@ ReplicateShardToAllWorkers(ShardInterval *shardInterval)
 ListCell *workerNodeCell = NULL;
 /* prevent concurrent pg_dist_node changes */
-LockRelationOid(DistNodeRelationId(), RowShareLock);
-workerNodeList = ActivePrimaryNodeList();
+workerNodeList = ActivePrimaryNodeList(ShareLock);
 /*
-* We will iterate over all worker nodes and if healthy placement is not exist at
-* given node we will copy the shard to that node. Then we will also modify
+* We will iterate over all worker nodes and if a healthy placement does not exist
+* at given node we will copy the shard to that node. Then we will also modify
 * the metadata to reflect newly copied shard.
 */
 workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
@@ -391,7 +389,7 @@ uint32
 CreateReferenceTableColocationId()
 {
 uint32 colocationId = INVALID_COLOCATION_ID;
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(ShareLock);
 int shardCount = 1;
 int replicationFactor = list_length(workerNodeList);
 Oid distributionColumnType = InvalidOid;

View File

@@ -209,7 +209,7 @@ LockShardListResourcesOnFirstWorker(LOCKMODE lockmode, List *shardIntervalList)
 static bool
 IsFirstWorkerNode()
 {
-List *workerNodeList = ActivePrimaryNodeList();
+List *workerNodeList = ActivePrimaryNodeList(NoLock);
 WorkerNode *firstWorkerNode = NULL;
 workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
View File

@@ -63,7 +63,7 @@ extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList,
 uint32 placementIndex);
 extern WorkerNode * WorkerGetLocalFirstCandidateNode(List *currentNodeList);
 extern uint32 ActivePrimaryNodeCount(void);
-extern List * ActivePrimaryNodeList(void);
+extern List * ActivePrimaryNodeList(LOCKMODE lockMode);
 extern uint32 ActiveReadableNodeCount(void);
 extern List * ActiveReadableNodeList(void);
 extern WorkerNode * GetWorkerNodeByNodeId(int nodeId);

View File

@@ -13,6 +13,7 @@
 #define WORKER_TRANSACTION_H
 #include "distributed/worker_manager.h"
+#include "storage/lockdefs.h"
 /*
@@ -29,7 +30,7 @@ typedef enum TargetWorkerSet
 /* Functions declarations for worker transactions */
 extern List * GetWorkerTransactions(void);
-extern List * TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet);
+extern List * TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode);
 extern void SendCommandToWorker(char *nodeName, int32 nodePort, const char *command);
 extern void SendCommandToWorkerAsUser(char *nodeName, int32 nodePort,
 const char *nodeUser, const char *command);

View File

@@ -1,7 +1,7 @@
 Parsed test spec with 2 sessions
 starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
-create_reference_table
+create_distributed_table
 step s2-load-metadata-cache:
@@ -41,7 +41,7 @@ master_remove_node
 starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
-create_reference_table
+create_distributed_table
 step s2-load-metadata-cache:
@@ -81,7 +81,7 @@ master_remove_node
 starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
-create_reference_table
+create_distributed_table
 step s2-load-metadata-cache:
@@ -121,7 +121,7 @@ master_remove_node
 starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
-create_reference_table
+create_distributed_table
 step s2-load-metadata-cache:
@@ -161,7 +161,7 @@ master_remove_node
 starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
-create_reference_table
+create_distributed_table
 step s2-load-metadata-cache:
@@ -201,7 +201,7 @@ master_remove_node
 starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
-create_reference_table
+create_distributed_table
 step s2-load-metadata-cache:
@@ -240,10 +240,96 @@ master_remove_node
-starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
+starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
create_distributed_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
1
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-reference-table-2: <... completed>
create_reference_table create_reference_table
step s2-print-content-2:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
create_distributed_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
step s2-begin:
BEGIN;
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
create_reference_table
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
?column?
1
step s2-print-content-2:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_distributed_table
 step s1-begin:
 BEGIN;
@@ -278,7 +364,7 @@ master_remove_node
 starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
-create_reference_table
+create_distributed_table
 step s2-begin:
@@ -315,7 +401,7 @@ master_remove_node
 starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
-create_reference_table
+create_distributed_table
 step s1-begin:
@@ -352,7 +438,7 @@ master_remove_node
 starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
-create_reference_table
+create_distributed_table
 step s2-begin:
@@ -389,7 +475,7 @@ master_remove_node
 starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
-create_reference_table
+create_distributed_table
 step s1-begin:
@@ -426,7 +512,7 @@ master_remove_node
 starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
-create_reference_table
+create_distributed_table
 step s2-begin:
@@ -461,3 +547,83 @@ master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
create_distributed_table
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
1
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-reference-table-2: <... completed>
create_reference_table
step s2-print-content-2:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
create_distributed_table
step s2-begin:
BEGIN;
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
create_reference_table
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
?column?
1
step s2-print-content-2:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node

View File

@@ -1,463 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 10
57638 t 10
master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s2-begin:
BEGIN;
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 10
57638 t 10
master_remove_node
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
<waiting ...>
step s1-commit:
COMMIT;
step s2-insert-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 6
57638 t 6
master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s2-begin:
BEGIN;
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 6
57638 t 6
master_remove_node
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
<waiting ...>
step s1-commit:
COMMIT;
step s2-ddl-on-reference-table: <... completed>
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s2-begin:
BEGIN;
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_reference_table
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 5
57638 t 5
master_remove_node
starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
step s2-begin:
BEGIN;
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 5
57638 t 5
master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
create_reference_table
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
<waiting ...>
step s1-commit:
COMMIT;
step s2-insert-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
step s2-begin:
BEGIN;
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
create_reference_table
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
<waiting ...>
step s1-commit:
COMMIT;
step s2-ddl-on-reference-table: <... completed>
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
create_reference_table
step s2-begin:
BEGIN;
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node

View File

@@ -41,16 +41,16 @@ step s3-view-worker:
 query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
-SELECT worker_apply_shard_ddl_command (102145, 'public', '
+SELECT worker_apply_shard_ddl_command (102217, 'public', '
 ALTER TABLE test_table ADD COLUMN x INT;
 ')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT worker_apply_shard_ddl_command (102144, 'public', '
+SELECT worker_apply_shard_ddl_command (102216, 'public', '
 ALTER TABLE test_table ADD COLUMN x INT;
 ')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT worker_apply_shard_ddl_command (102143, 'public', '
+SELECT worker_apply_shard_ddl_command (102215, 'public', '
 ALTER TABLE test_table ADD COLUMN x INT;
 ')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT worker_apply_shard_ddl_command (102142, 'public', '
+SELECT worker_apply_shard_ddl_command (102214, 'public', '
 ALTER TABLE test_table ADD COLUMN x INT;
 ')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
 step s2-rollback:
@@ -104,7 +104,7 @@ step s3-view-worker:
 query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
-INSERT INTO public.test_table_102148 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+INSERT INTO public.test_table_102220 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
 step s2-rollback:
 ROLLBACK;
@@ -159,10 +159,10 @@ step s3-view-worker:
 query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
-COPY (SELECT count(*) AS count FROM test_table_102153 test_table WHERE true) TO STDOUTlocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-COPY (SELECT count(*) AS count FROM test_table_102152 test_table WHERE true) TO STDOUTlocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-COPY (SELECT count(*) AS count FROM test_table_102151 test_table WHERE true) TO STDOUTlocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-COPY (SELECT count(*) AS count FROM test_table_102150 test_table WHERE true) TO STDOUTlocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+SELECT count(*) AS count FROM test_table_102225 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+SELECT count(*) AS count FROM test_table_102224 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+SELECT count(*) AS count FROM test_table_102223 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+SELECT count(*) AS count FROM test_table_102222 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
 step s2-rollback:
 ROLLBACK;
@@ -217,7 +217,7 @@ step s3-view-worker:
 query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
-SELECT count(*) AS count FROM public.test_table_102155 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 0 idle Client ClientRead postgres regression
+SELECT count(*) AS count FROM public.test_table_102227 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
 step s2-rollback:
 ROLLBACK;

View File

@@ -41,16 +41,16 @@ step s3-view-worker:
 query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
-SELECT worker_apply_shard_ddl_command (102145, 'public', '
+SELECT worker_apply_shard_ddl_command (102217, 'public', '
 ALTER TABLE test_table ADD COLUMN x INT;
 ')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT worker_apply_shard_ddl_command (102144, 'public', '
+SELECT worker_apply_shard_ddl_command (102216, 'public', '
 ALTER TABLE test_table ADD COLUMN x INT;
 ')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT worker_apply_shard_ddl_command (102143, 'public', '
+SELECT worker_apply_shard_ddl_command (102215, 'public', '
 ALTER TABLE test_table ADD COLUMN x INT;
 ')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT worker_apply_shard_ddl_command (102142, 'public', '
+SELECT worker_apply_shard_ddl_command (102214, 'public', '
 ALTER TABLE test_table ADD COLUMN x INT;
 ')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
 step s2-rollback:
@@ -104,7 +104,7 @@ step s3-view-worker:
 query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
-INSERT INTO public.test_table_102148 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+INSERT INTO public.test_table_102220 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
 step s2-rollback:
 ROLLBACK;
@@ -159,10 +159,10 @@ step s3-view-worker:
 query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
-SELECT count(*) AS count FROM test_table_102153 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT count(*) AS count FROM test_table_102152 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT count(*) AS count FROM test_table_102151 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
-SELECT count(*) AS count FROM test_table_102150 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+COPY (SELECT count(*) AS count FROM test_table_102225 test_table WHERE true) TO STDOUTlocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+COPY (SELECT count(*) AS count FROM test_table_102224 test_table WHERE true) TO STDOUTlocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+COPY (SELECT count(*) AS count FROM test_table_102223 test_table WHERE true) TO STDOUTlocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+COPY (SELECT count(*) AS count FROM test_table_102222 test_table WHERE true) TO STDOUTlocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
 step s2-rollback:
 ROLLBACK;
@@ -217,7 +217,7 @@ step s3-view-worker:
 query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
-SELECT count(*) AS count FROM public.test_table_102155 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
+SELECT count(*) AS count FROM public.test_table_102227 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 0 idle Client ClientRead postgres regression
 step s2-rollback:
 ROLLBACK;

View File

@@ -29,11 +29,11 @@ step detector-dump-wait-edges:
 waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
-246 245 f
+264 263 f
 transactionnumberwaitingtransactionnumbers
-245
-246 245
+263
+264 263
 step s1-abort:
 ABORT;
@@ -77,14 +77,14 @@ step detector-dump-wait-edges:
 waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
-250 249 f
-251 249 f
-251 250 t
+268 267 f
+269 267 f
+269 268 t
 transactionnumberwaitingtransactionnumbers
-249
-250 249
-251 249,250
+267
+268 267
+269 267,268
 step s1-abort:
 ABORT;

View File

@@ -29,11 +29,11 @@ step detector-dump-wait-edges:
 waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
-247 246 f
+265 264 f
 transactionnumberwaitingtransactionnumbers
-246
-247 246
+264
+265 264
 step s1-abort:
 ABORT;
@@ -77,14 +77,14 @@ step detector-dump-wait-edges:
 waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
-251 250 f
-252 250 f
-252 251 t
+269 268 f
+270 268 f
+270 269 t
 transactionnumberwaitingtransactionnumbers
-250
-251 250
-252 250,251
+268
+269 268
+270 268,269
 step s1-abort:
 ABORT;

View File

@@ -16,7 +16,7 @@ step s1-commit:
 COMMIT;
 step s2-insert: <... completed>
-error in steps s1-commit s2-insert: ERROR: duplicate key value violates unique constraint "test_locking_a_key_102417"
+error in steps s1-commit s2-insert: ERROR: duplicate key value violates unique constraint "test_locking_a_key_102489"
 step s2-commit:
 COMMIT;

View File

@@ -2,16 +2,23 @@
 # add single one of the nodes for the purpose of the test
 setup
 {
+SET citus.shard_replication_factor to 1;
 SELECT 1 FROM master_add_node('localhost', 57637);
 CREATE TABLE test_reference_table (test_id integer);
+CREATE TABLE test_reference_table_2 (test_id integer);
+INSERT INTO test_reference_table_2 VALUES (8);
 SELECT create_reference_table('test_reference_table');
+CREATE TABLE test_table (x int, y int);
+SELECT create_distributed_table('test_table','x');
 }
-# ensure that both nodes exists for the remaining of the isolation tests
+# ensure neither node's added for the remaining of the isolation tests
 teardown
 {
 DROP TABLE test_reference_table;
+DROP TABLE test_reference_table_2;
+DROP TABLE test_table;
 SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
 }
@@ -61,6 +68,11 @@ step "s2-ddl-on-reference-table"
 CREATE INDEX reference_index ON test_reference_table(test_id);
 }
+step "s2-create-reference-table-2"
+{
+SELECT create_reference_table('test_reference_table_2');
+}
 step "s2-begin"
 {
 BEGIN;
@@ -81,6 +93,16 @@ step "s2-print-content"
 nodeport;
 }
+step "s2-print-content-2"
+{
+SELECT
+nodeport, success, result
+FROM
+run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
+ORDER BY
+nodeport;
+}
 step "s2-print-index-count"
 {
 SELECT
@@ -101,6 +123,8 @@ permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-inser
 permutation "s2-load-metadata-cache" "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
 permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
 permutation "s2-load-metadata-cache" "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
+permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
+permutation "s2-load-metadata-cache" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"
 # same tests without loading the cache
@@ -110,3 +134,6 @@ permutation "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1
 permutation "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
 permutation "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
 permutation "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
+permutation "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
+permutation "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"