Merge pull request #2927 from citusdata/fix_2909

ActivePrimaryNodeList: Lock DistNodeRelationId()
pull/2975/head
Philip Dubé 2019-09-13 18:22:23 +00:00 committed by GitHub
commit 012595da11
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
30 changed files with 357 additions and 612 deletions

View File

@ -64,6 +64,7 @@
#include "parser/parse_node.h"
#include "parser/parse_relation.h"
#include "parser/parser.h"
#include "storage/lmgr.h"
#include "tcop/pquery.h"
#include "tcop/tcopprot.h"
#include "utils/builtins.h"
@ -308,7 +309,7 @@ create_reference_table(PG_FUNCTION_ARGS)
*/
EnsureRelationKindSupported(relationId);
workerNodeList = ActivePrimaryNodeList();
workerNodeList = ActivePrimaryNodeList(ShareLock);
workerCount = list_length(workerNodeList);
/* if there are no workers, error out */
@ -333,7 +334,7 @@ create_reference_table(PG_FUNCTION_ARGS)
/*
* CreateDistributedTable creates distributed table in the given configuration.
* This functions contains all necessary logic to create distributed tables. It
* perform necessary checks to ensure distributing the table is safe. If it is
* performs necessary checks to ensure distributing the table is safe. If it is
* safe to distribute the table, this function creates distributed table metadata,
* creates shards and copies local data to shards. This function also handles
* partitioned tables by distributing its partitions as well.

View File

@ -89,7 +89,7 @@ EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target)
* either get it now, or get it in master_add_node after this transaction finishes and
* the pg_dist_object record becomes visible.
*/
LockRelationOid(DistNodeRelationId(), RowShareLock);
workerNodeList = ActivePrimaryNodeList(RowShareLock);
/*
* right after we acquired the lock we mark our objects as distributed, these changes
@ -110,7 +110,6 @@ EnsureDependenciesExistsOnAllNodes(const ObjectAddress *target)
/*
* collect and connect to all applicable nodes
*/
workerNodeList = ActivePrimaryNodeList();
if (list_length(workerNodeList) <= 0)
{
/* no nodes to execute on */

View File

@ -229,7 +229,7 @@ static void
AcquireDistributedLockOnRelations(List *relationIdList, LOCKMODE lockMode)
{
ListCell *relationIdCell = NULL;
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(NoLock);
const char *lockModeText = LockModeToLockModeText(lockMode);
/*

View File

@ -1066,7 +1066,7 @@ DDLTaskList(Oid relationId, const char *commandString)
List *
NodeDDLTaskList(TargetWorkerSet targets, List *commands)
{
List *workerNodes = TargetWorkerSetNodeList(targets);
List *workerNodes = TargetWorkerSetNodeList(targets, NoLock);
char *concatenatedCommands = StringJoin(commands, ';');
DDLJob *ddlJob = NULL;
ListCell *workerNodeCell = NULL;

View File

@ -979,7 +979,7 @@ CloseShardPlacementAssociation(struct MultiConnection *connection)
/*
* Note that we don't reset ConnectionPlacementHashEntry's
* primaryConnection here, that'd more complicated than it seems
* primaryConnection here, that'd be more complicated than it seems
* worth. That means we'll error out spuriously if a DML/DDL
* executing connection is closed earlier in a transaction.
*/

View File

@ -127,7 +127,7 @@ broadcast_intermediate_result(PG_FUNCTION_ARGS)
*/
BeginOrContinueCoordinatedTransaction();
nodeList = ActivePrimaryNodeList();
nodeList = ActivePrimaryNodeList(NoLock);
estate = CreateExecutorState();
resultDest = (RemoteFileDestReceiver *) CreateRemoteFileDestReceiver(resultIdString,
estate, nodeList,

View File

@ -209,7 +209,7 @@ MultiTaskTrackerExecute(Job *job)
* assigning and checking the status of tasks. The second (temporary) hash
* helps us in fetching results data from worker nodes to the master node.
*/
workerNodeList = ActivePrimaryNodeList();
workerNodeList = ActivePrimaryNodeList(NoLock);
taskTrackerCount = (uint32) list_length(workerNodeList);
/* connect as the current user for running queries */

View File

@ -30,7 +30,7 @@
/* local functions forward declarations */
static List * OpenConnectionsToAllNodes(void);
static List * OpenConnectionsToAllNodes(LOCKMODE lockMode);
static void BlockDistributedTransactions(void);
static void CreateRemoteRestorePoints(char *restoreName, List *connectionList);
@ -83,8 +83,11 @@ citus_create_restore_point(PG_FUNCTION_ARGS)
MAXFNAMELEN - 1)));
}
/* establish connections to all nodes before taking any locks */
connectionList = OpenConnectionsToAllNodes();
/*
* establish connections to all nodes before taking any locks
* ShareLock prevents new nodes being added, rendering connectionList incomplete
*/
connectionList = OpenConnectionsToAllNodes(ShareLock);
/*
* Send a BEGIN to bust through pgbouncer. We won't actually commit since
@ -111,14 +114,14 @@ citus_create_restore_point(PG_FUNCTION_ARGS)
* of connections.
*/
static List *
OpenConnectionsToAllNodes(void)
OpenConnectionsToAllNodes(LOCKMODE lockMode)
{
List *connectionList = NIL;
List *workerNodeList = NIL;
ListCell *workerNodeCell = NULL;
int connectionFlags = FORCE_NEW_CONNECTION;
workerNodeList = ActivePrimaryNodeList();
workerNodeList = ActivePrimaryNodeList(lockMode);
foreach(workerNodeCell, workerNodeList)
{

View File

@ -177,7 +177,7 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
LockRelationOid(DistNodeRelationId(), RowShareLock);
/* load and sort the worker node list for deterministic placement */
workerNodeList = ActivePrimaryNodeList();
workerNodeList = ActivePrimaryNodeList(NoLock);
workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
/*
@ -405,8 +405,11 @@ CreateReferenceTableShard(Oid distributedTableId)
tableName)));
}
/* load and sort the worker node list for deterministic placement */
workerNodeList = ActivePrimaryNodeList();
/*
* load and sort the worker node list for deterministic placements
* create_reference_table has already acquired ActivePrimaryNodeList lock
*/
workerNodeList = ActivePrimaryNodeList(NoLock);
workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
/* get the next shard id */

View File

@ -94,6 +94,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
uint32 candidateNodeIndex = 0;
List *candidateNodeList = NIL;
List *workerNodeList = NIL;
text *nullMinValue = NULL;
text *nullMaxValue = NULL;
char partitionMethod = 0;
@ -167,14 +168,15 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
shardId = GetNextShardId();
/* if enough live groups, add an extra candidate node as backup */
{
uint32 primaryNodeCount = ActivePrimaryNodeCount();
workerNodeList = ActivePrimaryNodeList(NoLock);
if (list_length(workerNodeList) > ShardReplicationFactor)
{
attemptableNodeCount = ShardReplicationFactor + 1;
}
else
{
attemptableNodeCount = ShardReplicationFactor;
if (primaryNodeCount > ShardReplicationFactor)
{
attemptableNodeCount = ShardReplicationFactor + 1;
}
}
/* first retrieve a list of random nodes for shard placements */
@ -188,7 +190,6 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
}
else if (ShardPlacementPolicy == SHARD_PLACEMENT_ROUND_ROBIN)
{
List *workerNodeList = ActivePrimaryNodeList();
candidateNode = WorkerGetRoundRobinCandidateNode(workerNodeList, shardId,
candidateNodeIndex);
}

View File

@ -24,6 +24,7 @@
#include "postmaster/postmaster.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/lmgr.h"
#include "storage/shmem.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
@ -297,7 +298,7 @@ WorkerGetNodeWithName(const char *hostname)
uint32
ActivePrimaryNodeCount(void)
{
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(NoLock);
uint32 liveWorkerCount = list_length(workerNodeList);
return liveWorkerCount;
@ -319,17 +320,25 @@ ActiveReadableNodeCount(void)
/*
* ActivePrimaryNodeList returns a list of all the active primary nodes in workerNodeHash
* lockMode specifies which lock to use on pg_dist_node, this is necessary when
* the caller wouldn't want nodes to be added concurrent to their use of this list
*/
List *
ActivePrimaryNodeList(void)
ActivePrimaryNodeList(LOCKMODE lockMode)
{
List *workerNodeList = NIL;
WorkerNode *workerNode = NULL;
HTAB *workerNodeHash = GetWorkerNodeHash();
HTAB *workerNodeHash = NULL;
HASH_SEQ_STATUS status;
EnsureModificationsCanRun();
if (lockMode != NoLock)
{
LockRelationOid(DistNodeRelationId(), lockMode);
}
workerNodeHash = GetWorkerNodeHash();
hash_seq_init(&status, workerNodeHash);
while ((workerNode = hash_seq_search(&status)) != NULL)

View File

@ -102,13 +102,13 @@ List *
OrderObjectAddressListInDependencyOrder(List *objectAddressList)
{
ObjectAddressCollector collector = { 0 };
ListCell *ojectAddressCell = NULL;
ListCell *objectAddressCell = NULL;
InitObjectAddressCollector(&collector);
foreach(ojectAddressCell, objectAddressList)
foreach(objectAddressCell, objectAddressList)
{
ObjectAddress *objectAddress = (ObjectAddress *) lfirst(ojectAddressCell);
ObjectAddress *objectAddress = (ObjectAddress *) lfirst(objectAddressCell);
if (IsObjectAddressCollected(objectAddress, &collector))
{

View File

@ -1097,7 +1097,7 @@ SchemaOwnerName(Oid objectId)
static bool
HasMetadataWorkers(void)
{
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(NoLock);
ListCell *workerNodeCell = NULL;
foreach(workerNodeCell, workerNodeList)

View File

@ -209,14 +209,14 @@ get_current_transaction_id(PG_FUNCTION_ARGS)
* the active backends from each node of the cluster. If you call that function from
* the coordinator, it will returns back active transaction from the coordinator as
* well. Yet, if you call it from the worker, result won't include the transactions
* on the coordinator node, since worker nodes do not aware of the coordinator.
* on the coordinator node, since worker nodes are not aware of the coordinator.
*/
Datum
get_global_active_transactions(PG_FUNCTION_ARGS)
{
TupleDesc tupleDescriptor = NULL;
Tuplestorestate *tupleStore = NULL;
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(NoLock);
ListCell *workerNodeCell = NULL;
List *connectionList = NIL;
ListCell *connectionCell = NULL;

View File

@ -315,7 +315,7 @@ CitusStatActivity(const char *statQuery)
{
List *citusStatsList = NIL;
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(NoLock);
ListCell *workerNodeCell = NULL;
char *nodeUser = NULL;
List *connectionList = NIL;
@ -453,7 +453,7 @@ GetLocalNodeCitusDistStat(const char *statQuery)
localGroupId = GetLocalGroupId();
/* get the current worker's node stats */
workerNodeList = ActivePrimaryNodeList();
workerNodeList = ActivePrimaryNodeList(NoLock);
foreach(workerNodeCell, workerNodeList)
{
WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);

View File

@ -122,7 +122,7 @@ RecoverTwoPhaseCommits(void)
ListCell *workerNodeCell = NULL;
int recoveredTransactionCount = 0;
workerList = ActivePrimaryNodeList();
workerList = ActivePrimaryNodeList(NoLock);
foreach(workerNodeCell, workerList)
{

View File

@ -75,7 +75,7 @@ SendCommandToWorkerAsUser(char *nodeName, int32 nodePort, const char *nodeUser,
void
SendCommandToFirstWorker(char *command)
{
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(NoLock);
WorkerNode *firstWorkerNode = NULL;
workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
@ -111,9 +111,9 @@ SendCommandToWorkers(TargetWorkerSet targetWorkerSet, const char *command)
* TargetWorkerSet.
*/
List *
TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet)
TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode)
{
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(lockMode);
ListCell *workerNodeCell = NULL;
List *result = NIL;
@ -148,7 +148,7 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet)
void
SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, List *commandList)
{
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet);
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
ListCell *workerNodeCell = NULL;
char *nodeUser = CitusExtensionOwnerName();
ListCell *commandCell = NULL;
@ -187,7 +187,7 @@ int
SendBareOptionalCommandListToWorkersAsUser(TargetWorkerSet targetWorkerSet,
List *commandList, const char *user)
{
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet);
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
ListCell *workerNodeCell = NULL;
ListCell *commandCell = NULL;
int maxError = RESPONSE_OKAY;
@ -239,7 +239,7 @@ SendCommandToWorkersParams(TargetWorkerSet targetWorkerSet, const char *command,
{
List *connectionList = NIL;
ListCell *connectionCell = NULL;
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet);
List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
ListCell *workerNodeCell = NULL;
BeginOrContinueCoordinatedTransaction();

View File

@ -967,7 +967,7 @@ CountPrimariesWithMetadata(void)
* AddNodeMetadata checks the given node information and adds the specified node to the
* pg_dist_node table of the master and workers with metadata.
* If the node already exists, the function returns the id of the node.
* If not, the following prodecure is followed while adding a node: If the groupId is not
* If not, the following procedure is followed while adding a node: If the groupId is not
* explicitly given by the user, the function picks the group that the new node should
* be in with respect to GroupSize. Then, the new node is inserted into the local
* pg_dist_node as well as the nodes with hasmetadata=true.

View File

@ -253,13 +253,11 @@ ReplicateShardToAllWorkers(ShardInterval *shardInterval)
ListCell *workerNodeCell = NULL;
/* prevent concurrent pg_dist_node changes */
LockRelationOid(DistNodeRelationId(), RowShareLock);
workerNodeList = ActivePrimaryNodeList();
workerNodeList = ActivePrimaryNodeList(ShareLock);
/*
* We will iterate over all worker nodes and if healthy placement is not exist at
* given node we will copy the shard to that node. Then we will also modify
* We will iterate over all worker nodes and if a healthy placement does not exist
* at given node we will copy the shard to that node. Then we will also modify
* the metadata to reflect newly copied shard.
*/
workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
@ -391,7 +389,7 @@ uint32
CreateReferenceTableColocationId()
{
uint32 colocationId = INVALID_COLOCATION_ID;
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(ShareLock);
int shardCount = 1;
int replicationFactor = list_length(workerNodeList);
Oid distributionColumnType = InvalidOid;

View File

@ -209,7 +209,7 @@ LockShardListResourcesOnFirstWorker(LOCKMODE lockmode, List *shardIntervalList)
static bool
IsFirstWorkerNode()
{
List *workerNodeList = ActivePrimaryNodeList();
List *workerNodeList = ActivePrimaryNodeList(NoLock);
WorkerNode *firstWorkerNode = NULL;
workerNodeList = SortList(workerNodeList, CompareWorkerNodes);

View File

@ -63,7 +63,7 @@ extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList,
uint32 placementIndex);
extern WorkerNode * WorkerGetLocalFirstCandidateNode(List *currentNodeList);
extern uint32 ActivePrimaryNodeCount(void);
extern List * ActivePrimaryNodeList(void);
extern List * ActivePrimaryNodeList(LOCKMODE lockMode);
extern uint32 ActiveReadableNodeCount(void);
extern List * ActiveReadableNodeList(void);
extern WorkerNode * GetWorkerNodeByNodeId(int nodeId);

View File

@ -13,6 +13,7 @@
#define WORKER_TRANSACTION_H
#include "distributed/worker_manager.h"
#include "storage/lockdefs.h"
/*
@ -29,7 +30,7 @@ typedef enum TargetWorkerSet
/* Functions declarations for worker transactions */
extern List * GetWorkerTransactions(void);
extern List * TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet);
extern List * TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode);
extern void SendCommandToWorker(char *nodeName, int32 nodePort, const char *command);
extern void SendCommandToWorkerAsUser(char *nodeName, int32 nodePort,
const char *nodeUser, const char *command);

View File

@ -1,7 +1,7 @@
Parsed test spec with 2 sessions
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_reference_table
create_distributed_table
step s2-load-metadata-cache:
@ -24,9 +24,9 @@ step s1-commit:
step s2-copy-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
@ -41,7 +41,7 @@ master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
create_distributed_table
step s2-load-metadata-cache:
@ -64,9 +64,9 @@ step s1-add-second-worker: <... completed>
1
step s2-print-content:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
@ -81,7 +81,7 @@ master_remove_node
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
create_reference_table
create_distributed_table
step s2-load-metadata-cache:
@ -104,9 +104,9 @@ step s1-commit:
step s2-insert-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
@ -121,7 +121,7 @@ master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
create_distributed_table
step s2-load-metadata-cache:
@ -144,9 +144,9 @@ step s1-add-second-worker: <... completed>
1
step s2-print-content:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
@ -161,7 +161,7 @@ master_remove_node
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
create_reference_table
create_distributed_table
step s2-load-metadata-cache:
@ -184,9 +184,9 @@ step s1-commit:
step s2-ddl-on-reference-table: <... completed>
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
@ -201,7 +201,7 @@ master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
create_reference_table
create_distributed_table
step s2-load-metadata-cache:
@ -224,9 +224,9 @@ step s1-add-second-worker: <... completed>
1
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
@ -240,10 +240,96 @@ master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
create_distributed_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
1
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-reference-table-2: <... completed>
create_reference_table
step s2-print-content-2:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
create_distributed_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
step s2-begin:
BEGIN;
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
create_reference_table
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
?column?
1
step s2-print-content-2:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_distributed_table
step s1-begin:
BEGIN;
@ -261,9 +347,9 @@ step s1-commit:
step s2-copy-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
@ -278,7 +364,7 @@ master_remove_node
starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
create_distributed_table
step s2-begin:
@ -298,9 +384,9 @@ step s1-add-second-worker: <... completed>
1
step s2-print-content:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
@ -315,7 +401,7 @@ master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
create_reference_table
create_distributed_table
step s1-begin:
@ -335,9 +421,9 @@ step s1-commit:
step s2-insert-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
@ -352,7 +438,7 @@ master_remove_node
starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
create_distributed_table
step s2-begin:
@ -372,9 +458,9 @@ step s1-add-second-worker: <... completed>
1
step s2-print-content:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
@ -389,7 +475,7 @@ master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
create_reference_table
create_distributed_table
step s1-begin:
@ -409,9 +495,9 @@ step s1-commit:
step s2-ddl-on-reference-table: <... completed>
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
@ -426,7 +512,7 @@ master_remove_node
starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
create_reference_table
create_distributed_table
step s2-begin:
@ -446,9 +532,9 @@ step s1-add-second-worker: <... completed>
1
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
@ -461,3 +547,83 @@ master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
create_distributed_table
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
1
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-reference-table-2: <... completed>
create_reference_table
step s2-print-content-2:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
create_distributed_table
step s2-begin:
BEGIN;
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
create_reference_table
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
?column?
1
step s2-print-content-2:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node

View File

@ -1,463 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 10
57638 t 10
master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s2-begin:
BEGIN;
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 10
57638 t 10
master_remove_node
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
<waiting ...>
step s1-commit:
COMMIT;
step s2-insert-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 6
57638 t 6
master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s2-begin:
BEGIN;
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 6
57638 t 6
master_remove_node
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
<waiting ...>
step s1-commit:
COMMIT;
step s2-ddl-on-reference-table: <... completed>
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
create_reference_table
step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s2-begin:
BEGIN;
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_reference_table
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 5
57638 t 5
master_remove_node
starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
step s2-begin:
BEGIN;
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"';
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 5
57638 t 5
master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
create_reference_table
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
<waiting ...>
step s1-commit:
COMMIT;
step s2-insert-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_reference_table
step s2-begin:
BEGIN;
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-content:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
create_reference_table
step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
nodename nodeport isactive
localhost 57638 t
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
<waiting ...>
step s1-commit:
COMMIT;
step s2-ddl-on-reference-table: <... completed>
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node
starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
create_reference_table
step s2-begin:
BEGIN;
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
step s1-add-second-worker:
SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s1-add-second-worker: <... completed>
nodename nodeport isactive
localhost 57638 t
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport success result
57637 t 1
57638 t 1
master_remove_node

View File

@ -41,16 +41,16 @@ step s3-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
SELECT worker_apply_shard_ddl_command (102145, 'public', '
SELECT worker_apply_shard_ddl_command (102217, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (102144, 'public', '
SELECT worker_apply_shard_ddl_command (102216, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (102143, 'public', '
SELECT worker_apply_shard_ddl_command (102215, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (102142, 'public', '
SELECT worker_apply_shard_ddl_command (102214, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback:
@ -104,7 +104,7 @@ step s3-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
INSERT INTO public.test_table_102148 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
INSERT INTO public.test_table_102220 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback:
ROLLBACK;
@ -159,10 +159,10 @@ step s3-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
COPY (SELECT count(*) AS count FROM test_table_102153 test_table WHERE true) TO STDOUTlocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
COPY (SELECT count(*) AS count FROM test_table_102152 test_table WHERE true) TO STDOUTlocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
COPY (SELECT count(*) AS count FROM test_table_102151 test_table WHERE true) TO STDOUTlocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
COPY (SELECT count(*) AS count FROM test_table_102150 test_table WHERE true) TO STDOUTlocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_102225 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_102224 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_102223 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_102222 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback:
ROLLBACK;
@ -217,7 +217,7 @@ step s3-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
SELECT count(*) AS count FROM public.test_table_102155 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 0 idle Client ClientRead postgres regression
SELECT count(*) AS count FROM public.test_table_102227 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback:
ROLLBACK;

View File

@ -41,16 +41,16 @@ step s3-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
SELECT worker_apply_shard_ddl_command (102145, 'public', '
SELECT worker_apply_shard_ddl_command (102217, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (102144, 'public', '
SELECT worker_apply_shard_ddl_command (102216, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (102143, 'public', '
SELECT worker_apply_shard_ddl_command (102215, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT worker_apply_shard_ddl_command (102142, 'public', '
SELECT worker_apply_shard_ddl_command (102214, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback:
@ -104,7 +104,7 @@ step s3-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
INSERT INTO public.test_table_102148 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
INSERT INTO public.test_table_102220 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback:
ROLLBACK;
@ -159,10 +159,10 @@ step s3-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
SELECT count(*) AS count FROM test_table_102153 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_102152 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_102151 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_102150 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
COPY (SELECT count(*) AS count FROM test_table_102225 test_table WHERE true) TO STDOUTlocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
COPY (SELECT count(*) AS count FROM test_table_102224 test_table WHERE true) TO STDOUTlocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
COPY (SELECT count(*) AS count FROM test_table_102223 test_table WHERE true) TO STDOUTlocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
COPY (SELECT count(*) AS count FROM test_table_102222 test_table WHERE true) TO STDOUTlocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
step s2-rollback:
ROLLBACK;
@ -217,7 +217,7 @@ step s3-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
SELECT count(*) AS count FROM public.test_table_102155 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM public.test_table_102227 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 0 idle Client ClientRead postgres regression
step s2-rollback:
ROLLBACK;

View File

@ -29,11 +29,11 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
246 245 f
264 263 f
transactionnumberwaitingtransactionnumbers
245
246 245
263
264 263
step s1-abort:
ABORT;
@ -77,14 +77,14 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
250 249 f
251 249 f
251 250 t
268 267 f
269 267 f
269 268 t
transactionnumberwaitingtransactionnumbers
249
250 249
251 249,250
267
268 267
269 267,268
step s1-abort:
ABORT;

View File

@ -29,11 +29,11 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
247 246 f
265 264 f
transactionnumberwaitingtransactionnumbers
246
247 246
264
265 264
step s1-abort:
ABORT;
@ -77,14 +77,14 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
251 250 f
252 250 f
252 251 t
269 268 f
270 268 f
270 269 t
transactionnumberwaitingtransactionnumbers
250
251 250
252 250,251
268
269 268
270 268,269
step s1-abort:
ABORT;

View File

@ -16,7 +16,7 @@ step s1-commit:
COMMIT;
step s2-insert: <... completed>
error in steps s1-commit s2-insert: ERROR: duplicate key value violates unique constraint "test_locking_a_key_102417"
error in steps s1-commit s2-insert: ERROR: duplicate key value violates unique constraint "test_locking_a_key_102489"
step s2-commit:
COMMIT;

View File

@ -2,16 +2,23 @@
# add single one of the nodes for the purpose of the test
setup
{
SET citus.shard_replication_factor to 1;
SELECT 1 FROM master_add_node('localhost', 57637);
CREATE TABLE test_reference_table (test_id integer);
CREATE TABLE test_reference_table_2 (test_id integer);
INSERT INTO test_reference_table_2 VALUES (8);
SELECT create_reference_table('test_reference_table');
CREATE TABLE test_table (x int, y int);
SELECT create_distributed_table('test_table','x');
}
# ensure that both nodes exists for the remaining of the isolation tests
# ensure neither node is added for the remainder of the isolation tests
teardown
{
DROP TABLE test_reference_table;
DROP TABLE test_reference_table_2;
DROP TABLE test_table;
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
}
@ -46,7 +53,7 @@ step "s2-load-metadata-cache"
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
}
step "s2-copy-to-reference-table"
step "s2-copy-to-reference-table"
{
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
}
@ -61,6 +68,11 @@ step "s2-ddl-on-reference-table"
CREATE INDEX reference_index ON test_reference_table(test_id);
}
step "s2-create-reference-table-2"
{
SELECT create_reference_table('test_reference_table_2');
}
step "s2-begin"
{
BEGIN;
@ -73,27 +85,37 @@ step "s2-commit"
step "s2-print-content"
{
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from %s')
ORDER BY
nodeport;
}
step "s2-print-content-2"
{
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
ORDER BY
nodeport;
}
step "s2-print-index-count"
{
SELECT
nodeport, success, result
FROM
SELECT
nodeport, success, result
FROM
run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
}
# verify that copy/insert gets the invalidation and re-builds its metadata cache
# note that we need to run "s1-load-metadata-cache" and "s2-load-metadata-cache"
# to ensure that metadata is cached otherwise the test would be useless since
# note that we need to run "s1-load-metadata-cache" and "s2-load-metadata-cache"
# to ensure that metadata is cached otherwise the test would be useless since
# the cache would be empty and the metadata data is gathered from the tables directly
permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content"
permutation "s2-load-metadata-cache" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
@ -101,6 +123,8 @@ permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-inser
permutation "s2-load-metadata-cache" "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
permutation "s2-load-metadata-cache" "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
permutation "s2-load-metadata-cache" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"
# same tests without loading the cache
@ -110,3 +134,6 @@ permutation "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1
permutation "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
permutation "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
permutation "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
permutation "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
permutation "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"