mirror of https://github.com/citusdata/citus.git
GUC for replicate reference tables on activate.
parent c168a53ebc
commit dda53a0bba
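
This commit adds the citus.replicate_reference_tables_on_activate setting
(boolean, default on). When it is on, reference tables are copied to a worker
as part of node activation; when it is off, activation skips the copy and
replication is deferred until the placements are needed or requested
explicitly. A minimal usage sketch in SQL (the port is an example value taken
from the regression tests below; replicate_reference_tables() is the UDF
installed by the 9.3-2 upgrade script included in this commit):

    -- skip the reference table copy while adding a node
    SET citus.replicate_reference_tables_on_activate TO off;
    SELECT 1 FROM master_add_node('localhost', 57638);

    -- replicate reference tables explicitly once the node is ready
    SELECT replicate_reference_tables();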
@@ -43,12 +43,6 @@
 #include "utils/lsyscache.h"
 #include "utils/palloc.h"
 
-#define TRANSFER_MODE_AUTOMATIC 'a'
-#define TRANSFER_MODE_FORCE_LOGICAL 'l'
-#define TRANSFER_MODE_BLOCK_WRITES 'b'
-
-
-
 /* local function forward declarations */
 static char LookupShardTransferMode(Oid shardReplicationModeOid);
 static void ErrorIfTableCannotBeReplicated(Oid relationId);
@@ -60,6 +60,12 @@ int GroupSize = 1;
 /* config variable managed via guc.c */
 char *CurrentCluster = "default";
 
+/*
+ * Config variable to control whether we should replicate reference tables on
+ * node activation or defer it to shard creation.
+ */
+bool ReplicateReferenceTablesOnActivate = true;
+
 /* did current transaction modify pg_dist_node? */
 bool TransactionModifiedNodeMetadata = false;
 
@@ -381,6 +387,12 @@ SetUpDistributedTableDependencies(WorkerNode *newWorkerNode)
 		ReplicateAllDependenciesToNode(newWorkerNode->workerName,
 									   newWorkerNode->workerPort);
 
+		if (ReplicateReferenceTablesOnActivate)
+		{
+			ReplicateAllReferenceTablesToNode(newWorkerNode->workerName,
+											  newWorkerNode->workerPort);
+		}
+
 		/*
 		 * Let the maintenance daemon do the hard work of syncing the metadata.
 		 * We prefer this because otherwise node activation might fail within
@@ -53,6 +53,7 @@
 #include "distributed/multi_server_executor.h"
 #include "distributed/pg_dist_partition.h"
 #include "distributed/placement_connection.h"
+#include "distributed/reference_table_utils.h"
 #include "distributed/relation_access_tracking.h"
 #include "distributed/run_from_same_connection.h"
 #include "distributed/query_pushdown_planning.h"
@@ -1370,6 +1371,16 @@ RegisterCitusConfigVariables(void)
 		GUC_NO_SHOW_ALL,
 		NULL, NULL, NULL);
 
+	DefineCustomBoolVariable(
+		"citus.replicate_reference_tables_on_activate",
+		NULL,
+		NULL,
+		&ReplicateReferenceTablesOnActivate,
+		true,
+		PGC_USERSET,
+		GUC_NO_SHOW_ALL,
+		NULL, NULL, NULL);
+
 	/* warn about config items in the citus namespace that are not registered above */
 	EmitWarningsOnPlaceholders("citus");
 }
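
Note that the GUC is registered with GUC_NO_SHOW_ALL, so it is omitted from
SHOW ALL output; per standard PostgreSQL GUC behavior it can still be read and
set by name, e.g.:

    SHOW citus.replicate_reference_tables_on_activate;
    SET citus.replicate_reference_tables_on_activate TO on;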
@@ -4,4 +4,4 @@
 #include "udfs/citus_extradata_container/9.3-2.sql"
 #include "udfs/update_distributed_table_colocation/9.3-2.sql"
-#include "udfs/replicate_reference_tables/9.3-1.sql"
+#include "udfs/replicate_reference_tables/9.3-2.sql"
@@ -22,6 +22,7 @@
 #include "distributed/master_metadata_utility.h"
 #include "distributed/metadata_cache.h"
 #include "distributed/metadata_sync.h"
+#include "distributed/multi_executor.h"
 #include "distributed/multi_logical_planner.h"
 #include "distributed/reference_table_utils.h"
 #include "distributed/relation_access_tracking.h"
@@ -38,11 +39,12 @@
 #include "utils/lsyscache.h"
 #include "utils/rel.h"
 
 
 /* local function forward declarations */
-static List * WorkersWithoutReferenceTablePlacement(uint64 shardId);
-static void CopyShardPlacementToWorkerNode(ShardPlacement *sourceShardPlacement,
-										   WorkerNode *workerNode, const char *userName);
+static List * WorkersWithoutReferenceTablePlacement(uint64 shardId, LOCKMODE lockMode);
+static StringInfo CopyShardPlacementToWorkerNodeQuery(
+	ShardPlacement *sourceShardPlacement,
+	WorkerNode *workerNode,
+	char transferMode);
 static void ReplicateSingleShardTableToAllNodes(Oid relationId);
 static void ReplicateShardToAllNodes(ShardInterval *shardInterval);
 static void ReplicateShardToNode(ShardInterval *shardInterval, char *nodeName,
@@ -110,7 +112,20 @@ EnsureReferenceTablesExistOnAllNodes(void)
 		return;
 	}
 
-	/* prevent this function from running concurrently with itself */
+	/*
+	 * Prevent this function from running concurrently with itself.
+	 *
+	 * It also prevents concurrent DROP TABLE or DROP SCHEMA. We need this
+	 * because throughout this function we assume values in referenceTableIdList
+	 * are still valid.
+	 *
+	 * We don't need to handle other kinds of reference table DML/DDL here, since
+	 * master_copy_shard_placement gets enough locks for that.
+	 *
+	 * We also don't need special handling for concurrent create_reference_table,
+	 * since that will trigger a call to this function from another backend,
+	 * which will block until our call is finished.
+	 */
 	int colocationId = CreateReferenceTableColocationId();
 	LockColocationId(colocationId, ExclusiveLock);
 
@@ -123,18 +138,26 @@ EnsureReferenceTablesExistOnAllNodes(void)
 	}
 
 	Oid referenceTableId = linitial_oid(referenceTableIdList);
+	const char *referenceTableName = get_rel_name(referenceTableId);
 	List *shardIntervalList = LoadShardIntervalList(referenceTableId);
 	if (list_length(shardIntervalList) == 0)
 	{
 		/* check for corrupt metadata */
 		ereport(ERROR, (errmsg("reference table \"%s\" does not have a shard",
-							   get_rel_name(referenceTableId))));
+							   referenceTableName)));
 	}
 
 	ShardInterval *shardInterval = (ShardInterval *) linitial(shardIntervalList);
 	uint64 shardId = shardInterval->shardId;
 
-	List *newWorkersList = WorkersWithoutReferenceTablePlacement(shardId);
+	/*
+	 * We only take an access share lock, otherwise we'll hold up master_add_node.
+	 * In case of create_reference_table() and upgrade_to_reference_table(), where
+	 * we don't want concurrent writes to pg_dist_node, we have already acquired
+	 * ShareLock on pg_dist_node.
+	 */
+	List *newWorkersList = WorkersWithoutReferenceTablePlacement(shardId,
+																 AccessShareLock);
 	if (list_length(newWorkersList) == 0)
 	{
 		/* nothing to do, no need for lock */
@@ -171,21 +194,48 @@ EnsureReferenceTablesExistOnAllNodes(void)
 	if (sourceShardPlacement == NULL)
 	{
 		/* check for corrupt metadata */
-		ereport(ERROR, (errmsg("reference table shard " UINT64_FORMAT " does not "
-							   "have an active shard placement",
+		ereport(ERROR, (errmsg("reference table shard "
+							   UINT64_FORMAT
+							   " does not have an active shard placement",
 							   shardId)));
 	}
 
 	WorkerNode *newWorkerNode = NULL;
 	foreach_ptr(newWorkerNode, newWorkersList)
 	{
+		ereport(NOTICE, (errmsg("replicating reference table '%s' to %s:%d ...",
+								referenceTableName, newWorkerNode->workerName,
+								newWorkerNode->workerPort)));
+
 		/*
 		 * Call master_copy_shard_placement using citus extension owner. Current
 		 * user might not have permissions to do the copy.
 		 */
 		const char *userName = CitusExtensionOwnerName();
-		CopyShardPlacementToWorkerNode(sourceShardPlacement, newWorkerNode,
-									   userName);
+		int connectionFlags = OUTSIDE_TRANSACTION;
+
+		MultiConnection *connection = GetNodeUserDatabaseConnection(
+			connectionFlags, "localhost", PostPortNumber,
+			userName, NULL);
+
+		if (PQstatus(connection->pgConn) == CONNECTION_OK)
+		{
+			StringInfo placementCopyCommand =
+				CopyShardPlacementToWorkerNodeQuery(sourceShardPlacement,
+													newWorkerNode,
+													TRANSFER_MODE_AUTOMATIC);
+			ExecuteCriticalRemoteCommand(connection, placementCopyCommand->data);
+		}
+		else
+		{
+			ereport(ERROR, (errmsg("could not open a connection to localhost "
+								   "when replicating reference tables"),
+							errdetail(
+								"citus.replicate_reference_tables_on_activate = false "
+								"requires localhost connectivity.")));
+		}
+
+		CloseConnection(connection);
 	}
 
 	/*
@@ -224,14 +274,13 @@ AnyRelationsModifiedInTransaction(List *relationIdList)
  * supposed to.
  */
 static List *
-WorkersWithoutReferenceTablePlacement(uint64 shardId)
+WorkersWithoutReferenceTablePlacement(uint64 shardId, LOCKMODE lockMode)
 {
 	List *workersWithoutPlacements = NIL;
 
 	List *shardPlacementList = ActiveShardPlacementList(shardId);
 
-	/* we only take an access share lock, otherwise we'll hold up master_add_node */
-	List *workerNodeList = ReferenceTablePlacementNodeList(AccessShareLock);
+	List *workerNodeList = ReferenceTablePlacementNodeList(lockMode);
 	workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
 
 	WorkerNode *workerNode = NULL;
@@ -252,33 +301,33 @@ WorkersWithoutReferenceTablePlacement(uint64 shardId)
 
 
 /*
- * CopyShardPlacementToWorkerNode runs master_copy_shard_placement
- * using the given username by connecting to localhost.
+ * CopyShardPlacementToWorkerNodeQuery returns the master_copy_shard_placement
+ * command to copy the given shard placement to given node.
  */
-static void
-CopyShardPlacementToWorkerNode(ShardPlacement *sourceShardPlacement,
-							   WorkerNode *workerNode,
-							   const char *userName)
+static StringInfo
+CopyShardPlacementToWorkerNodeQuery(ShardPlacement *sourceShardPlacement,
+									WorkerNode *workerNode,
+									char transferMode)
 {
-	int connectionFlags = OUTSIDE_TRANSACTION;
 	StringInfo queryString = makeStringInfo();
 
-	MultiConnection *connection = GetNodeUserDatabaseConnection(
-		connectionFlags, "localhost", PostPortNumber,
-		userName, NULL);
+	const char *transferModeString =
+		transferMode == TRANSFER_MODE_BLOCK_WRITES ? "block_writes" :
+		transferMode == TRANSFER_MODE_FORCE_LOGICAL ? "force_logical" :
+		"auto";
 
 	appendStringInfo(queryString,
 					 "SELECT master_copy_shard_placement("
-					 UINT64_FORMAT ", %s, %d, %s, %d, do_repair := false)",
+					 UINT64_FORMAT ", %s, %d, %s, %d, do_repair := false, "
+					 "transfer_mode := %s)",
 					 sourceShardPlacement->shardId,
 					 quote_literal_cstr(sourceShardPlacement->nodeName),
 					 sourceShardPlacement->nodePort,
 					 quote_literal_cstr(workerNode->workerName),
-					 workerNode->workerPort);
+					 workerNode->workerPort,
+					 quote_literal_cstr(transferModeString));
 
-	elog(DEBUG3, "%s", queryString->data);
-
-	ExecuteCriticalRemoteCommand(connection, queryString->data);
+	return queryString;
 }
 
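
For illustration, the command built by CopyShardPlacementToWorkerNodeQuery()
has the following shape (shard ID and ports are example values borrowed from
the regression tests below):

    SELECT master_copy_shard_placement(1310073, 'localhost', 57637,
                                       'localhost', 57638,
                                       do_repair := false,
                                       transfer_mode := 'auto');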
@@ -657,3 +706,63 @@ ReferenceTableReplicationFactor(void)
 	int replicationFactor = list_length(nodeList);
 	return replicationFactor;
 }
+
+
+/*
+ * ReplicateAllReferenceTablesToNode function finds all reference tables and
+ * replicates them to the given worker node. It also modifies pg_dist_colocation
+ * table to update the replication factor column when necessary. This function
+ * skips reference tables if that node already has healthy placement of that
+ * reference table to prevent unnecessary data transfer.
+ */
+void
+ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort)
+{
+	List *referenceTableList = ReferenceTableOidList();
+
+	/* if there is no reference table, we do not need to replicate anything */
+	if (list_length(referenceTableList) > 0)
+	{
+		List *referenceShardIntervalList = NIL;
+
+		/*
+		 * We sort the reference table list to prevent deadlocks in concurrent
+		 * ReplicateAllReferenceTablesToAllNodes calls.
+		 */
+		referenceTableList = SortList(referenceTableList, CompareOids);
+		Oid referenceTableId = InvalidOid;
+		foreach_oid(referenceTableId, referenceTableList)
+		{
+			List *shardIntervalList = LoadShardIntervalList(referenceTableId);
+			ShardInterval *shardInterval = (ShardInterval *) linitial(shardIntervalList);
+
+			referenceShardIntervalList = lappend(referenceShardIntervalList,
+												 shardInterval);
+		}
+
+		if (ClusterHasKnownMetadataWorkers())
+		{
+			BlockWritesToShardList(referenceShardIntervalList);
+		}
+
+		ShardInterval *shardInterval = NULL;
+		foreach_ptr(shardInterval, referenceShardIntervalList)
+		{
+			uint64 shardId = shardInterval->shardId;
+
+			LockShardDistributionMetadata(shardId, ExclusiveLock);
+
+			ReplicateShardToNode(shardInterval, nodeName, nodePort);
+		}
+
+		/* create foreign constraints between reference tables */
+		foreach_ptr(shardInterval, referenceShardIntervalList)
+		{
+			char *tableOwner = TableOwner(shardInterval->relationId);
+			List *commandList = CopyShardForeignConstraintCommandList(shardInterval);
+
+			SendCommandListToWorkerInSingleTransaction(nodeName, nodePort, tableOwner,
+													   commandList);
+		}
+	}
+}
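
The regression tests below verify the outcome of this function with a
placement query along these lines (a sketch using the same catalog view the
tests query):

    SELECT shardid, nodename, nodeport
    FROM pg_dist_shard_placement
    ORDER BY shardid, nodeport;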
@@ -48,6 +48,11 @@
 #define CANDIDATE_NODE_FIELDS 2
 #define WORKER_NODE_FIELDS 2
 
+/* transfer mode for master_copy_shard_placement */
+#define TRANSFER_MODE_AUTOMATIC 'a'
+#define TRANSFER_MODE_FORCE_LOGICAL 'l'
+#define TRANSFER_MODE_BLOCK_WRITES 'b'
+
 /* Name of columnar foreign data wrapper */
 #define CSTORE_FDW_NAME "cstore_fdw"
 
@@ -22,6 +22,6 @@ extern uint32 CreateReferenceTableColocationId(void);
 extern void DeleteAllReferenceTablePlacementsFromNodeGroup(int32 groupId);
 extern int CompareOids(const void *leftElement, const void *rightElement);
 extern int ReferenceTableReplicationFactor(void);
 
+extern void ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort);
 
 #endif   /* REFERENCE_TABLE_UTILS_H_ */
@@ -61,6 +61,7 @@ typedef struct WorkerNode
 extern int MaxWorkerNodesTracked;
 extern char *WorkerListFileName;
 extern char *CurrentCluster;
+extern bool ReplicateReferenceTablesOnActivate;
 
 
 /* Function declarations for finding worker nodes to place shards on */
@@ -111,3 +111,6 @@ s/worker_hash_partition_table \([0-9]+/worker_hash_partition_table \(xxxxxxx/g
 # ignore first parameter for citus_extradata_container due to differences between pg11 and pg12
 # can be removed when we remove PG_VERSION_NUM >= 120000
 s/pg_catalog.citus_extradata_container\([0-9]+/pg_catalog.citus_extradata_container\(XXX/g
+
+# ignore reference table replication messages
+/replicating reference table.*$/d
@@ -11,6 +11,7 @@ SELECT citus.mitmproxy('conn.allow()');
 (1 row)
 
 SET citus.next_shard_id TO 200000;
+SET citus.replicate_reference_tables_on_activate TO off;
 -- verify we have all worker nodes present
 SELECT * FROM master_get_active_worker_nodes()
 ORDER BY 1, 2;
@@ -1,9 +1,12 @@
 Parsed test spec with 2 sessions
 
-starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
+starting permutation: s1-do-not-replicate-on-activate s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-load-metadata-cache:
 	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
 
@@ -38,10 +41,13 @@ master_remove_node
 
 
 
-starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
+starting permutation: s1-do-not-replicate-on-activate s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-load-metadata-cache:
	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
 
@@ -76,10 +82,13 @@ master_remove_node
 
 
 
-starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
+starting permutation: s1-do-not-replicate-on-activate s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-load-metadata-cache:
 	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
 
@@ -114,10 +123,13 @@ master_remove_node
 
 
 
-starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
+starting permutation: s1-do-not-replicate-on-activate s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-load-metadata-cache:
 	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
 
@@ -152,10 +164,13 @@ master_remove_node
 
 
 
-starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
+starting permutation: s1-do-not-replicate-on-activate s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-load-metadata-cache:
 	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
 
@@ -191,10 +206,13 @@ master_remove_node
 
 
 
-starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
+starting permutation: s1-do-not-replicate-on-activate s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-load-metadata-cache:
 	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
 
@@ -230,10 +248,13 @@ master_remove_node
 
 
 
-starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
+starting permutation: s1-do-not-replicate-on-activate s2-load-metadata-cache s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-load-metadata-cache:
 	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
 
@@ -273,10 +294,13 @@ master_remove_node
 
 
 
-starting permutation: s2-load-metadata-cache s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
+starting permutation: s1-do-not-replicate-on-activate s2-load-metadata-cache s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-load-metadata-cache:
 	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
 
@@ -315,10 +339,13 @@ master_remove_node
 
 
 
-starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
+starting permutation: s1-do-not-replicate-on-activate s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s1-begin:
 	BEGIN;
 
@@ -350,10 +377,13 @@ master_remove_node
 
 
 
-starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
+starting permutation: s1-do-not-replicate-on-activate s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-begin:
 	BEGIN;
 
@@ -385,10 +415,13 @@ master_remove_node
 
 
 
-starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
+starting permutation: s1-do-not-replicate-on-activate s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s1-begin:
 	BEGIN;
 
@@ -420,10 +453,13 @@ master_remove_node
 
 
 
-starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
+starting permutation: s1-do-not-replicate-on-activate s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-begin:
 	BEGIN;
 
@@ -455,10 +491,13 @@ master_remove_node
 
 
 
-starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
+starting permutation: s1-do-not-replicate-on-activate s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s1-begin:
 	BEGIN;
 
@@ -491,10 +530,13 @@ master_remove_node
 
 
 
-starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
+starting permutation: s1-do-not-replicate-on-activate s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-begin:
 	BEGIN;
 
@@ -527,10 +569,13 @@ master_remove_node
 
 
 
-starting permutation: s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
+starting permutation: s1-do-not-replicate-on-activate s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s1-begin:
 	BEGIN;
 
@@ -567,10 +612,13 @@ master_remove_node
 
 
 
-starting permutation: s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
+starting permutation: s1-do-not-replicate-on-activate s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
 create_distributed_table
 
 
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s2-begin:
 	BEGIN;
 
@@ -606,10 +654,339 @@ master_remove_node
 
 
 
-starting permutation: s1-add-second-worker s2-begin s1-begin s1-drop-reference-table s2-replicate-reference-tables s1-commit s2-commit
+starting permutation: s1-replicate-on-activate s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
 create_distributed_table
 
 
+step s1-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO on;
+
+step s1-begin:
+	BEGIN;
+
+step s1-add-second-worker:
+	SELECT 1 FROM master_add_node('localhost', 57638);
+
+?column?
+
+1
+step s2-copy-to-reference-table:
+	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
+ <waiting ...>
+step s1-commit:
+	COMMIT;
+
+step s2-copy-to-reference-table: <... completed>
+step s2-print-content:
+	SELECT
+		nodeport, success, result
+	FROM
+		run_command_on_placements('test_reference_table', 'select count(*) from %s')
+	ORDER BY
+		nodeport;
+
+nodeport success result
+
+57637 t 5
+57638 t 5
+master_remove_node
+
+
+
+starting permutation: s1-replicate-on-activate s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
+create_distributed_table
+
+
+step s1-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO on;
+
+step s2-begin:
+	BEGIN;
+
+step s2-copy-to-reference-table:
+	COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
+
+step s1-add-second-worker:
+	SELECT 1 FROM master_add_node('localhost', 57638);
+ <waiting ...>
+step s2-commit:
+	COMMIT;
+
+step s1-add-second-worker: <... completed>
+?column?
+
+1
+step s2-print-content:
+	SELECT
+		nodeport, success, result
+	FROM
+		run_command_on_placements('test_reference_table', 'select count(*) from %s')
+	ORDER BY
+		nodeport;
+
+nodeport success result
+
+57637 t 5
+57638 t 5
+master_remove_node
+
+
+
+starting permutation: s1-replicate-on-activate s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
+create_distributed_table
+
+
+step s1-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO on;
+
+step s1-begin:
+	BEGIN;
+
+step s1-add-second-worker:
+	SELECT 1 FROM master_add_node('localhost', 57638);
+
+?column?
+
+1
+step s2-insert-to-reference-table:
+	INSERT INTO test_reference_table VALUES (6);
+ <waiting ...>
+step s1-commit:
+	COMMIT;
+
+step s2-insert-to-reference-table: <... completed>
+step s2-print-content:
+	SELECT
+		nodeport, success, result
+	FROM
+		run_command_on_placements('test_reference_table', 'select count(*) from %s')
+	ORDER BY
+		nodeport;
+
+nodeport success result
+
+57637 t 1
+57638 t 1
+master_remove_node
+
+
+
+starting permutation: s1-replicate-on-activate s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
+create_distributed_table
+
+
+step s1-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO on;
+
+step s2-begin:
+	BEGIN;
+
+step s2-insert-to-reference-table:
+	INSERT INTO test_reference_table VALUES (6);
+
+step s1-add-second-worker:
+	SELECT 1 FROM master_add_node('localhost', 57638);
+ <waiting ...>
+step s2-commit:
+	COMMIT;
+
+step s1-add-second-worker: <... completed>
+?column?
+
+1
+step s2-print-content:
+	SELECT
+		nodeport, success, result
+	FROM
+		run_command_on_placements('test_reference_table', 'select count(*) from %s')
+	ORDER BY
+		nodeport;
+
+nodeport success result
+
+57637 t 1
+57638 t 1
+master_remove_node
+
+
+
+starting permutation: s1-replicate-on-activate s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
+create_distributed_table
+
+
+step s1-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO on;
+
+step s1-begin:
+	BEGIN;
+
+step s1-add-second-worker:
+	SELECT 1 FROM master_add_node('localhost', 57638);
+
+?column?
+
+1
+step s2-ddl-on-reference-table:
+	CREATE INDEX reference_index ON test_reference_table(test_id);
+ <waiting ...>
+step s1-commit:
+	COMMIT;
+
+step s2-ddl-on-reference-table: <... completed>
+step s2-print-index-count:
+	SELECT
+		nodeport, success, result
+	FROM
+		run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
+	ORDER BY
+		nodeport;
+
+nodeport success result
+
+57637 t 1
+57638 t 1
+master_remove_node
+
+
+
+starting permutation: s1-replicate-on-activate s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
+create_distributed_table
+
+
+step s1-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO on;
+
+step s2-begin:
+	BEGIN;
+
+step s2-ddl-on-reference-table:
+	CREATE INDEX reference_index ON test_reference_table(test_id);
+
+step s1-add-second-worker:
+	SELECT 1 FROM master_add_node('localhost', 57638);
+ <waiting ...>
+step s2-commit:
+	COMMIT;
+
+step s1-add-second-worker: <... completed>
+?column?
+
+1
+step s2-print-index-count:
+	SELECT
+		nodeport, success, result
+	FROM
+		run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
+	ORDER BY
+		nodeport;
+
+nodeport success result
+
+57637 t 1
+57638 t 1
+master_remove_node
+
+
+
+starting permutation: s1-replicate-on-activate s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
+create_distributed_table
+
+
+step s1-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO on;
+
+step s1-begin:
+	BEGIN;
+
+step s1-add-second-worker:
+	SELECT 1 FROM master_add_node('localhost', 57638);
+
+?column?
+
+1
+step s2-create-reference-table-2:
+	SELECT create_reference_table('test_reference_table_2');
+ <waiting ...>
+step s1-commit:
+	COMMIT;
+
+step s2-create-reference-table-2: <... completed>
+create_reference_table
+
+
+step s2-print-content-2:
+	SELECT
+		nodeport, success, result
+	FROM
+		run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
+	ORDER BY
+		nodeport;
+
+nodeport success result
+
+57637 t 1
+57638 t 1
+master_remove_node
+
+
+
+starting permutation: s1-replicate-on-activate s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
+create_distributed_table
+
+
+step s1-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO on;
+
+step s2-begin:
+	BEGIN;
+
+step s2-create-reference-table-2:
+	SELECT create_reference_table('test_reference_table_2');
+
+create_reference_table
+
+
+step s1-add-second-worker:
+	SELECT 1 FROM master_add_node('localhost', 57638);
+ <waiting ...>
+step s2-commit:
+	COMMIT;
+
+step s1-add-second-worker: <... completed>
+?column?
+
+1
+step s2-print-content-2:
+	SELECT
+		nodeport, success, result
+	FROM
+		run_command_on_placements('test_reference_table_2', 'select count(*) from %s')
+	ORDER BY
+		nodeport;
+
+nodeport success result
+
+57637 t 1
+57638 t 1
+master_remove_node
+
+
+
+starting permutation: s1-do-not-replicate-on-activate s1-add-second-worker s2-begin s1-begin s1-drop-reference-table s2-replicate-reference-tables s1-commit s2-commit
+create_distributed_table
+
+
+step s1-do-not-replicate-on-activate:
+	SET citus.replicate_reference_tables_on_activate TO off;
+
 step s1-add-second-worker:
 	SELECT 1 FROM master_add_node('localhost', 57638);
 
@@ -28,11 +28,11 @@ step detector-dump-wait-edges:
 
 waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
 
-360 359 f
+392 391 f
 transactionnumberwaitingtransactionnumbers
 
-359
-360 359
+391
+392 391
 step s1-abort:
 	ABORT;
 

@@ -75,14 +75,14 @@ step detector-dump-wait-edges:
 
 waiting_transaction_numblocking_transaction_numblocking_transaction_waiting
 
-364 363 f
-365 363 f
-365 364 t
+396 395 f
+397 395 f
+397 396 t
 transactionnumberwaitingtransactionnumbers
 
-363
-364 363
-365 363,364
+395
+396 395
+397 395,396
 step s1-abort:
 	ABORT;
 
@@ -2,6 +2,7 @@ CREATE SCHEMA local_shard_copy;
 SET search_path TO local_shard_copy;
 SET client_min_messages TO DEBUG;
 SET citus.next_shard_id TO 1570000;
+SET citus.replicate_reference_tables_on_activate TO off;
 SELECT * FROM master_add_node('localhost', :master_port, groupid := 0);
 DEBUG:  schema "public" already exists, skipping
 DETAIL:  NOTICE from localhost:xxxxx
@@ -4,6 +4,7 @@
 -- Tests for metadata snapshot functions, metadata syncing functions and propagation of
 -- metadata changes to MX tables.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+SET citus.replicate_reference_tables_on_activate TO off;
 SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
 \gset
 ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000;

@@ -1394,6 +1395,7 @@ WHERE logicalrelid='mx_ref'::regclass;
 
 \c - - - :master_port
 SELECT master_add_node('localhost', :worker_2_port);
+NOTICE:  Replicating reference table "mx_ref" to the node localhost:xxxxx
  master_add_node
 ---------------------------------------------------------------------
  7

@@ -1406,7 +1408,8 @@ ORDER BY shardid, nodeport;
  shardid | nodename  | nodeport
 ---------------------------------------------------------------------
  1310073 | localhost |    57637
-(1 row)
+ 1310073 | localhost |    57638
+(2 rows)
 
 \c - - - :worker_1_port
 SELECT shardid, nodename, nodeport

@@ -1416,7 +1419,8 @@ ORDER BY shardid, nodeport;
  shardid | nodename  | nodeport
 ---------------------------------------------------------------------
  1310073 | localhost |    57637
-(1 row)
+ 1310073 | localhost |    57638
+(2 rows)
 
 -- Get the metadata back into a consistent state
 \c - - - :master_port
@@ -8,6 +8,7 @@ SELECT nextval('pg_catalog.pg_dist_shardid_seq') AS last_shard_id \gset
 SET citus.replication_model TO streaming;
 SET citus.shard_count TO 8;
 SET citus.shard_replication_factor TO 1;
+SET citus.replicate_reference_tables_on_activate TO off;
 -- Simulates a readonly node by setting default_transaction_read_only.
 CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN)
 	RETURNS TEXT
@@ -3,6 +3,7 @@
 --
 -- Tests that check the metadata after master_remove_node.
 SET citus.next_shard_id TO 1380000;
+SET citus.replicate_reference_tables_on_activate TO off;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1380000;
 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1380000;
 ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1380000;

@@ -216,6 +217,7 @@ WHERE
 (0 rows)
 
 \c - - - :master_port
+SET citus.replicate_reference_tables_on_activate TO off;
 -- remove same node twice
 SELECT master_remove_node('localhost', :worker_2_port);
 ERROR:  node at "localhost:xxxxx" does not exist

@@ -448,6 +450,7 @@ WHERE
 (0 rows)
 
 \c - - - :master_port
+SET citus.replicate_reference_tables_on_activate TO off;
 -- re-add the node for next tests
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
  ?column?

@@ -570,6 +573,7 @@ SELECT * FROM remove_node_reference_table;
 (1 row)
 
 \c - - - :master_port
+SET citus.replicate_reference_tables_on_activate TO off;
 -- re-add the node for next tests
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
  ?column?

@@ -679,6 +683,7 @@ WHERE
 (0 rows)
 
 \c - - - :master_port
+SET citus.replicate_reference_tables_on_activate TO off;
 SET citus.next_shard_id TO 1380001;
 -- verify table structure is changed
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass;

@@ -885,6 +890,7 @@ WHERE
 (0 rows)
 
 \c - - - :master_port
+SET citus.replicate_reference_tables_on_activate TO off;
 -- re-add the node for next tests
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
  ?column?

@@ -994,6 +1000,7 @@ WHERE
 (0 rows)
 
 \c - - - :master_port
+SET citus.replicate_reference_tables_on_activate TO off;
 -- re-add the node for next tests
 SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
  ?column?
@@ -8,6 +8,7 @@ SET citus.next_shard_id TO 1370000;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000;
 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1370000;
 ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000;
+SET citus.replicate_reference_tables_on_activate TO off;
 -- only query shards created in this test
 CREATE VIEW pg_dist_shard_placement_view AS
 SELECT * FROM pg_dist_shard_placement WHERE shardid BETWEEN 1370000 AND 1380000;

@@ -846,6 +847,73 @@ SELECT min(result) = max(result) AS consistent FROM run_command_on_placements('r
  t
 (1 row)
 
+SET client_min_messages TO WARNING;
+SELECT count(*) AS ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass \gset
+-- remove reference table replica from worker 2
+SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SELECT count(*) - :ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
+ ?column?
+---------------------------------------------------------------------
+       -1
+(1 row)
+
+-- test setting citus.replicate_reference_tables_on_activate to on
+-- master_add_node
+SET citus.replicate_reference_tables_on_activate TO on;
+SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SELECT count(*) - :ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
+ ?column?
+---------------------------------------------------------------------
+        0
+(1 row)
+
+-- master_activate_node
+SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SELECT count(*) - :ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
+ ?column?
+---------------------------------------------------------------------
+       -1
+(1 row)
+
+SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+SELECT count(*) - :ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
+ ?column?
+---------------------------------------------------------------------
+        0
+(1 row)
+
+SELECT min(result) = max(result) AS consistent FROM run_command_on_placements('ref_table', 'SELECT sum(a) FROM %s');
+ consistent
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- test adding an invalid node while we have reference tables to replicate
+-- set client message level to ERROR and verbosity to terse to suppress
+-- OS-dependent host name resolution warnings
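
A compact sketch of the inactive-node flow exercised by the expected output
above: with the GUC on, reference table placements are created only at
activation time, not when the inactive node is registered.

    SET citus.replicate_reference_tables_on_activate TO on;
    SELECT 1 FROM master_add_inactive_node('localhost', 57638); -- no placements yet
    SELECT 1 FROM master_activate_node('localhost', 57638);     -- placements copied here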
@@ -4,6 +4,7 @@
 -- Tests that check the metadata returned by the master node.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000;
 SET citus.shard_count TO 4;
+SET client_min_messages TO WARNING;
 -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK
 CREATE TABLE transactional_drop_shards(column1 int);
 SELECT create_distributed_table('transactional_drop_shards', 'column1');

@@ -653,6 +654,7 @@ ORDER BY
 (0 rows)
 
 \c - - - :master_port
+SET client_min_messages TO WARNING;
 -- try using the coordinator as a worker and then dropping the table
 SELECT 1 FROM master_add_node('localhost', :master_port);
  ?column?

@@ -685,8 +687,6 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 
 -- test DROP TABLE as a non-superuser in a transaction block
 CREATE USER try_drop_table WITH LOGIN;
-NOTICE:  not propagating CREATE ROLE/USER commands to worker nodes
-HINT:  Connect to worker nodes directly to manually create all necessary users and roles.
 SELECT run_command_on_workers('CREATE USER try_drop_table WITH LOGIN');
  run_command_on_workers
 ---------------------------------------------------------------------
@@ -1,6 +1,7 @@
 CREATE SCHEMA "extension'test";
 -- use a schema name with escape character
 SET search_path TO "extension'test";
+SET client_min_messages TO WARNING;
 -- create an extension on the given search_path
 -- the extension is on contrib, so should be available for the regression tests
 CREATE EXTENSION seg;

@@ -88,10 +89,8 @@ SELECT create_reference_table('ref_table');
 (1 row)
 
 -- now, drop the extension, recreate it with an older version and update it to latest version
-SET client_min_messages TO WARNING;
 DROP EXTENSION isn CASCADE;
 CREATE EXTENSION isn WITH VERSION "1.1";
-RESET client_min_messages;
 -- before updating the version, ensure the current version
 SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
  run_command_on_workers

@@ -137,22 +136,16 @@ SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELE
 (localhost,57638,t,public)
 (2 rows)
 
--- SET client_min_messages TO WARNING before executing a DROP EXTENSION statement
-SET client_min_messages TO WARNING;
 -- drop the extension finally
 DROP EXTENSION isn CASCADE;
--- restore client_min_messages after DROP EXTENSION
-RESET client_min_messages;
 -- now make sure that the reference tables depending on an extension can be successfully created.
 -- we should also ensure that we replicate this reference table (and hence the extension)
 -- to new nodes after calling master_activate_node.
 -- now, first drop seg and existing objects before next test
-SET client_min_messages TO WARNING;
 DROP EXTENSION seg CASCADE;
 -- but as we have only 2 ports in postgresql tests, let's remove one of the nodes first
 -- before remove, first remove the existing relations (due to the other tests)
 DROP SCHEMA "extension'test" CASCADE;
-RESET client_min_messages;
 SELECT 1 from master_remove_node('localhost', :worker_2_port);
  ?column?
 ---------------------------------------------------------------------

@@ -321,11 +314,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname
 CREATE TABLE t1 (A int);
 CREATE VIEW v1 AS select * from t1;
 ALTER EXTENSION seg ADD VIEW v1;
-NOTICE:  Citus does not propagate adding/dropping member objects
-HINT:  You can add/drop the member objects on the workers as well.
 ALTER EXTENSION seg DROP VIEW v1;
-NOTICE:  Citus does not propagate adding/dropping member objects
-HINT:  You can add/drop the member objects on the workers as well.
 DROP VIEW v1;
 DROP TABLE t1;
 -- drop multiple extensions at the same time

@@ -334,8 +323,6 @@ CREATE EXTENSION isn WITH VERSION '1.1' SCHEMA public;
 set citus.enable_ddl_propagation to 'off';
 CREATE EXTENSION pg_buffercache;
 set citus.enable_ddl_propagation to 'on';
--- SET client_min_messages TO WARNING before executing a DROP EXTENSION statement
-SET client_min_messages TO WARNING;
 DROP EXTENSION pg_buffercache, isn CASCADE;
 SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn');
  count

@@ -343,10 +330,6 @@ SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn');
      0
 (1 row)
 
--- restore client_min_messages after DROP EXTENSION
-RESET client_min_messages;
--- SET client_min_messages TO WARNING before executing a DROP EXTENSION statement
-SET client_min_messages TO WARNING;
 -- drop extension should just work
 DROP EXTENSION seg CASCADE;
 SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');

@@ -362,8 +345,6 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname
 (localhost,57638,t,0)
 (2 rows)
 
--- restore client_min_messages after DROP EXTENSION
-RESET client_min_messages;
 -- make sure that the extension is not available anymore as a distributed object
 SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
  count

@@ -402,13 +383,11 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam
 (2 rows)
 
 -- drop the schema and all the objects
-SET client_min_messages TO WARNING;
 DROP SCHEMA "extension'test" CASCADE;
 -- recreate for the next tests
 CREATE SCHEMA "extension'test";
 -- use a schema name with escape character
 SET search_path TO "extension'test";
-RESET client_min_messages;
 -- remove the node, we'll add back again
 SELECT 1 from master_remove_node('localhost', :worker_2_port);
  ?column?

@@ -462,5 +441,4 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname
 (2 rows)
 
 -- drop the schema and all the objects
-SET client_min_messages TO WARNING;
 DROP SCHEMA "extension'test" CASCADE;
@@ -29,6 +29,16 @@ step "s1-begin"
 	BEGIN;
 }
 
+step "s1-replicate-on-activate"
+{
+	SET citus.replicate_reference_tables_on_activate TO on;
+}
+
+step "s1-do-not-replicate-on-activate"
+{
+	SET citus.replicate_reference_tables_on_activate TO off;
+}
+
 step "s1-add-second-worker"
 {
 	SELECT 1 FROM master_add_node('localhost', 57638);

@@ -129,25 +139,35 @@ step "s2-print-index-count"
 // note that we need to run "s1-load-metadata-cache" and "s2-load-metadata-cache"
 // to ensure that metadata is cached otherwise the test would be useless since
 // the cache would be empty and the metadata data is gathered from the tables directly
-permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content"
-permutation "s2-load-metadata-cache" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
-permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content"
-permutation "s2-load-metadata-cache" "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
-permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
-permutation "s2-load-metadata-cache" "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
-permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
-permutation "s2-load-metadata-cache" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"
+permutation "s1-do-not-replicate-on-activate" "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content"
+permutation "s1-do-not-replicate-on-activate" "s2-load-metadata-cache" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
+permutation "s1-do-not-replicate-on-activate" "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content"
+permutation "s1-do-not-replicate-on-activate" "s2-load-metadata-cache" "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
+permutation "s1-do-not-replicate-on-activate" "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
+permutation "s1-do-not-replicate-on-activate" "s2-load-metadata-cache" "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
+permutation "s1-do-not-replicate-on-activate" "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
+permutation "s1-do-not-replicate-on-activate" "s2-load-metadata-cache" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"
 
 
 // same tests without loading the cache
-permutation "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content"
-permutation "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
-permutation "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content"
-permutation "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
-permutation "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
-permutation "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
-permutation "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
-permutation "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"
+permutation "s1-do-not-replicate-on-activate" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content"
+permutation "s1-do-not-replicate-on-activate" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
+permutation "s1-do-not-replicate-on-activate" "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content"
+permutation "s1-do-not-replicate-on-activate" "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
+permutation "s1-do-not-replicate-on-activate" "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
+permutation "s1-do-not-replicate-on-activate" "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
+permutation "s1-do-not-replicate-on-activate" "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
+permutation "s1-do-not-replicate-on-activate" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"
+
+// same tests with replicate on activate
+permutation "s1-replicate-on-activate" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content"
+permutation "s1-replicate-on-activate" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
+permutation "s1-replicate-on-activate" "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content"
+permutation "s1-replicate-on-activate" "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content"
+permutation "s1-replicate-on-activate" "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count"
+permutation "s1-replicate-on-activate" "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count"
+permutation "s1-replicate-on-activate" "s1-begin" "s1-add-second-worker" "s2-create-reference-table-2" "s1-commit" "s2-print-content-2"
+permutation "s1-replicate-on-activate" "s2-begin" "s2-create-reference-table-2" "s1-add-second-worker" "s2-commit" "s2-print-content-2"
 
 // verify drop table blocks replicate reference tables
-permutation "s1-add-second-worker" "s2-begin" "s1-begin" "s1-drop-reference-table" "s2-replicate-reference-tables" "s1-commit" "s2-commit"
+permutation "s1-do-not-replicate-on-activate" "s1-add-second-worker" "s2-begin" "s1-begin" "s1-drop-reference-table" "s2-replicate-reference-tables" "s1-commit" "s2-commit"
@@ -8,6 +8,7 @@
SELECT citus.mitmproxy('conn.allow()');
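-- citus.mitmproxy() programs the proxy that the failure tests place between
-- the coordinator and a worker; 'conn.allow()' clears any active failure rule.
-- A representative rule and its reset (illustrative, not part of this hunk):
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
SELECT citus.mitmproxy('conn.allow()');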
SET citus.next_shard_id TO 200000;
SET citus.replicate_reference_tables_on_activate TO off;

-- verify we have all worker nodes present
SELECT * FROM master_get_active_worker_nodes()
@@ -3,6 +3,7 @@ SET search_path TO local_shard_copy;
SET client_min_messages TO DEBUG;
SET citus.next_shard_id TO 1570000;
SET citus.replicate_reference_tables_on_activate TO off;

SELECT * FROM master_add_node('localhost', :master_port, groupid := 0);
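-- Passing groupid := 0 registers the coordinator itself as a node (group 0 is
-- the coordinator's group). An illustrative check, not part of the original
-- test:
SELECT nodename, nodeport FROM pg_dist_node WHERE groupid = 0;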
@@ -7,6 +7,7 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
SET citus.replicate_reference_tables_on_activate TO off;

SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
\gset
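-- \gset stores each column of the preceding result row in a psql variable
-- named after the column, so the value is usable as :last_placement_id.
-- An illustrative use, not part of the original test:
SELECT :last_placement_id + 1 AS next_placement_id;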
@@ -11,6 +11,7 @@ SELECT nextval('pg_catalog.pg_dist_shardid_seq') AS last_shard_id \gset
SET citus.replication_model TO streaming;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
SET citus.replicate_reference_tables_on_activate TO off;

-- Simulates a readonly node by setting default_transaction_read_only.
CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN)
@@ -5,6 +5,7 @@
SET citus.next_shard_id TO 1380000;
SET citus.replicate_reference_tables_on_activate TO off;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1380000;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1380000;
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1380000;
@@ -115,6 +116,7 @@ WHERE
nodeport = :worker_2_port;

\c - - - :master_port
SET citus.replicate_reference_tables_on_activate TO off;

-- remove same node twice
SELECT master_remove_node('localhost', :worker_2_port);

@@ -260,6 +262,7 @@ WHERE
nodeport = :worker_2_port;

\c - - - :master_port
SET citus.replicate_reference_tables_on_activate TO off;

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);

@@ -335,6 +338,7 @@ WHERE
SELECT * FROM remove_node_reference_table;

\c - - - :master_port
SET citus.replicate_reference_tables_on_activate TO off;

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);

@@ -406,6 +410,7 @@ WHERE
nodeport = :worker_2_port;

\c - - - :master_port
SET citus.replicate_reference_tables_on_activate TO off;

SET citus.next_shard_id TO 1380001;

@@ -528,6 +533,7 @@ WHERE
nodeport = :worker_2_port;

\c - - - :master_port
SET citus.replicate_reference_tables_on_activate TO off;

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);

@@ -599,6 +605,7 @@ WHERE
nodeport = :worker_2_port;

\c - - - :master_port
SET citus.replicate_reference_tables_on_activate TO off;

-- re-add the node for next tests
SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
@@ -11,6 +11,8 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1370000;
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000;

SET citus.replicate_reference_tables_on_activate TO off;

-- only query shards created in this test
CREATE VIEW pg_dist_shard_placement_view AS
SELECT * FROM pg_dist_shard_placement WHERE shardid BETWEEN 1370000 AND 1380000;
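-- With the view in place, the test's assertions can read it instead of the
-- full catalog. An illustrative query, not part of the original test:
SELECT count(*) FROM pg_dist_shard_placement_view;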
@@ -547,6 +549,34 @@ COMMIT;
SELECT min(result) = max(result) AS consistent FROM run_command_on_placements('ref_table', 'SELECT sum(a) FROM %s');

SET client_min_messages TO WARNING;

SELECT count(*) AS ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass \gset

-- remove reference table replica from worker 2
SELECT 1 FROM master_remove_node('localhost', :worker_2_port);

SELECT count(*) - :ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;

-- test setting citus.replicate_reference_tables_on_activate to on
-- master_add_node
SET citus.replicate_reference_tables_on_activate TO on;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);

SELECT count(*) - :ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;

-- master_activate_node
SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port);

SELECT count(*) - :ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;

SELECT 1 FROM master_activate_node('localhost', :worker_2_port);

SELECT count(*) - :ref_table_placements FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;

SELECT min(result) = max(result) AS consistent FROM run_command_on_placements('ref_table', 'SELECT sum(a) FROM %s');
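-- Recap of the expected deltas above (values inferred from the test's intent,
-- not captured output): -1 right after master_remove_node, still -1 after
-- master_add_inactive_node, and back to 0 once master_add_node or
-- master_activate_node runs with the GUC enabled. Illustrative check:
SELECT current_setting('citus.replicate_reference_tables_on_activate') AS guc_value; -- expected: on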
-- test adding an invalid node while we have reference tables to replicate
-- set client message level to ERROR and verbosity to terse to suppress
-- OS-dependent host name resolution warnings
@@ -7,6 +7,7 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000;

SET citus.shard_count TO 4;
SET client_min_messages TO WARNING;

-- test DROP TABLE (ergo master_drop_all_shards) in a transaction, then ROLLBACK
CREATE TABLE transactional_drop_shards(column1 int);
@@ -362,6 +363,7 @@ ORDER BY
shardid, nodename, nodeport;

\c - - - :master_port
SET client_min_messages TO WARNING;

-- try using the coordinator as a worker and then dropping the table
SELECT 1 FROM master_add_node('localhost', :master_port);
@@ -3,6 +3,8 @@ CREATE SCHEMA "extension'test";
-- use a schema name with escape character
SET search_path TO "extension'test";

SET client_min_messages TO WARNING;

-- create an extension on the given search_path
-- the extension is in contrib, so it should be available for the regression tests
CREATE EXTENSION seg;
@@ -48,10 +50,8 @@ CREATE TABLE ref_table (a public.issn);
SELECT create_reference_table('ref_table');

-- now, drop the extension, recreate it with an older version, and update it to the latest version
SET client_min_messages TO WARNING;
DROP EXTENSION isn CASCADE;
CREATE EXTENSION isn WITH VERSION "1.1";
RESET client_min_messages;

-- before updating the version, ensure the current version
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
@@ -77,26 +77,20 @@ SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_exte
-- show that the ALTER EXTENSION command is propagated
SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);

-- SET client_min_messages TO WARNING before executing a DROP EXTENSION statement
SET client_min_messages TO WARNING;
-- drop the extension finally
DROP EXTENSION isn CASCADE;
-- restore client_min_messages after DROP EXTENSION
RESET client_min_messages;

-- now make sure that the reference tables depending on an extension can be successfully created.
-- we should also ensure that we replicate this reference table (and hence the extension)
-- to new nodes after calling master_activate_node.
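-- A sketch of the scenario described above (object names are illustrative,
-- not the test's actual ones): a column of an extension-provided type makes
-- the table depend on the extension, so activating a node must replicate both.
CREATE TABLE ref_ext_sketch (s seg);
SELECT create_reference_table('ref_ext_sketch');
DROP TABLE ref_ext_sketch;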
-- now, first drop seg and existing objects before next test
SET client_min_messages TO WARNING;
DROP EXTENSION seg CASCADE;

-- but as we have only 2 ports in postgresql tests, let's remove one of the nodes first
-- before remove, first remove the existing relations (due to the other tests)

DROP SCHEMA "extension'test" CASCADE;
RESET client_min_messages;
SELECT 1 from master_remove_node('localhost', :worker_2_port);

-- then create the extension
@@ -189,15 +183,8 @@ set citus.enable_ddl_propagation to 'off';
CREATE EXTENSION pg_buffercache;
set citus.enable_ddl_propagation to 'on';

-- SET client_min_messages TO WARNING before executing a DROP EXTENSION statement
SET client_min_messages TO WARNING;
DROP EXTENSION pg_buffercache, isn CASCADE;
SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn');
-- restore client_min_messages after DROP EXTENSION
RESET client_min_messages;

-- SET client_min_messages TO WARNING before executing a DROP EXTENSION statement
SET client_min_messages TO WARNING;

-- drop extension should just work
DROP EXTENSION seg CASCADE;
@@ -205,9 +192,6 @@ DROP EXTENSION seg CASCADE;
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'seg'$$);

-- restore client_min_messages after DROP EXTENSION
RESET client_min_messages;

-- make sure that the extension is not available anymore as a distributed object
SELECT count(*) FROM citus.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
@@ -229,7 +213,6 @@ ROLLBACK;
SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$);

-- drop the schema and all the objects
SET client_min_messages TO WARNING;
DROP SCHEMA "extension'test" CASCADE;

-- recreate for the next tests
@@ -238,8 +221,6 @@ CREATE SCHEMA "extension'test";
-- use a schema name with escape character
SET search_path TO "extension'test";

RESET client_min_messages;

-- remove the node, we'll add it back again
SELECT 1 from master_remove_node('localhost', :worker_2_port);
@@ -269,5 +250,4 @@ SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_ext
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$);

-- drop the schema and all the objects
SET client_min_messages TO WARNING;
DROP SCHEMA "extension'test" CASCADE;