mirror of https://github.com/citusdata/citus.git

Stabilize metadata syncing (#6728)

**Motivation**

Some customers hit **out of memory** or **max allocation block size** errors during metadata sync when they had many shards, partitions, indexes, or columns. This PR aims to prevent those two types of memory failure, to improve the scalability of Citus and unblock customers with huge clusters by letting them **add new nodes** and **upgrade their Citus version above 11.0**, which introduced important features such as querying from any node.

**Problems**

The memory errors stem from the fact that we run all metadata sync operations within a single coordinated transaction, which causes three main problems:

1. We collect metadata sync commands without freeing them until the end of the transaction.
2. Each modification generates PG cache invalidations. PG stores those invalidations until the end of the transaction (for visibility guarantees) so it can notify other backends. Because we perform many modifications during metadata sync within a single coordinated transaction, PG can exceed the max allocation block size on worker nodes due to the huge invalidation messages.
3. Citus keeps MetadataCacheMemory for fast access to metadata objects. To see the effects of modifications inside the same transaction, we locally process PG invalidations and rebuild many objects, without freeing the invalidated ones until the end of the transaction, for simplicity.

**Solution**

We decided to add a nontransactional mode for metadata sync, in which we send each command in a separate transaction and reset the memory context after each transaction. Users can switch to nontransactional mode via a GUC if they hit memory problems during the sync. (The default mode is transactional.) We created a common API for both the transactional (old) and nontransactional modes, to keep the code uniform and to avoid disturbing test coverage by introducing new code paths. The following items are addressed by the solution:

- [x] **Commit-1** Add a method to send multiple commands to a worker list, reusing bare connections. The change will be useful for the metadata sync API.
- [x] **Commit-2** Create the MetadataSyncContext API to encapsulate both transactional and nontransactional modes.
- [x] **Commit-3** Let nontransactional sync mode create a transaction per shell table while dropping the shell tables from workers.
- [x] **Commit-4** Add new metadata sync methods that use the MetadataSyncContext API so that during the sync we can 1. free memory to prevent OOM, and 2. use either transactional or nontransactional mode according to the GUC `citus.metadata_sync_mode`.
- [x] **Commit-5** Let `ActivateNode` use the new metadata sync API.
- [x] **Commit-6** Let `activate_node_snapshot` use the new metadata sync API.
- [x] **Commit-7** Remove unused old metadata sync methods.
- [x] **Commit-8** Drop the table, if it exists, during table dependency creation.
- [x] **Commit-9** Do not enforce a distributed transaction at `EnsureCoordinatorInitiatedOperation`.
- [x] **Commit-10** Do not acquire a strict lock in a separate transaction to localhost, as we already take the lock before.
- [x] **Commit-11** Let `AddNodeMetadata` use the metadata sync API during `citus_add_node`.
- [x] **Commit-12** Force activated bare connections to close at transaction end.
- [x] **Commit-13** Add failure tests for nontransactional metadata sync mode.
- [x] Verify that OOM and max allowed allocation block errors do not happen with nontransactional sync mode.

DESCRIPTION: Fixes memory leak and max allocation block errors during metadata syncing.
DESCRIPTION: Introduces nontransactional mode for metadata sync.
DESCRIPTION: Introduces the GUC `citus.metadata_sync_mode` to switch sync modes.
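A minimal usage sketch (host and port are placeholders): a superuser who hits memory errors during sync can opt into the new mode before (re)activating a node:

```sql
-- opt into the nontransactional sync mode introduced by this PR
SET citus.metadata_sync_mode TO 'nontransactional';

-- node activation now sends each sync command in its own transaction
SELECT citus_activate_node('localhost', 5432);

-- restore the default single-transaction behavior
RESET citus.metadata_sync_mode;
```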
commit 104e85e18f
```diff
@@ -29,16 +29,14 @@
 #include "storage/lmgr.h"
 #include "utils/lsyscache.h"
 
-typedef bool (*AddressPredicate)(const ObjectAddress *);
 
 static void EnsureDependenciesCanBeDistributed(const ObjectAddress *relationAddress);
 static void ErrorIfCircularDependencyExists(const ObjectAddress *objectAddress);
 static int ObjectAddressComparator(const void *a, const void *b);
-static List * FilterObjectAddressListByPredicate(List *objectAddressList,
-												 AddressPredicate predicate);
 static void EnsureDependenciesExistOnAllNodes(const ObjectAddress *target);
 static List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency);
 static bool ShouldPropagateObject(const ObjectAddress *address);
+static char * DropTableIfExistsCommand(Oid relationId);
 
 /*
  * EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes
```
```diff
@@ -325,6 +323,21 @@ GetDistributableDependenciesForObject(const ObjectAddress *target)
 }
 
 
+/*
+ * DropTableIfExistsCommand returns command to drop given table if exists.
+ */
+static char *
+DropTableIfExistsCommand(Oid relationId)
+{
+	char *qualifiedRelationName = generate_qualified_relation_name(relationId);
+	StringInfo dropTableCommand = makeStringInfo();
+	appendStringInfo(dropTableCommand, "DROP TABLE IF EXISTS %s CASCADE",
+					 qualifiedRelationName);
+
+	return dropTableCommand->data;
+}
+
+
 /*
  * GetDependencyCreateDDLCommands returns a list (potentially empty or NIL) of ddl
  * commands to execute on a worker to create the object.
```
```diff
@@ -379,6 +392,10 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
 			commandList = lappend(commandList, GetTableDDLCommand(
 									  tableDDLCommand));
 		}
 
+		/* we need to drop table, if exists, first to make table creation idempotent */
+		commandList = lcons(DropTableIfExistsCommand(relationId),
+							commandList);
 	}
 
 	return commandList;
```
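To illustrate the idempotency point (schema and table name are hypothetical), the generated command list for a table now begins with a guarded drop, so replaying it on a worker that already has the table succeeds:

```sql
DROP TABLE IF EXISTS public.dist1 CASCADE;
-- ...followed by the CREATE TABLE and remaining DDL commands for the same table
```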
```diff
@@ -532,68 +549,6 @@ GetAllDependencyCreateDDLCommands(const List *dependencies)
 }
 
 
-/*
- * ReplicateAllObjectsToNodeCommandList returns commands to replicate all
- * previously marked objects to a worker node. The function also sets
- * clusterHasDistributedFunction if there are any distributed functions.
- */
-List *
-ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort)
-{
-	/* since we are executing ddl commands disable propagation first, primarily for mx */
-	List *ddlCommands = list_make1(DISABLE_DDL_PROPAGATION);
-
-	/*
-	 * collect all dependencies in creation order and get their ddl commands
-	 */
-	List *dependencies = GetDistributedObjectAddressList();
-
-	/*
-	 * Depending on changes in the environment, such as the enable_metadata_sync guc
-	 * there might be objects in the distributed object address list that should currently
-	 * not be propagated by citus as they are 'not supported'.
-	 */
-	dependencies = FilterObjectAddressListByPredicate(dependencies,
-													  &SupportedDependencyByCitus);
-
-	/*
-	 * When dependency lists are getting longer we see a delay in the creation time on the
-	 * workers. We would like to inform the user. Currently we warn for lists greater than
-	 * 100 items, where 100 is an arbitrarily chosen number. If we find it too high or too
-	 * low we can adjust this based on experience.
-	 */
-	if (list_length(dependencies) > 100)
-	{
-		ereport(NOTICE, (errmsg("Replicating postgres objects to node %s:%d", nodeName,
-								nodePort),
-						 errdetail("There are %d objects to replicate, depending on your "
-								   "environment this might take a while",
-								   list_length(dependencies))));
-	}
-
-	dependencies = OrderObjectAddressListInDependencyOrder(dependencies);
-	ObjectAddress *dependency = NULL;
-	foreach_ptr(dependency, dependencies)
-	{
-		if (IsAnyObjectAddressOwnedByExtension(list_make1(dependency), NULL))
-		{
-			/*
-			 * we expect extension-owned objects to be created as a result
-			 * of the extension being created.
-			 */
-			continue;
-		}
-
-		ddlCommands = list_concat(ddlCommands,
-								  GetDependencyCreateDDLCommands(dependency));
-	}
-
-	ddlCommands = lappend(ddlCommands, ENABLE_DDL_PROPAGATION);
-
-	return ddlCommands;
-}
-
-
 /*
  * ShouldPropagate determines if we should be propagating anything
  */
```
```diff
@@ -749,7 +704,7 @@ ShouldPropagateAnyObject(List *addresses)
  * FilterObjectAddressListByPredicate takes a list of ObjectAddress *'s and returns a list
  * only containing the ObjectAddress *'s for which the predicate returned true.
  */
-static List *
+List *
 FilterObjectAddressListByPredicate(List *objectAddressList, AddressPredicate predicate)
 {
 	List *result = NIL;
```
```diff
@@ -1202,6 +1202,17 @@ FinishConnectionEstablishment(MultiConnection *connection)
 }
 
 
+/*
+ * ForceConnectionCloseAtTransactionEnd marks connection to be closed at the end of the
+ * transaction.
+ */
+void
+ForceConnectionCloseAtTransactionEnd(MultiConnection *connection)
+{
+	connection->forceCloseAtTransactionEnd = true;
+}
+
+
 /*
  * ClaimConnectionExclusively signals that this connection is actively being
  * used. That means it'll not be, again, returned by
```
(Two file diffs suppressed because they are too large.)
```diff
@@ -360,6 +360,11 @@ static const struct config_enum_entry cpu_priority_options[] = {
 	{ NULL, 0, false}
 };
 
+static const struct config_enum_entry metadata_sync_mode_options[] = {
+	{ "transactional", METADATA_SYNC_TRANSACTIONAL, false },
+	{ "nontransactional", METADATA_SYNC_NON_TRANSACTIONAL, false },
+	{ NULL, 0, false }
+};
 
 /* *INDENT-ON* */
 
@@ -1880,6 +1885,21 @@ RegisterCitusConfigVariables(void)
 		GUC_UNIT_MS | GUC_NO_SHOW_ALL,
 		NULL, NULL, NULL);
 
+	DefineCustomEnumVariable(
+		"citus.metadata_sync_mode",
+		gettext_noop("Sets transaction mode for metadata syncs."),
+		gettext_noop("metadata sync can be run inside a single coordinated "
+					 "transaction or with multiple small transactions in "
+					 "idempotent way. By default we sync metadata in single "
+					 "coordinated transaction. When we hit memory problems "
+					 "at workers, we have alternative nontransactional mode "
+					 "where we send each command with separate transaction."),
+		&MetadataSyncTransMode,
+		METADATA_SYNC_TRANSACTIONAL, metadata_sync_mode_options,
+		PGC_SUSET,
+		GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL,
+		NULL, NULL, NULL);
+
 	DefineCustomIntVariable(
 		"citus.metadata_sync_retry_interval",
 		gettext_noop("Sets the interval to retry failed metadata syncs."),
```
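Because the GUC is registered with PGC_SUSET and GUC_SUPERUSER_ONLY, only superusers can set or inspect it; GUC_NO_SHOW_ALL merely hides it from SHOW ALL. A quick check (as superuser) might look like:

```sql
SHOW citus.metadata_sync_mode;  -- expected default: transactional
SET citus.metadata_sync_mode TO 'nontransactional';
```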
```diff
@@ -7,3 +7,6 @@ ALTER TABLE pg_catalog.pg_dist_placement REPLICA IDENTITY USING INDEX pg_dist_pl
 ALTER TABLE pg_catalog.pg_dist_rebalance_strategy REPLICA IDENTITY USING INDEX pg_dist_rebalance_strategy_name_key;
 ALTER TABLE pg_catalog.pg_dist_shard REPLICA IDENTITY USING INDEX pg_dist_shard_shardid_index;
 ALTER TABLE pg_catalog.pg_dist_transaction REPLICA IDENTITY USING INDEX pg_dist_transaction_unique_constraint;
+
+#include "udfs/worker_drop_all_shell_tables/11.3-1.sql"
+#include "udfs/citus_internal_mark_node_not_synced/11.3-1.sql"
```
```diff
@@ -17,3 +17,6 @@ ALTER TABLE pg_catalog.pg_dist_placement REPLICA IDENTITY NOTHING;
 ALTER TABLE pg_catalog.pg_dist_rebalance_strategy REPLICA IDENTITY NOTHING;
 ALTER TABLE pg_catalog.pg_dist_shard REPLICA IDENTITY NOTHING;
 ALTER TABLE pg_catalog.pg_dist_transaction REPLICA IDENTITY NOTHING;
+
+DROP PROCEDURE pg_catalog.worker_drop_all_shell_tables(bool);
+DROP FUNCTION pg_catalog.citus_internal_mark_node_not_synced(int, int);
```
src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/11.3-1.sql (generated, new file, 6 lines):

```sql
CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int)
    RETURNS VOID
    LANGUAGE C STRICT
    AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$;
COMMENT ON FUNCTION citus_internal_mark_node_not_synced(int, int)
    IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.';
```
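The function is invoked internally through the CITUS_INTERNAL_MARK_NODE_NOT_SYNCED template that appears later in this diff; with placeholder arguments the generated statement is:

```sql
-- parent_pid and nodeid are illustrative values
SELECT citus_internal_mark_node_not_synced(12345, 2);
```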
src/backend/distributed/sql/udfs/worker_drop_all_shell_tables/11.3-1.sql (generated, new file, 23 lines):

```sql
-- During metadata sync, when we send many ddls over a single transaction, the worker node can error
-- due to reaching the max allocation block size for invalidation messages. To work around the problem,
-- we added a nontransactional metadata sync mode where we create many transactions while dropping shell
-- tables via https://github.com/citusdata/citus/pull/6728.
CREATE OR REPLACE PROCEDURE pg_catalog.worker_drop_all_shell_tables(singleTransaction bool DEFAULT true)
LANGUAGE plpgsql
AS $$
DECLARE
    table_name text;
BEGIN
    -- drop shell tables within a single transaction or multiple transactions according to the flag singleTransaction
    FOR table_name IN SELECT logicalrelid::regclass::text FROM pg_dist_partition
    LOOP
        PERFORM pg_catalog.worker_drop_shell_table(table_name);
        IF NOT singleTransaction THEN
            COMMIT;
        END IF;
    END LOOP;
END;
$$;
COMMENT ON PROCEDURE worker_drop_all_shell_tables(singleTransaction bool)
    IS 'drop all distributed tables only without the metadata within a single transaction or '
       'multiple transactions specified by the flag singleTransaction';
```
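A usage sketch of the new procedure (normally issued on workers via the WORKER_DROP_ALL_SHELL_TABLES template later in this diff). Since a CALL outside an explicit transaction block may commit internally, passing false lets the loop COMMIT after each drop, keeping accumulated invalidation messages bounded:

```sql
-- default: drop all shell tables within a single transaction
CALL pg_catalog.worker_drop_all_shell_tables(true);

-- nontransactional sync: commit after each shell table drop
CALL pg_catalog.worker_drop_all_shell_tables(false);
```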
```diff
@@ -49,26 +49,23 @@ activate_node_snapshot(PG_FUNCTION_ARGS)
 	 */
 	WorkerNode *dummyWorkerNode = GetFirstPrimaryWorkerNode();
 
-	List *updateLocalGroupCommand =
-		list_make1(LocalGroupIdUpdateCommand(dummyWorkerNode->groupId));
-	List *syncDistObjCommands = SyncDistributedObjectsCommandList(dummyWorkerNode);
-	List *dropSnapshotCommands = NodeMetadataDropCommands();
-	List *createSnapshotCommands = NodeMetadataCreateCommands();
-	List *pgDistTableMetadataSyncCommands = PgDistTableMetadataSyncCommandList();
+	/*
+	 * Create MetadataSyncContext which is used throughout nodes' activation.
+	 * As we set collectCommands to true, it would not create connections to workers.
+	 * Instead it would collect and return sync commands to be sent to workers.
+	 */
+	bool collectCommands = true;
+	bool nodesAddedInSameTransaction = false;
+	MetadataSyncContext *context = CreateMetadataSyncContext(list_make1(dummyWorkerNode),
+															 collectCommands,
+															 nodesAddedInSameTransaction);
 
-	List *activateNodeCommandList = NIL;
+	ActivateNodeList(context);
+
+	List *activateNodeCommandList = context->collectedCommands;
 	int activateNodeCommandIndex = 0;
 	Oid ddlCommandTypeId = TEXTOID;
 
-	activateNodeCommandList = list_concat(activateNodeCommandList,
-										  updateLocalGroupCommand);
-	activateNodeCommandList = list_concat(activateNodeCommandList, syncDistObjCommands);
-	activateNodeCommandList = list_concat(activateNodeCommandList, dropSnapshotCommands);
-	activateNodeCommandList = list_concat(activateNodeCommandList,
-										  createSnapshotCommands);
-	activateNodeCommandList = list_concat(activateNodeCommandList,
-										  pgDistTableMetadataSyncCommands);
-
 	int activateNodeCommandCount = list_length(activateNodeCommandList);
 	Datum *activateNodeCommandDatumArray = palloc0(activateNodeCommandCount *
 												   sizeof(Datum));
```
```diff
@@ -1270,23 +1270,6 @@ MyBackendGotCancelledDueToDeadlock(bool clearState)
 }
 
 
-/*
- * MyBackendIsInDisributedTransaction returns true if MyBackendData
- * is in a distributed transaction.
- */
-bool
-MyBackendIsInDisributedTransaction(void)
-{
-	/* backend might not have used citus yet and thus not initialized backend data */
-	if (!MyBackendData)
-	{
-		return false;
-	}
-
-	return IsInDistributedTransaction(MyBackendData);
-}
-
-
 /*
  * ActiveDistributedTransactionNumbers returns a list of pointers to
  * transaction numbers of distributed transactions that are in progress
```
```diff
@@ -374,6 +374,54 @@ SendCommandListToWorkerOutsideTransactionWithConnection(MultiConnection *workerConnection,
 }
 
 
+/*
+ * SendCommandListToWorkerListWithBareConnections sends the command list
+ * over the specified bare connections. This function is mainly useful to
+ * avoid opening and closing connections excessively by allowing reuse of
+ * connections to send multiple separate bare commands. The function
+ * raises an error if any of the queries fail.
+ */
+void
+SendCommandListToWorkerListWithBareConnections(List *workerConnectionList,
+											   List *commandList)
+{
+	Assert(!InCoordinatedTransaction());
+	Assert(!GetCoordinatedTransactionShouldUse2PC());
+
+	if (list_length(commandList) == 0 || list_length(workerConnectionList) == 0)
+	{
+		/* nothing to do */
+		return;
+	}
+
+	/*
+	 * In order to avoid round-trips per query in queryStringList,
+	 * we join the strings and send as a single command. Also,
+	 * if there is only a single command, avoid the additional call to
+	 * StringJoin given that some strings can be quite large.
+	 */
+	char *stringToSend = (list_length(commandList) == 1) ?
+						 linitial(commandList) : StringJoin(commandList, ';');
+
+	/* send commands in parallel */
+	MultiConnection *connection = NULL;
+	foreach_ptr(connection, workerConnectionList)
+	{
+		int querySent = SendRemoteCommand(connection, stringToSend);
+		if (querySent == 0)
+		{
+			ReportConnectionError(connection, ERROR);
+		}
+	}
+
+	bool failOnError = true;
+	foreach_ptr(connection, workerConnectionList)
+	{
+		ClearResults(connection, failOnError);
+	}
+}
+
+
 /*
  * SendCommandListToWorkerInCoordinatedTransaction opens connection to the node
  * with the given nodeName and nodePort. The commands are sent as part of the
@@ -390,6 +438,8 @@ SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *workerNodeList
 		return;
 	}
 
+	ErrorIfAnyMetadataNodeOutOfSync(workerNodeList);
+
 	UseCoordinatedTransaction();
 
 	List *connectionList = NIL;
```
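Note the two-phase structure above: commands are first dispatched to every connection, then results are drained, so the batch proceeds in parallel with one round trip per node. As an illustration (the statements themselves are hypothetical), a three-command list reaches each worker as one ';'-joined string:

```sql
-- received by the worker as a single wire message rather than three round trips
SELECT 1;SELECT 2;SELECT 3
```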
```diff
@@ -503,12 +503,11 @@ GetReferenceTableColocationId()
 
 
 /*
- * DeleteAllReplicatedTablePlacementsFromNodeGroup function iterates over
- * list of reference and replicated hash distributed tables and deletes
- * all placements from pg_dist_placement table for given group.
+ * GetAllReplicatedTableList returns all tables which has replicated placements.
+ * i.e. (all reference tables) + (distributed tables with more than 1 placements)
  */
-void
-DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool localOnly)
+List *
+GetAllReplicatedTableList(void)
 {
 	List *referenceTableList = CitusTableTypeIdList(REFERENCE_TABLE);
 	List *replicatedMetadataSyncedDistributedTableList =
@@ -517,13 +516,25 @@ DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool localOnly)
 	List *replicatedTableList =
 		list_concat(referenceTableList, replicatedMetadataSyncedDistributedTableList);
 
-	/* if there are no reference tables, we do not need to do anything */
-	if (list_length(replicatedTableList) == 0)
-	{
-		return;
-	}
+	return replicatedTableList;
+}
 
-	StringInfo deletePlacementCommand = makeStringInfo();
 
+/*
+ * ReplicatedPlacementsForNodeGroup filters all replicated placements for given
+ * node group id.
+ */
+List *
+ReplicatedPlacementsForNodeGroup(int32 groupId)
+{
+	List *replicatedTableList = GetAllReplicatedTableList();
+
+	if (list_length(replicatedTableList) == 0)
+	{
+		return NIL;
+	}
+
+	List *replicatedPlacementsForNodeGroup = NIL;
 	Oid replicatedTableId = InvalidOid;
 	foreach_oid(replicatedTableId, replicatedTableList)
 	{
@@ -538,25 +549,104 @@ DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool localOnly)
 			continue;
 		}
 
+		replicatedPlacementsForNodeGroup = list_concat(replicatedPlacementsForNodeGroup,
+													   placements);
+	}
+
+	return replicatedPlacementsForNodeGroup;
+}
+
+
+/*
+ * DeleteShardPlacementCommand returns a command for deleting given placement from
+ * metadata.
+ */
+char *
+DeleteShardPlacementCommand(uint64 placementId)
+{
+	StringInfo deletePlacementCommand = makeStringInfo();
+	appendStringInfo(deletePlacementCommand,
+					 "DELETE FROM pg_catalog.pg_dist_placement "
+					 "WHERE placementid = " UINT64_FORMAT, placementId);
+	return deletePlacementCommand->data;
+}
+
+
+/*
+ * DeleteAllReplicatedTablePlacementsFromNodeGroup function iterates over
+ * list of reference and replicated hash distributed tables and deletes
+ * all placements from pg_dist_placement table for given group.
+ */
+void
+DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool localOnly)
+{
+	List *replicatedPlacementListForGroup = ReplicatedPlacementsForNodeGroup(groupId);
+
+	/* if there are no replicated tables for the group, we do not need to do anything */
+	if (list_length(replicatedPlacementListForGroup) == 0)
+	{
+		return;
+	}
+
 	GroupShardPlacement *placement = NULL;
-	foreach_ptr(placement, placements)
+	foreach_ptr(placement, replicatedPlacementListForGroup)
 	{
 		LockShardDistributionMetadata(placement->shardId, ExclusiveLock);
 
 		if (!localOnly)
 		{
+			char *deletePlacementCommand =
+				DeleteShardPlacementCommand(placement->placementId);
+
 			SendCommandToWorkersWithMetadata(deletePlacementCommand);
 		}
 
 		DeleteShardPlacementRow(placement->placementId);
 	}
 }
+
+
+/*
+ * DeleteAllReplicatedTablePlacementsFromNodeGroupViaMetadataContext does the same as
+ * DeleteAllReplicatedTablePlacementsFromNodeGroup except it uses metadataSyncContext for
+ * connections.
+ */
+void
+DeleteAllReplicatedTablePlacementsFromNodeGroupViaMetadataContext(
+	MetadataSyncContext *context, int32 groupId, bool localOnly)
+{
+	List *replicatedPlacementListForGroup = ReplicatedPlacementsForNodeGroup(groupId);
+
+	/* if there are no replicated tables for the group, we do not need to do anything */
+	if (list_length(replicatedPlacementListForGroup) == 0)
+	{
+		return;
+	}
+
+	MemoryContext oldContext = MemoryContextSwitchTo(context->context);
+	GroupShardPlacement *placement = NULL;
+	foreach_ptr(placement, replicatedPlacementListForGroup)
+	{
+		LockShardDistributionMetadata(placement->shardId, ExclusiveLock);
+
+		if (!localOnly)
+		{
-			resetStringInfo(deletePlacementCommand);
-			appendStringInfo(deletePlacementCommand,
-							 "DELETE FROM pg_catalog.pg_dist_placement "
-							 "WHERE placementid = " UINT64_FORMAT,
-							 placement->placementId);
+			char *deletePlacementCommand =
+				DeleteShardPlacementCommand(placement->placementId);
 
-			SendCommandToWorkersWithMetadata(deletePlacementCommand->data);
+			SendOrCollectCommandListToMetadataNodes(context,
+													list_make1(deletePlacementCommand));
+		}
+
+		/* do not execute local transaction if we collect commands */
+		if (!MetadataSyncCollectsCommands(context))
+		{
+			DeleteShardPlacementRow(placement->placementId);
+		}
+
+		ResetMetadataSyncMemoryContext(context);
+	}
+	MemoryContextSwitchTo(oldContext);
+}
```
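For reference, with a hypothetical placement id of 42, DeleteShardPlacementCommand builds:

```sql
DELETE FROM pg_catalog.pg_dist_placement WHERE placementid = 42
```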
```diff
@@ -66,7 +66,6 @@ extern int ExtractProcessIdFromGlobalPID(uint64 globalPID);
 extern void GetBackendDataForProc(PGPROC *proc, BackendData *result);
 extern void CancelTransactionDueToDeadlock(PGPROC *proc);
 extern bool MyBackendGotCancelledDueToDeadlock(bool clearState);
-extern bool MyBackendIsInDisributedTransaction(void);
 extern List * ActiveDistributedTransactionNumbers(void);
 extern LocalTransactionId GetMyProcLocalTransactionId(void);
 extern int GetExternalClientBackendCount(void);
```
```diff
@@ -323,6 +323,7 @@ extern void ShutdownConnection(MultiConnection *connection);
 /* dealing with a connection */
 extern void FinishConnectionListEstablishment(List *multiConnectionList);
 extern void FinishConnectionEstablishment(MultiConnection *connection);
+extern void ForceConnectionCloseAtTransactionEnd(MultiConnection *connection);
 extern void ClaimConnectionExclusively(MultiConnection *connection);
 extern void UnclaimConnection(MultiConnection *connection);
 extern void MarkConnectionConnected(MultiConnection *connection);
```
```diff
@@ -19,6 +19,8 @@
 #include "distributed/errormessage.h"
 #include "nodes/pg_list.h"
 
+typedef bool (*AddressPredicate)(const ObjectAddress *);
+
 extern List * GetUniqueDependenciesList(List *objectAddressesList);
 extern List * GetDependenciesForObject(const ObjectAddress *target);
 extern List * GetAllSupportedDependenciesForObject(const ObjectAddress *target);
@@ -33,5 +35,7 @@ extern List * GetPgDependTuplesForDependingObjects(Oid targetObjectClassId,
 												   Oid targetObjectId);
 extern List * GetDependingViews(Oid relationId);
 extern Oid GetDependingView(Form_pg_depend pg_depend);
+extern List * FilterObjectAddressListByPredicate(List *objectAddressList,
+												 AddressPredicate predicate);
 
 #endif /* CITUS_DEPENDENCY_H */
```
```diff
@@ -18,9 +18,31 @@
 #include "distributed/metadata_cache.h"
 #include "nodes/pg_list.h"
 
+/* managed via guc.c */
+typedef enum
+{
+	METADATA_SYNC_TRANSACTIONAL = 0,
+	METADATA_SYNC_NON_TRANSACTIONAL = 1
+} MetadataSyncTransactionMode;
+
 /* config variables */
 extern int MetadataSyncInterval;
 extern int MetadataSyncRetryInterval;
+extern int MetadataSyncTransMode;
+
+/*
+ * MetadataSyncContext is used throughout metadata sync.
+ */
+typedef struct MetadataSyncContext
+{
+	List *activatedWorkerNodeList; /* activated worker nodes */
+	List *activatedWorkerBareConnections; /* bare connections to activated nodes */
+	MemoryContext context; /* memory context for all allocations */
+	MetadataSyncTransactionMode transactionMode; /* transaction mode for the sync */
+	bool collectCommands; /* if we collect commands instead of sending and resetting */
+	List *collectedCommands; /* collected commands. (NIL if collectCommands == false) */
+	bool nodesAddedInSameTransaction; /* if the nodes are added just before activation */
+} MetadataSyncContext;
+
 typedef enum
 {
@@ -52,7 +74,6 @@ extern void citus_internal_add_placement_metadata_internal(int64 shardId,
 														   int64 shardLength,
 														   int32 groupId,
 														   int64 placementId);
-extern void SyncNodeMetadataToNode(const char *nodeNameString, int32 nodePort);
 extern void SyncCitusTableMetadata(Oid relationId);
 extern void EnsureSequentialModeMetadataOperations(void);
 extern bool ClusterHasKnownMetadataWorkers(void);
@@ -60,10 +81,10 @@ extern char * LocalGroupIdUpdateCommand(int32 groupId);
 extern bool ShouldSyncUserCommandForObject(ObjectAddress objectAddress);
 extern bool ShouldSyncTableMetadata(Oid relationId);
 extern bool ShouldSyncTableMetadataViaCatalog(Oid relationId);
 extern Oid FetchRelationIdFromPgPartitionHeapTuple(HeapTuple heapTuple,
 												   TupleDesc tupleDesc);
 extern bool ShouldSyncSequenceMetadata(Oid relationId);
 extern List * NodeMetadataCreateCommands(void);
 extern List * DistributedObjectMetadataSyncCommandList(void);
 extern List * ColocationGroupCreateCommandList(void);
 extern List * CitusTableMetadataCreateCommandList(Oid relationId);
 extern List * NodeMetadataDropCommands(void);
 extern char * MarkObjectsDistributedCreateCommand(List *addresses,
@@ -76,6 +97,7 @@ extern char * DistributionDeleteCommand(const char *schemaName,
 extern char * DistributionDeleteMetadataCommand(Oid relationId);
 extern char * TableOwnerResetCommand(Oid distributedRelationId);
 extern char * NodeListInsertCommand(List *workerNodeList);
+char * NodeListIdempotentInsertCommand(List *workerNodeList);
 extern List * ShardListInsertCommand(List *shardIntervalList);
 extern List * ShardDeleteCommandList(ShardInterval *shardInterval);
 extern char * NodeDeleteCommand(uint32 nodeId);
@@ -116,14 +138,46 @@ extern void SyncNewColocationGroupToNodes(uint32 colocationId, int shardCount,
 										  Oid distributionColumnCollation);
 extern void SyncDeleteColocationGroupToNodes(uint32 colocationId);
 
+extern MetadataSyncContext * CreateMetadataSyncContext(List *nodeList,
+													   bool collectCommands,
+													   bool nodesAddedInSameTransaction);
+extern void EstablishAndSetMetadataSyncBareConnections(MetadataSyncContext *context);
+extern void SetMetadataSyncNodesFromNodeList(MetadataSyncContext *context,
+											 List *nodeList);
+extern void ResetMetadataSyncMemoryContext(MetadataSyncContext *context);
+extern bool MetadataSyncCollectsCommands(MetadataSyncContext *context);
+extern void SendOrCollectCommandListToActivatedNodes(MetadataSyncContext *context,
+													 List *commands);
+extern void SendOrCollectCommandListToMetadataNodes(MetadataSyncContext *context,
+													List *commands);
+extern void SendOrCollectCommandListToSingleNode(MetadataSyncContext *context,
+												 List *commands, int nodeIdx);
+
+extern void ActivateNodeList(MetadataSyncContext *context);
+
+extern char * WorkerDropAllShellTablesCommand(bool singleTransaction);
+
+extern void SyncDistributedObjects(MetadataSyncContext *context);
+extern void SendNodeWideObjectsSyncCommands(MetadataSyncContext *context);
+extern void SendShellTableDeletionCommands(MetadataSyncContext *context);
+extern void SendMetadataDeletionCommands(MetadataSyncContext *context);
+extern void SendColocationMetadataCommands(MetadataSyncContext *context);
+extern void SendDependencyCreationCommands(MetadataSyncContext *context);
+extern void SendDistTableMetadataCommands(MetadataSyncContext *context);
+extern void SendDistObjectCommands(MetadataSyncContext *context);
+extern void SendInterTableRelationshipCommands(MetadataSyncContext *context);
+
 #define DELETE_ALL_NODES "DELETE FROM pg_dist_node"
 #define DELETE_ALL_PLACEMENTS "DELETE FROM pg_dist_placement"
 #define DELETE_ALL_SHARDS "DELETE FROM pg_dist_shard"
 #define DELETE_ALL_DISTRIBUTED_OBJECTS "DELETE FROM pg_catalog.pg_dist_object"
 #define DELETE_ALL_PARTITIONS "DELETE FROM pg_dist_partition"
 #define DELETE_ALL_COLOCATION "DELETE FROM pg_catalog.pg_dist_colocation"
 #define REMOVE_ALL_SHELL_TABLES_COMMAND \
 	"SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition"
+#define WORKER_DROP_ALL_SHELL_TABLES \
+	"CALL pg_catalog.worker_drop_all_shell_tables(%s)"
+#define CITUS_INTERNAL_MARK_NODE_NOT_SYNCED \
+	"SELECT citus_internal_mark_node_not_synced(%d, %d)"
 
 #define REMOVE_ALL_CITUS_TABLES_COMMAND \
 	"SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition"
 #define BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND \
```
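Expanded, the deletion templates above are the plain catalog cleanups sent to workers during the metadata-drop phase (ordering here is illustrative); the failure tests below intercept exactly these statements:

```sql
DELETE FROM pg_dist_partition;
DELETE FROM pg_dist_shard;
DELETE FROM pg_dist_placement;
DELETE FROM pg_catalog.pg_dist_object;
DELETE FROM pg_catalog.pg_dist_colocation;
DELETE FROM pg_dist_node;
```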
```diff
@@ -338,7 +338,6 @@ extern List * GetAllDependencyCreateDDLCommands(const List *dependencies);
 extern bool ShouldPropagate(void);
 extern bool ShouldPropagateCreateInCoordinatedTransction(void);
 extern bool ShouldPropagateAnyObject(List *addresses);
-extern List * ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort);
 
 /* Remaining metadata utility functions */
 extern Oid TableOwnerOid(Oid relationId);
```
```diff
@@ -17,14 +17,20 @@
 #include "listutils.h"
 
 #include "distributed/metadata_cache.h"
+#include "distributed/metadata_sync.h"
 
 extern void EnsureReferenceTablesExistOnAllNodes(void);
 extern void EnsureReferenceTablesExistOnAllNodesExtended(char transferMode);
 extern bool HasNodesWithMissingReferenceTables(List **referenceTableList);
 extern uint32 CreateReferenceTableColocationId(void);
 extern uint32 GetReferenceTableColocationId(void);
+extern List * GetAllReplicatedTableList(void);
+extern List * ReplicatedPlacementsForNodeGroup(int32 groupId);
+extern char * DeleteShardPlacementCommand(uint64 placementId);
 extern void DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId,
 															bool localOnly);
+extern void DeleteAllReplicatedTablePlacementsFromNodeGroupViaMetadataContext(
+	MetadataSyncContext *context, int32 groupId, bool localOnly);
 extern int CompareOids(const void *leftElement, const void *rightElement);
 extern void ReplicateAllReferenceTablesToNode(WorkerNode *workerNode);
 extern void ErrorIfNotAllNodesHaveReferenceTableReplicas(List *workerNodeList);
```
```diff
@@ -62,9 +62,6 @@ extern int MaxWorkerNodesTracked;
 extern char *WorkerListFileName;
 extern char *CurrentCluster;
 
-extern void ActivateNodeList(List *nodeList);
-extern int ActivateNode(char *nodeName, int nodePort);
-
 /* Function declarations for finding worker nodes to place shards on */
 extern WorkerNode * WorkerGetRandomCandidateNode(List *currentNodeList);
 extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList,
@@ -87,6 +84,7 @@ extern WorkerNode * FindWorkerNode(const char *nodeName, int32 nodePort);
 extern WorkerNode * FindWorkerNodeOrError(const char *nodeName, int32 nodePort);
 extern WorkerNode * FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort);
 extern WorkerNode * FindNodeWithNodeId(int nodeId, bool missingOk);
 extern WorkerNode * ModifiableWorkerNode(const char *nodeName, int32 nodePort);
 extern List * ReadDistNode(bool includeNodesFromOtherClusters);
 extern void EnsureCoordinator(void);
 extern void EnsureCoordinatorIsInMetadata(void);
@@ -105,8 +103,6 @@ extern WorkerNode * SetWorkerColumnLocalOnly(WorkerNode *workerNode, int columnI
 											 Datum value);
 extern uint32 CountPrimariesWithMetadata(void);
 extern WorkerNode * GetFirstPrimaryWorkerNode(void);
-extern List * SyncDistributedObjectsCommandList(WorkerNode *workerNode);
-extern List * PgDistTableMetadataSyncCommandList(void);
 
 /* Function declarations for worker node utilities */
 extern int CompareWorkerNodes(const void *leftElement, const void *rightElement);
```
```diff
@@ -82,6 +82,8 @@ extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName,
 extern void SendCommandListToWorkerOutsideTransactionWithConnection(
 	MultiConnection *workerConnection,
 	List *commandList);
+extern void SendCommandListToWorkerListWithBareConnections(List *workerConnections,
+														   List *commandList);
 extern void SendMetadataCommandListToWorkerListInCoordinatedTransaction(
 	List *workerNodeList,
 	const char *
```
New expected-output file for the nontransactional metadata sync failure tests, `@@ -0,0 +1,687 @@`:

```
--
-- failure_mx_metadata_sync_multi_trans.sql
--
CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans;
SET SEARCH_PATH = mx_metadata_sync_multi_trans;
SET citus.shard_count TO 2;
SET citus.next_shard_id TO 16000000;
SET citus.shard_replication_factor TO 1;
SET citus.metadata_sync_mode TO 'nontransactional';
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

\set VERBOSITY terse
SET client_min_messages TO ERROR;
-- Create roles
CREATE ROLE foo1;
CREATE ROLE foo2;
-- Create sequence
CREATE SEQUENCE seq;
-- Create colocated distributed tables
CREATE TABLE dist1 (id int PRIMARY KEY default nextval('seq'));
SELECT create_distributed_table('dist1', 'id');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO dist1 SELECT i FROM generate_series(1,100) i;
CREATE TABLE dist2 (id int PRIMARY KEY default nextval('seq'));
SELECT create_distributed_table('dist2', 'id');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO dist2 SELECT i FROM generate_series(1,100) i;
-- Create a reference table
CREATE TABLE ref (id int UNIQUE);
SELECT create_reference_table('ref');
 create_reference_table
---------------------------------------------------------------------

(1 row)

INSERT INTO ref SELECT i FROM generate_series(1,100) i;
-- Create local tables
CREATE TABLE loc1 (id int PRIMARY KEY);
INSERT INTO loc1 SELECT i FROM generate_series(1,100) i;
CREATE TABLE loc2 (id int REFERENCES loc1(id));
INSERT INTO loc2 SELECT i FROM generate_series(1,100) i;
SELECT citus_set_coordinator_host('localhost', :master_port);
 citus_set_coordinator_host
---------------------------------------------------------------------

(1 row)

SELECT citus_add_local_table_to_metadata('loc1', cascade_via_foreign_keys => true);
 citus_add_local_table_to_metadata
---------------------------------------------------------------------

(1 row)

-- Create partitioned distributed table
CREATE TABLE orders (
	id bigint,
	order_time timestamp without time zone NOT NULL,
	region_id bigint NOT NULL
)
PARTITION BY RANGE (order_time);
SELECT create_time_partitions(
	table_name := 'orders',
	partition_interval := '1 day',
	start_from := '2020-01-01',
	end_at := '2020-01-11'
);
 create_time_partitions
---------------------------------------------------------------------
 t
(1 row)

SELECT create_distributed_table('orders', 'region_id');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- Initially turn metadata sync to worker2 off because we'll ingest errors to start/stop metadata sync operations
SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port);
 stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

SELECT isactive, metadatasynced, hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;
 isactive | metadatasynced | hasmetadata
---------------------------------------------------------------------
 t        | f              | f
(1 row)

-- Failure to send local group id
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_local_group SET groupid").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to drop node metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to send node metadata
SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to drop sequence
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to drop shell table
SELECT citus.mitmproxy('conn.onQuery(query="CALL pg_catalog.worker_drop_all_shell_tables").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="CALL pg_catalog.worker_drop_all_shell_tables").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to delete all pg_dist_partition metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to delete all pg_dist_shard metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_shard").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_shard").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to delete all pg_dist_placement metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_placement").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_placement").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to delete all pg_dist_object metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_object").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_object").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to delete all pg_dist_colocation metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_colocation").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_colocation").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to alter or create role
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_alter_role").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_alter_role").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to set database owner
SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to create schema
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to create sequence
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to create distributed table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to create reference table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.ref").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.ref").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to create local table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.loc1").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.loc1").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to create distributed partitioned table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to create distributed partition table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders_p2020_01_05").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders_p2020_01_05").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to attach partition
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_trans.orders ATTACH PARTITION mx_metadata_sync_multi_trans.orders_p2020_01_05").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_trans.orders ATTACH PARTITION mx_metadata_sync_multi_trans.orders_p2020_01_05").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to add partition metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
-- Failure to add shard metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()');
 mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
ERROR:  connection to the remote node localhost:xxxxx failed with the following error: connection not open
```
|
||||
-- Failure to add placement metadata
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
|
||||
-- Failure to add colocation metadata
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").kill()');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
|
||||
-- Failure to add distributed object metadata
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
|
||||
-- Failure to set isactive to true
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET isactive = TRUE").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET isactive = TRUE").kill()');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: connection not open
|
||||
-- Failure to set metadatasynced to true
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET metadatasynced = TRUE").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET metadatasynced = TRUE").kill()');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: connection not open
|
||||
-- Failure to set hasmetadata to true
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET hasmetadata = TRUE").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET hasmetadata = TRUE").kill()');
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: connection not open
|
||||
-- Show node metadata info on coordinator after failures
|
||||
SELECT * FROM pg_dist_node ORDER BY nodeport;
|
||||
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
|
||||
---------------------------------------------------------------------
|
||||
4 | 4 | localhost | 9060 | default | f | t | primary | default | f | t
|
||||
6 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
|
||||
1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
|
||||
(3 rows)
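-- A hedged aside, not part of the test: the flags above are how an operator
-- can spot a node that was added but never finished syncing. A node left with
-- hasmetadata = f or metadatasynced = f can be retried once the failure is
-- resolved, e.g. (9060 is the proxied worker port shown above):
--   SELECT nodename, nodeport FROM pg_dist_node
--   WHERE isactive AND noderole = 'primary' AND NOT metadatasynced;
--   SELECT citus_activate_node('localhost', 9060);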

-- Show that we can still query the node from coordinator
SELECT COUNT(*) FROM dist1;
count
---------------------------------------------------------------------
100
(1 row)

-- Verify that the value 103 belongs to a shard at the node to which we failed to sync metadata
SELECT 103 AS failed_node_val \gset
SELECT nodeid AS failed_nodeid FROM pg_dist_node WHERE metadatasynced = false \gset
SELECT get_shard_id_for_distribution_column('dist1', :failed_node_val) AS shardid \gset
SELECT groupid = :failed_nodeid FROM pg_dist_placement WHERE shardid = :shardid;
?column?
---------------------------------------------------------------------
t
(1 row)

-- Show that we can still insert into a shard at the node from coordinator
INSERT INTO dist1 VALUES (:failed_node_val);
-- Show that we can still update a shard at the node from coordinator
UPDATE dist1 SET id = :failed_node_val WHERE id = :failed_node_val;
-- Show that we can still delete from a shard at the node from coordinator
DELETE FROM dist1 WHERE id = :failed_node_val;
-- Show that DDL would still propagate to the node
SET client_min_messages TO NOTICE;
SET citus.log_remote_commands TO 1;
CREATE SCHEMA dummy;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
NOTICE: issuing CREATE SCHEMA dummy
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
NOTICE: issuing CREATE SCHEMA dummy
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['dummy']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
SET citus.log_remote_commands TO 0;
SET client_min_messages TO ERROR;
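-- A hedged aside, not part of the test: the NOTICE trail above is Citus
-- two-phase commit at work; each worker receives PREPARE TRANSACTION followed
-- by COMMIT PREPARED. If a node fails between the two phases, the commit
-- records Citus keeps in pg_dist_transaction let it finish the job later:
--   SELECT recover_prepared_transactions();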
-- Successfully activate the node after many failures
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------

(1 row)

SELECT citus_activate_node('localhost', :worker_2_proxy_port);
citus_activate_node
---------------------------------------------------------------------
4
(1 row)

-- Activate the node once more to verify it works again with already synced metadata
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
citus_activate_node
---------------------------------------------------------------------
4
(1 row)

-- Show node metadata info on worker2 and coordinator after success
\c - - - :worker_2_port
SELECT * FROM pg_dist_node ORDER BY nodeport;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
4 | 4 | localhost | 9060 | default | t | t | primary | default | t | t
6 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
(3 rows)

\c - - - :master_port
SELECT * FROM pg_dist_node ORDER BY nodeport;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
---------------------------------------------------------------------
4 | 4 | localhost | 9060 | default | t | t | primary | default | t | t
6 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
(3 rows)

SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------

(1 row)

RESET citus.metadata_sync_mode;
DROP SCHEMA dummy;
DROP SCHEMA mx_metadata_sync_multi_trans CASCADE;
NOTICE: drop cascades to 10 other objects
DROP ROLE foo1;
DROP ROLE foo2;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------

(1 row)

@ -1197,15 +1197,6 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
ERROR: must be owner of table super_user_table
ROLLBACK;
--- the user only allowed to delete shards in a distributed transaction
-BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
-SET application_name to 'citus_internal gpid=10000000001';
-\set VERBOSITY terse
-WITH shard_data(shardid)
-AS (VALUES (1420007))
-SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
-ERROR: This is an internal Citus function can only be used in a distributed transaction
-ROLLBACK;
-- the user cannot delete non-existing shards
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
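-- A hedged aside, not part of the diff: the block removed above asserted that
-- citus_internal_* functions may only run inside a distributed transaction
-- (one with an assigned distributed transaction id); dropping it relaxes that
-- requirement, while the ownership check exercised by the surviving tests
-- still applies, e.g.:
--   BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
--   SELECT citus_internal_delete_shard_metadata(1420007);
--   ROLLBACK;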
@ -2,19 +2,22 @@ SET citus.next_shard_id TO 1220000;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1;
-- Tests functions related to cluster membership
--- add the nodes to the cluster
+-- add the first node to the cluster in transactional mode
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column?
---------------------------------------------------------------------
1
(1 row)

+-- add the second node in nontransactional mode
+SET citus.metadata_sync_mode TO 'nontransactional';
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)

+RESET citus.metadata_sync_mode;
-- I am coordinator
SELECT citus_is_coordinator();
citus_is_coordinator
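-- A hedged aside, not part of the test file: the hunk above exercises the new
-- citus.metadata_sync_mode GUC, which picks between one coordinated
-- transaction (the default) and one transaction per sync command. A typical
-- session-level pattern, with a hypothetical worker port:
--   SET citus.metadata_sync_mode TO 'nontransactional';
--   SELECT 1 FROM master_add_node('localhost', 9701);
--   RESET citus.metadata_sync_mode;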
@ -374,7 +377,7 @@ SELECT master_get_active_worker_nodes();
SELECT * FROM master_add_node('localhost', :worker_2_port);
master_add_node
---------------------------------------------------------------------
-7
+6
(1 row)

ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART WITH 7;
@ -445,7 +448,7 @@ SELECT run_command_on_workers('UPDATE pg_dist_placement SET shardstate=1 WHERE g
-- when there is no primary we should get a pretty error
UPDATE pg_dist_node SET noderole = 'secondary' WHERE nodeport=:worker_2_port;
SELECT * FROM cluster_management_test;
-ERROR: node group 6 does not have a primary node
+ERROR: node group 5 does not have a primary node
-- when there is no node at all in the group we should get a different error
DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port;
SELECT run_command_on_workers('DELETE FROM pg_dist_node WHERE nodeport=' || :'worker_2_port');
@ -455,13 +458,12 @@ SELECT run_command_on_workers('DELETE FROM pg_dist_node WHERE nodeport=' || :'wo
(1 row)

SELECT * FROM cluster_management_test;
-ERROR: there is a shard placement in node group 6 but there are no nodes in that group
+ERROR: there is a shard placement in node group 5 but there are no nodes in that group
-- clean-up
SELECT * INTO old_placements FROM pg_dist_placement WHERE groupid = :worker_2_group;
DELETE FROM pg_dist_placement WHERE groupid = :worker_2_group;
SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset
-WARNING: could not find any shard placements for shardId 1220001
WARNING: could not find any shard placements for shardId 1220001
WARNING: could not find any shard placements for shardId 1220003
WARNING: could not find any shard placements for shardId 1220005
WARNING: could not find any shard placements for shardId 1220007
@ -1202,6 +1204,33 @@ SELECT start_metadata_sync_to_all_nodes();
t
(1 row)

+-- nontransactional sync mode tests
+SET citus.metadata_sync_mode TO 'nontransactional';
+-- do not allow nontransactional sync inside transaction block
+BEGIN;
+SELECT start_metadata_sync_to_all_nodes();
+ERROR: do not sync metadata in transaction block when the sync mode is nontransactional
+HINT: resync after SET citus.metadata_sync_mode TO 'transactional'
+COMMIT;
+SELECT start_metadata_sync_to_all_nodes();
+start_metadata_sync_to_all_nodes
+---------------------------------------------------------------------
+t
+(1 row)
+
+-- do not allow nontransactional node addition inside transaction block
+BEGIN;
+SELECT citus_remove_node('localhost', :worker_1_port);
+citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_add_node('localhost', :worker_1_port);
+ERROR: do not add node in transaction block when the sync mode is nontransactional
+HINT: add the node after SET citus.metadata_sync_mode TO 'transactional'
+COMMIT;
+RESET citus.metadata_sync_mode;
-- verify that at the end of this file, all primary nodes have metadata synced
SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';
?column?
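-- A hedged aside, not part of the test file: per the HINT above, a sync that
-- fails partway through in nontransactional mode can leave workers partially
-- synced; rerunning the sync, or switching back to the transactional default
-- first, brings them to a consistent state:
--   SET citus.metadata_sync_mode TO 'transactional';
--   SELECT start_metadata_sync_to_all_nodes();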
@ -1363,10 +1363,12 @@ SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
| function citus_internal_is_replication_origin_tracking_active() boolean
+| function citus_internal_mark_node_not_synced(integer,integer) void
| function citus_internal_start_replication_origin_tracking() void
| function citus_internal_stop_replication_origin_tracking() void
| function worker_adjust_identity_column_seq_ranges(regclass) void
-(4 rows)
+| function worker_drop_all_shell_tables(boolean)
+(6 rows)

DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
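-- A hedged aside, not part of the diff: worker_drop_all_shell_tables is the
-- new worker-side helper that appears in the snapshots below; it drops every
-- shell table on the worker, with its boolean argument controlling how the
-- drops are scoped into transactions. The snapshots invoke it as:
--   CALL pg_catalog.worker_drop_all_shell_tables(true);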
@ -72,6 +72,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
unnest
---------------------------------------------------------------------
ALTER DATABASE regression OWNER TO postgres;
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
DELETE FROM pg_catalog.pg_dist_colocation
DELETE FROM pg_catalog.pg_dist_object
@ -89,18 +90,20 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE pg_database_owner
SET ROLE pg_database_owner
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
(29 rows)
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
(32 rows)
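-- A hedged aside, not part of the diff: the snapshot grew from 29 to 32 rows
-- because the role/database/schema entries that used to travel in a single
-- citus_internal_add_object_metadata call are now emitted one command per
-- object, and the pg_dist_node flag updates became part of the snapshot
-- itself. The full command list comes from the test helper used above:
--   SELECT unnest(activate_node_snapshot()) ORDER BY 1;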

-- Create a test table with constraints and SERIAL and default from user defined sequence
CREATE SEQUENCE user_defined_seq;
@ -127,6 +130,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE public.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
DELETE FROM pg_catalog.pg_dist_colocation
@ -135,6 +139,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS public.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
@ -150,21 +155,26 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('public.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE pg_database_owner
SET ROLE pg_database_owner
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(42 rows)
(49 rows)

-- Show that CREATE INDEX commands are included in the activate node snapshot
CREATE INDEX mx_index ON mx_test_table(col_2);
@ -176,6 +186,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE public.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
@ -185,6 +196,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS public.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
@ -200,21 +212,26 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('public.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE pg_database_owner
SET ROLE pg_database_owner
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(43 rows)
(50 rows)

-- Show that schema changes are included in the activate node snapshot
CREATE SCHEMA mx_testing_schema;
@ -227,6 +244,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
@ -237,6 +255,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
@ -252,21 +271,27 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE pg_database_owner
SET ROLE pg_database_owner
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(44 rows)
(52 rows)

-- Show that append distributed tables are not included in the activate node snapshot
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
@ -285,6 +310,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
@ -295,6 +321,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
@ -310,21 +337,27 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE pg_database_owner
SET ROLE pg_database_owner
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(44 rows)
(52 rows)

-- Show that range distributed tables are not included in the activate node snapshot
UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;
@ -336,6 +369,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION pg_database_owner
@ -346,6 +380,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;
@ -361,21 +396,27 @@ SELECT unnest(activate_node_snapshot()) order by 1;
|
|||
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE pg_database_owner
SET ROLE pg_database_owner
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(44 rows)
+(52 rows)

-- Test start_metadata_sync_to_node and citus_activate_node UDFs
-- Ensure that hasmetadata=false for all nodes
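
A minimal way to verify this precondition is to read the catalog flags directly; this query is illustrative and not part of the recorded test output:

-- hasmetadata and metadatasynced are the pg_dist_node flags that metadata sync maintains
SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid;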

@@ -1761,6 +1802,7 @@ ALTER TABLE dist_table_1 ADD COLUMN b int;
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT master_add_node('localhost', :master_port, groupid => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT citus_disable_node_and_wait('localhost', :worker_1_port);

@@ -1854,6 +1896,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER TABLE public.dist_table_1 OWNER TO postgres
ALTER TABLE public.mx_ref OWNER TO postgres
ALTER TABLE public.test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 USING btree (col1)
CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 USING btree (col2)

@@ -1874,6 +1917,12 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS mx_test_schema_1.mx_table_1 CASCADE
DROP TABLE IF EXISTS mx_test_schema_2.mx_table_2 CASCADE
DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
DROP TABLE IF EXISTS public.dist_table_1 CASCADE
DROP TABLE IF EXISTS public.mx_ref CASCADE
DROP TABLE IF EXISTS public.test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC;

@@ -1901,18 +1950,35 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_create_truncate_trigger('public.dist_table_1')
SELECT worker_create_truncate_trigger('public.mx_ref')
SELECT worker_create_truncate_trigger('public.test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE pg_database_owner
SET ROLE pg_database_owner
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL), (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310020, 0, 1, 100020), (1310021, 0, 5, 100021), (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310025, 0, 1, 100025), (1310026, 0, 5, 100026), (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;

@@ -1925,7 +1991,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(87 rows)
+(111 rows)

-- shouldn't work since test_table is MX
ALTER TABLE test_table ADD COLUMN id3 bigserial;
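
The ALTER above is expected to fail because test_table is an MX (metadata-synced) table. This is also the setting where the sync mode added by this PR matters for very large clusters; a minimal usage sketch, assuming the GUC value names from the PR description, and not part of the recorded output:

-- run each metadata sync command in its own transaction so memory can be reclaimed
SET citus.metadata_sync_transaction_mode TO 'nontransactional';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);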

@@ -72,6 +72,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
unnest
---------------------------------------------------------------------
ALTER DATABASE regression OWNER TO postgres;
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
DELETE FROM pg_catalog.pg_dist_colocation
DELETE FROM pg_catalog.pg_dist_object

@@ -89,18 +90,20 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE postgres
SET ROLE postgres
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
(29 rows)
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
(32 rows)

-- Create a test table with constraints and SERIAL and default from user defined sequence
CREATE SEQUENCE user_defined_seq;
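
A sketch of the table this comment refers to, reconstructed from the CREATE TABLE command that appears in the snapshot below; the distribution step is assumed, not shown in this hunk:

-- col_3 is the SERIAL column; col_4 takes its default from the user-defined sequence
CREATE TABLE mx_test_table (
    col_1 int UNIQUE,
    col_2 text NOT NULL,
    col_3 bigserial,
    col_4 bigint DEFAULT nextval('user_defined_seq')
);
SELECT create_distributed_table('mx_test_table', 'col_1');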

@@ -127,6 +130,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE public.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap
DELETE FROM pg_catalog.pg_dist_colocation

@@ -135,6 +139,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS public.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;

@@ -150,21 +155,26 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('public.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE postgres
SET ROLE postgres
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(42 rows)
+(49 rows)

-- Show that CREATE INDEX commands are included in the activate node snapshot
CREATE INDEX mx_index ON mx_test_table(col_2);

@@ -176,6 +186,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE public.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2)
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres
CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap

@@ -185,6 +196,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS public.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;

@@ -200,21 +212,26 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('public.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE postgres
SET ROLE postgres
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(43 rows)
+(50 rows)

-- Show that schema changes are included in the activate node snapshot
CREATE SCHEMA mx_testing_schema;
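
The snapshot below qualifies the table as mx_testing_schema.mx_test_table, so the test presumably moves it into the new schema between these hunks; an assumed intermediate step would look like:

-- the schema change is what the refreshed snapshot must reflect
ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema;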

@@ -227,6 +244,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres

@@ -237,6 +255,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;

@@ -252,21 +271,27 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE postgres
SET ROLE postgres
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
-(44 rows)
+(52 rows)

-- Show that append distributed tables are not included in the activate node snapshot
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
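
For the snapshot to have something to exclude, the new table would be append-distributed before the snapshot is taken again; a sketch of the assumed step:

-- the test then verifies the refreshed snapshot omits this append-distributed table
SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append');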

@@ -285,6 +310,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres

@@ -295,6 +321,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;

@@ -310,21 +337,27 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE postgres
SET ROLE postgres
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(44 rows)
(52 rows)

-- Show that range distributed tables are not included in the activate node snapshot
UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass;

@ -336,6 +369,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER SEQUENCE public.user_defined_seq OWNER TO postgres
ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1)
ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres
CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres

@ -346,6 +380,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;

@ -361,21 +396,27 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''')
SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE postgres
SET ROLE postgres
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(44 rows)
(52 rows)

-- Test start_metadata_sync_to_node and citus_activate_node UDFs
-- Ensure that hasmetadata=false for all nodes

@ -1761,6 +1802,7 @@ ALTER TABLE dist_table_1 ADD COLUMN b int;
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT master_add_node('localhost', :master_port, groupid => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT citus_disable_node_and_wait('localhost', :worker_1_port);

@ -1854,6 +1896,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
ALTER TABLE public.dist_table_1 OWNER TO postgres
ALTER TABLE public.mx_ref OWNER TO postgres
ALTER TABLE public.test_table OWNER TO postgres
CALL pg_catalog.worker_drop_all_shell_tables(true)
CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2)
CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 USING btree (col1)
CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 USING btree (col2)

@ -1874,6 +1917,12 @@ SELECT unnest(activate_node_snapshot()) order by 1;
DELETE FROM pg_dist_partition
DELETE FROM pg_dist_placement
DELETE FROM pg_dist_shard
DROP TABLE IF EXISTS mx_test_schema_1.mx_table_1 CASCADE
DROP TABLE IF EXISTS mx_test_schema_2.mx_table_2 CASCADE
DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE
DROP TABLE IF EXISTS public.dist_table_1 CASCADE
DROP TABLE IF EXISTS public.mx_ref CASCADE
DROP TABLE IF EXISTS public.test_table CASCADE
GRANT CREATE ON SCHEMA public TO PUBLIC;
GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC;

@ -1901,18 +1950,35 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SELECT worker_create_truncate_trigger('public.dist_table_1')
SELECT worker_create_truncate_trigger('public.mx_ref')
SELECT worker_create_truncate_trigger('public.test_table')
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition
SET ROLE postgres
SET ROLE postgres
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'off'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL), (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310020, 0, 1, 100020), (1310021, 0, 5, 100021), (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310025, 0, 1, 100025), (1310026, 0, 5, 100026), (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;

@ -1925,7 +1991,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
(87 rows)
(111 rows)

-- shouldn't work since test_table is MX
ALTER TABLE test_table ADD COLUMN id3 bigserial;

@ -21,3 +21,12 @@ FROM pg_dist_node_metadata, pg_extension WHERE extname = 'citus';
-- still, do not NOTICE the version as it changes per release
SET client_min_messages TO WARNING;
CALL citus_finish_citus_upgrade();
-- we should be able to sync metadata in a nontransactional way as well
SET citus.metadata_sync_mode TO 'nontransactional';
SELECT start_metadata_sync_to_all_nodes();
 start_metadata_sync_to_all_nodes
---------------------------------------------------------------------
 t
(1 row)

RESET citus.metadata_sync_mode;

@ -76,6 +76,7 @@ ORDER BY 1;
function citus_internal_global_blocked_processes()
function citus_internal_is_replication_origin_tracking_active()
function citus_internal_local_blocked_processes()
function citus_internal_mark_node_not_synced(integer,integer)
function citus_internal_start_replication_origin_tracking()
function citus_internal_stop_replication_origin_tracking()
function citus_internal_update_placement_metadata(bigint,integer,integer)

@ -245,6 +246,7 @@ ORDER BY 1;
function worker_create_or_replace_object(text)
function worker_create_or_replace_object(text[])
function worker_create_truncate_trigger(regclass)
function worker_drop_all_shell_tables(boolean)
function worker_drop_distributed_table(text)
function worker_drop_sequence_dependency(text)
function worker_drop_shell_table(text)

@ -322,5 +324,5 @@ ORDER BY 1;
view citus_stat_statements
view pg_dist_shard_placement
view time_partitions
(314 rows)
(316 rows)

@ -25,11 +25,11 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.p
(19 rows)

-- on all nodes
SELECT run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 'post_11_upgrade.reporting_line'::regclass) ORDER BY 1;$$) ORDER BY 1;
SELECT run_command_on_workers($$SELECT array_agg(worker_object) FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) worker_object FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 'post_11_upgrade.reporting_line'::regclass) ORDER BY 1) worker_objects;$$) ORDER BY 1;
run_command_on_workers
---------------------------------------------------------------------
(localhost,57636,t,"{""(type,{post_11_upgrade.my_type},{})"",""(function,\\""{post_11_upgrade,func_in_transaction_def}\\"",{})"",""(table,\\""{post_11_upgrade,part_table}\\"",{})"",""(table,\\""{post_11_upgrade,sensors}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test_my_type}\\"",{})"",""(view,\\""{post_11_upgrade,non_dist_upgrade_ref_view_2}\\"",{})"",""(view,\\""{post_11_upgrade,reporting_line}\\"",{})"",""(schema,{post_11_upgrade},{})"",""(\\""text search configuration\\"",\\""{post_11_upgrade,partial_index_test_config}\\"",{})""}")
(localhost,57637,t,"{""(type,{post_11_upgrade.my_type},{})"",""(function,\\""{post_11_upgrade,func_in_transaction_def}\\"",{})"",""(table,\\""{post_11_upgrade,part_table}\\"",{})"",""(table,\\""{post_11_upgrade,sensors}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test_my_type}\\"",{})"",""(view,\\""{post_11_upgrade,non_dist_upgrade_ref_view_2}\\"",{})"",""(view,\\""{post_11_upgrade,reporting_line}\\"",{})"",""(schema,{post_11_upgrade},{})"",""(\\""text search configuration\\"",\\""{post_11_upgrade,partial_index_test_config}\\"",{})""}")
(localhost,57636,t,"{""(function,\\""{post_11_upgrade,func_in_transaction_def}\\"",{})"",""(schema,{post_11_upgrade},{})"",""(table,\\""{post_11_upgrade,part_table}\\"",{})"",""(table,\\""{post_11_upgrade,sensors}\\"",{})"",""(\\""text search configuration\\"",\\""{post_11_upgrade,partial_index_test_config}\\"",{})"",""(type,{post_11_upgrade.my_type},{})"",""(view,\\""{post_11_upgrade,non_dist_upgrade_ref_view_2}\\"",{})"",""(view,\\""{post_11_upgrade,reporting_line}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test_my_type}\\"",{})""}")
(localhost,57637,t,"{""(function,\\""{post_11_upgrade,func_in_transaction_def}\\"",{})"",""(schema,{post_11_upgrade},{})"",""(table,\\""{post_11_upgrade,part_table}\\"",{})"",""(table,\\""{post_11_upgrade,sensors}\\"",{})"",""(\\""text search configuration\\"",\\""{post_11_upgrade,partial_index_test_config}\\"",{})"",""(type,{post_11_upgrade.my_type},{})"",""(view,\\""{post_11_upgrade,non_dist_upgrade_ref_view_2}\\"",{})"",""(view,\\""{post_11_upgrade,reporting_line}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test_my_type}\\"",{})""}")
(2 rows)

-- Create the necessary test utility function

@ -32,6 +32,7 @@ test: failure_single_mod
test: failure_savepoints
test: failure_multi_row_insert
test: failure_mx_metadata_sync
test: failure_mx_metadata_sync_multi_trans
test: failure_connection_establishment

# this test syncs metadata to the workers

@ -0,0 +1,282 @@
--
-- failure_mx_metadata_sync_multi_trans.sql
--
CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans;
SET SEARCH_PATH = mx_metadata_sync_multi_trans;
SET citus.shard_count TO 2;
SET citus.next_shard_id TO 16000000;
SET citus.shard_replication_factor TO 1;
SET citus.metadata_sync_mode TO 'nontransactional';
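-- In 'nontransactional' mode each metadata sync command is sent in its own
-- transaction rather than in one big coordinated transaction, so a failure
-- mid-sync can leave the node partially synced; the blocks below exercise
-- exactly those partial-failure paths.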

SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');

\set VERBOSITY terse
SET client_min_messages TO ERROR;

-- Create roles
CREATE ROLE foo1;
CREATE ROLE foo2;

-- Create sequence
CREATE SEQUENCE seq;

-- Create colocated distributed tables
CREATE TABLE dist1 (id int PRIMARY KEY default nextval('seq'));
SELECT create_distributed_table('dist1', 'id');
INSERT INTO dist1 SELECT i FROM generate_series(1,100) i;

CREATE TABLE dist2 (id int PRIMARY KEY default nextval('seq'));
SELECT create_distributed_table('dist2', 'id');
INSERT INTO dist2 SELECT i FROM generate_series(1,100) i;

-- Create a reference table
CREATE TABLE ref (id int UNIQUE);
SELECT create_reference_table('ref');
INSERT INTO ref SELECT i FROM generate_series(1,100) i;

-- Create local tables
CREATE TABLE loc1 (id int PRIMARY KEY);
INSERT INTO loc1 SELECT i FROM generate_series(1,100) i;

CREATE TABLE loc2 (id int REFERENCES loc1(id));
INSERT INTO loc2 SELECT i FROM generate_series(1,100) i;

SELECT citus_set_coordinator_host('localhost', :master_port);
SELECT citus_add_local_table_to_metadata('loc1', cascade_via_foreign_keys => true);
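-- cascade_via_foreign_keys above also adds loc2 to metadata, since loc2
-- references loc1 and local tables connected by foreign keys have to be
-- added to metadata together.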

-- Create partitioned distributed table
CREATE TABLE orders (
    id bigint,
    order_time timestamp without time zone NOT NULL,
    region_id bigint NOT NULL
)
PARTITION BY RANGE (order_time);

SELECT create_time_partitions(
    table_name := 'orders',
    partition_interval := '1 day',
    start_from := '2020-01-01',
    end_at := '2020-01-11'
);
SELECT create_distributed_table('orders', 'region_id');

-- Initially turn metadata sync to worker2 off because we'll inject errors into the start/stop metadata sync operations
SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT isactive, metadatasynced, hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;

-- Failure to send local group id
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_local_group SET groupid").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
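-- The same two-step probe repeats for every sync step below: cancel(pid)
-- has mitmproxy send a query cancellation to our backend when the matched
-- query is seen, while kill() severs the worker connection entirely; in
-- both cases the activation is expected to fail at that step.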

-- Failure to drop node metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to send node metadata
SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to drop sequence
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to drop shell table
SELECT citus.mitmproxy('conn.onQuery(query="CALL pg_catalog.worker_drop_all_shell_tables").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="CALL pg_catalog.worker_drop_all_shell_tables").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to delete all pg_dist_partition metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to delete all pg_dist_shard metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_shard").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_shard").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to delete all pg_dist_placement metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_placement").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_placement").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to delete all pg_dist_object metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_object").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_object").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to delete all pg_dist_colocation metadata
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_colocation").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_colocation").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to alter or create role
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_alter_role").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_alter_role").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to set database owner
SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to create schema
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to create sequence
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to create distributed table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to create reference table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.ref").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.ref").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to create local table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.loc1").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.loc1").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to create distributed partitioned table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to create a partition of the distributed partitioned table
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders_p2020_01_05").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders_p2020_01_05").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to attach partition
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_trans.orders ATTACH PARTITION mx_metadata_sync_multi_trans.orders_p2020_01_05").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_trans.orders ATTACH PARTITION mx_metadata_sync_multi_trans.orders_p2020_01_05").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to add partition metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to add shard metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to add placement metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to add colocation metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to add distributed object metadata
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to set isactive to true
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET isactive = TRUE").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET isactive = TRUE").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to set metadatasynced to true
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET metadatasynced = TRUE").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET metadatasynced = TRUE").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Failure to set hasmetadata to true
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET hasmetadata = TRUE").cancel(' || :pid || ')');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET hasmetadata = TRUE").kill()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Show node metadata info on coordinator after failures
SELECT * FROM pg_dist_node ORDER BY nodeport;

-- Show that we can still query the node from coordinator
SELECT COUNT(*) FROM dist1;

-- Verify that the value 103 belongs to a shard at the node to which we failed to sync metadata
SELECT 103 AS failed_node_val \gset
SELECT nodeid AS failed_nodeid FROM pg_dist_node WHERE metadatasynced = false \gset
SELECT get_shard_id_for_distribution_column('dist1', :failed_node_val) AS shardid \gset
SELECT groupid = :failed_nodeid FROM pg_dist_placement WHERE shardid = :shardid;
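-- If the check above returns true, the shard holding the value 103 is placed
-- on the unsynced node, so the statements below demonstrate that the
-- coordinator can still route reads and writes to a node whose metadata sync
-- previously failed.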

-- Show that we can still insert into a shard at the node from coordinator
INSERT INTO dist1 VALUES (:failed_node_val);

-- Show that we can still update a shard at the node from coordinator
UPDATE dist1 SET id = :failed_node_val WHERE id = :failed_node_val;

-- Show that we can still delete from a shard at the node from coordinator
DELETE FROM dist1 WHERE id = :failed_node_val;

-- Show that DDL would still propagate to the node
SET client_min_messages TO NOTICE;
SET citus.log_remote_commands TO 1;
CREATE SCHEMA dummy;
SET citus.log_remote_commands TO 0;
SET client_min_messages TO ERROR;

-- Successfully activate the node after many failures
SELECT citus.mitmproxy('conn.allow()');
SELECT citus_activate_node('localhost', :worker_2_proxy_port);
-- Activate the node once more to verify it works again with already synced metadata
SELECT citus_activate_node('localhost', :worker_2_proxy_port);

-- Show node metadata info on worker2 and coordinator after success
\c - - - :worker_2_port
SELECT * FROM pg_dist_node ORDER BY nodeport;
\c - - - :master_port
SELECT * FROM pg_dist_node ORDER BY nodeport;
SELECT citus.mitmproxy('conn.allow()');

RESET citus.metadata_sync_mode;
DROP SCHEMA dummy;
DROP SCHEMA mx_metadata_sync_multi_trans CASCADE;
DROP ROLE foo1;
DROP ROLE foo2;
SELECT citus_remove_node('localhost', :master_port);

@ -749,15 +749,6 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
ROLLBACK;

-- the user is only allowed to delete shards in a distributed transaction
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus_internal gpid=10000000001';
\set VERBOSITY terse
WITH shard_data(shardid)
AS (VALUES (1420007))
SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
ROLLBACK;

-- the user cannot delete non-existing shards
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');

@ -4,9 +4,12 @@ ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1;

-- Tests functions related to cluster membership

-- add the nodes to the cluster
-- add the first node to the cluster in transactional mode
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
-- add the second node in nontransactional mode
SET citus.metadata_sync_mode TO 'nontransactional';
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
RESET citus.metadata_sync_mode;
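-- An illustrative follow-up (editor's sketch, not part of the original test):
-- after mixing transactional and nontransactional additions, both workers
-- should be registered and active, which could be checked with:
-- SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodeport;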

-- I am coordinator
SELECT citus_is_coordinator();

@ -506,5 +509,19 @@ BEGIN;
COMMIT;
SELECT start_metadata_sync_to_all_nodes();

-- nontransactional sync mode tests
SET citus.metadata_sync_mode TO 'nontransactional';
-- do not allow nontransactional sync inside transaction block
BEGIN;
SELECT start_metadata_sync_to_all_nodes();
COMMIT;
SELECT start_metadata_sync_to_all_nodes();
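-- The in-block attempt above is expected to fail: nontransactional sync
-- commits each command in a separate transaction, so it cannot honor the
-- atomicity of an enclosing BEGIN block; the same call succeeds outside one.
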
-- do not allow nontransactional node addition inside transaction block
BEGIN;
SELECT citus_remove_node('localhost', :worker_1_port);
SELECT citus_add_node('localhost', :worker_1_port);
COMMIT;
RESET citus.metadata_sync_mode;

-- verify that at the end of this file, all primary nodes have metadata synced
SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';

@ -17,3 +17,8 @@ FROM pg_dist_node_metadata, pg_extension WHERE extname = 'citus';
-- still, do not NOTICE the version as it changes per release
SET client_min_messages TO WARNING;
CALL citus_finish_citus_upgrade();

-- we should be able to sync metadata in a nontransactional way as well
SET citus.metadata_sync_mode TO 'nontransactional';
SELECT start_metadata_sync_to_all_nodes();
RESET citus.metadata_sync_mode;
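-- An illustrative follow-up (editor's sketch, not part of the original test):
-- one way to confirm the nontransactional sync completed on every primary is
-- SELECT bool_and(hasmetadata AND metadatasynced) FROM pg_dist_node
--   WHERE isactive AND noderole = 'primary';
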
@ -4,7 +4,7 @@ SET search_path = post_11_upgrade;
SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.employees'::regclass, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.my_type_for_view'::regtype, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_table_for_view'::regclass, 'post_11_upgrade.non_dist_upgrade_test_view'::regclass, 'post_11_upgrade.non_dist_upgrade_test_view_local_join'::regclass, 'post_11_upgrade.non_dist_upgrade_multiple_dist_view'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 'post_11_upgrade.reporting_line'::regclass, 'post_11_upgrade.v_test_1'::regclass, 'post_11_upgrade.v_test_2'::regclass, 'post_11_upgrade.owned_by_extension_table'::regclass, 'post_11_upgrade.materialized_view'::regclass, 'post_11_upgrade.owned_by_extension_view'::regclass, 'post_11_upgrade.local_type'::regtype, 'post_11_upgrade.non_dist_dist_table_for_view'::regclass, 'post_11_upgrade.depends_on_nothing_1'::regclass, 'post_11_upgrade.depends_on_nothing_2'::regclass, 'post_11_upgrade.depends_on_pg'::regclass, 'post_11_upgrade.depends_on_citus'::regclass, 'post_11_upgrade.depends_on_seq'::regclass, 'post_11_upgrade.depends_on_seq_and_no_support'::regclass) ORDER BY 1;

-- on all nodes
SELECT run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 'post_11_upgrade.reporting_line'::regclass) ORDER BY 1;$$) ORDER BY 1;
SELECT run_command_on_workers($$SELECT array_agg(worker_object) FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) worker_object FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 'post_11_upgrade.reporting_line'::regclass) ORDER BY 1) worker_objects;$$) ORDER BY 1;

-- Create the necessary test utility function
CREATE OR REPLACE FUNCTION activate_node_snapshot()