diff --git a/.circleci/config.yml b/.circleci/config.yml index e0ec494fe..39b144110 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -468,7 +468,7 @@ jobs: when: on_fail - store_artifacts: name: 'Save tap logs' - path: /home/circleci/project/src/test/recovery/tmp_check/log + path: /home/circleci/project/src/test/<< parameters.suite >>/tmp_check/log - store_artifacts: name: 'Save core dumps' path: /tmp/core_dumps @@ -598,6 +598,12 @@ workflows: image_tag: '<< pipeline.parameters.pg13_version >>' suite: recovery requires: [build-13] + - tap-test-citus: + name: 'test-13_tap-columnar-freezing' + pg_major: 13 + image_tag: '<< pipeline.parameters.pg13_version >>' + suite: columnar_freezing + requires: [build-13] - test-citus: name: 'test-13_check-failure' pg_major: 13 @@ -666,6 +672,12 @@ workflows: image_tag: '<< pipeline.parameters.pg14_version >>' suite: recovery requires: [build-14] + - tap-test-citus: + name: 'test-14_tap-columnar-freezing' + pg_major: 14 + image_tag: '<< pipeline.parameters.pg14_version >>' + suite: columnar_freezing + requires: [build-14] - test-citus: name: 'test-14_check-failure' pg_major: 14 diff --git a/CHANGELOG.md b/CHANGELOG.md index 50a9c57e2..94bafe5d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +### citus v11.0.1_beta (April 11, 2022) ### + +* Adds propagation of `DOMAIN` objects + +* Adds support for `TABLESAMPLE` + +* Allows adding a unique constraint with an index + +* Fixes a bug that could cause `EXPLAIN ANALYZE` to fail for prepared statements + with custom type + +* Fixes a bug that could cause Citus not to create function in transaction block + properly + +* Fixes a bug that could cause returning invalid JSON when running + `EXPLAIN ANALYZE` with subplans + +* Fixes a bug that prevents non-client backends from accessing shards + ### citus v11.0.0_beta (March 22, 2022) ### * Drops support for PostgreSQL 12 diff --git a/src/backend/columnar/README.md b/src/backend/columnar/README.md index 218c99df8..e8681e0f3 100644 --- a/src/backend/columnar/README.md +++ b/src/backend/columnar/README.md @@ -90,38 +90,25 @@ data. Set options using: ```sql -alter_columnar_table_set( - relid REGCLASS, - chunk_group_row_limit INT4 DEFAULT NULL, - stripe_row_limit INT4 DEFAULT NULL, - compression NAME DEFAULT NULL, - compression_level INT4) -``` - -For example: - -```sql -SELECT alter_columnar_table_set( - 'my_columnar_table', - compression => 'none', - stripe_row_limit => 10000); +ALTER TABLE my_columnar_table SET + (columnar.compression = none, columnar.stripe_row_limit = 10000); ``` The following options are available: -* **compression**: `[none|pglz|zstd|lz4|lz4hc]` - set the compression type +* **columnar.compression**: `[none|pglz|zstd|lz4|lz4hc]` - set the compression type for _newly-inserted_ data. Existing data will not be recompressed/decompressed. The default value is `zstd` (if support has been compiled in). -* **compression_level**: ```` - Sets compression level. Valid +* **columnar.compression_level**: ```` - Sets compression level. Valid settings are from 1 through 19. If the compression method does not support the level chosen, the closest level will be selected instead. -* **stripe_row_limit**: ```` - the maximum number of rows per +* **columnar.stripe_row_limit**: ```` - the maximum number of rows per stripe for _newly-inserted_ data. Existing stripes of data will not be changed and may have more rows than this maximum value. The default value is `150000`. 
-* **chunk_group_row_limit**: ```` - the maximum number of rows per +* **columnar.chunk_group_row_limit**: ```` - the maximum number of rows per chunk for _newly-inserted_ data. Existing chunks of data will not be changed and may have more rows than this maximum value. The default value is `10000`. diff --git a/src/backend/columnar/columnar_debug.c b/src/backend/columnar/columnar_debug.c index 220d259fe..e6b19f768 100644 --- a/src/backend/columnar/columnar_debug.c +++ b/src/backend/columnar/columnar_debug.c @@ -115,8 +115,6 @@ columnar_storage_info(PG_FUNCTION_ARGS) RelationGetRelationName(rel)))); } - RelationOpenSmgr(rel); - Datum values[STORAGE_INFO_NATTS] = { 0 }; bool nulls[STORAGE_INFO_NATTS] = { 0 }; diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c index 62d64861f..34939710d 100644 --- a/src/backend/columnar/columnar_metadata.c +++ b/src/backend/columnar/columnar_metadata.c @@ -59,6 +59,7 @@ #include "utils/rel.h" #include "utils/relfilenodemap.h" +#define COLUMNAR_RELOPTION_NAMESPACE "columnar" typedef struct { @@ -82,6 +83,7 @@ typedef enum RowNumberLookupMode FIND_GREATER } RowNumberLookupMode; +static void ParseColumnarRelOptions(List *reloptions, ColumnarOptions *options); static void InsertEmptyStripeMetadataRow(uint64 storageId, uint64 stripeId, uint32 columnCount, uint32 chunkGroupRowCount, uint64 firstRowNumber); @@ -218,6 +220,154 @@ InitColumnarOptions(Oid regclass) } +/* + * ParseColumnarRelOptions - update the given 'options' using the given list + * of DefElem. + */ +static void +ParseColumnarRelOptions(List *reloptions, ColumnarOptions *options) +{ + ListCell *lc = NULL; + + foreach(lc, reloptions) + { + DefElem *elem = castNode(DefElem, lfirst(lc)); + + if (elem->defnamespace == NULL || + strcmp(elem->defnamespace, COLUMNAR_RELOPTION_NAMESPACE) != 0) + { + ereport(ERROR, (errmsg("columnar options must have the prefix \"%s\"", + COLUMNAR_RELOPTION_NAMESPACE))); + } + + if (strcmp(elem->defname, "chunk_group_row_limit") == 0) + { + options->chunkRowCount = (elem->arg == NULL) ? + columnar_chunk_group_row_limit : defGetInt64(elem); + if (options->chunkRowCount < CHUNK_ROW_COUNT_MINIMUM || + options->chunkRowCount > CHUNK_ROW_COUNT_MAXIMUM) + { + ereport(ERROR, (errmsg("chunk group row count limit out of range"), + errhint("chunk group row count limit must be between " + UINT64_FORMAT " and " UINT64_FORMAT, + (uint64) CHUNK_ROW_COUNT_MINIMUM, + (uint64) CHUNK_ROW_COUNT_MAXIMUM))); + } + } + else if (strcmp(elem->defname, "stripe_row_limit") == 0) + { + options->stripeRowCount = (elem->arg == NULL) ? + columnar_stripe_row_limit : defGetInt64(elem); + + if (options->stripeRowCount < STRIPE_ROW_COUNT_MINIMUM || + options->stripeRowCount > STRIPE_ROW_COUNT_MAXIMUM) + { + ereport(ERROR, (errmsg("stripe row count limit out of range"), + errhint("stripe row count limit must be between " + UINT64_FORMAT " and " UINT64_FORMAT, + (uint64) STRIPE_ROW_COUNT_MINIMUM, + (uint64) STRIPE_ROW_COUNT_MAXIMUM))); + } + } + else if (strcmp(elem->defname, "compression") == 0) + { + options->compressionType = (elem->arg == NULL) ? + columnar_compression : ParseCompressionType( + defGetString(elem)); + + if (options->compressionType == COMPRESSION_TYPE_INVALID) + { + ereport(ERROR, (errmsg("unknown compression type for columnar table: %s", + quote_identifier(defGetString(elem))))); + } + } + else if (strcmp(elem->defname, "compression_level") == 0) + { + options->compressionLevel = (elem->arg == NULL) ? 
+ columnar_compression_level : defGetInt64(elem); + + if (options->compressionLevel < COMPRESSION_LEVEL_MIN || + options->compressionLevel > COMPRESSION_LEVEL_MAX) + { + ereport(ERROR, (errmsg("compression level out of range"), + errhint("compression level must be between %d and %d", + COMPRESSION_LEVEL_MIN, + COMPRESSION_LEVEL_MAX))); + } + } + else + { + ereport(ERROR, (errmsg("unrecognized columnar storage parameter \"%s\"", + elem->defname))); + } + } +} + + +/* + * ExtractColumnarOptions - extract columnar options from inOptions, appending + * to inoutColumnarOptions. Return the remaining (non-columnar) options. + */ +List * +ExtractColumnarRelOptions(List *inOptions, List **inoutColumnarOptions) +{ + List *otherOptions = NIL; + + ListCell *lc = NULL; + foreach(lc, inOptions) + { + DefElem *elem = castNode(DefElem, lfirst(lc)); + + if (elem->defnamespace != NULL && + strcmp(elem->defnamespace, COLUMNAR_RELOPTION_NAMESPACE) == 0) + { + *inoutColumnarOptions = lappend(*inoutColumnarOptions, elem); + } + else + { + otherOptions = lappend(otherOptions, elem); + } + } + + /* validate options */ + ColumnarOptions dummy = { 0 }; + ParseColumnarRelOptions(*inoutColumnarOptions, &dummy); + + return otherOptions; +} + + +/* + * SetColumnarRelOptions - apply the list of DefElem options to the + * relation. If there are duplicates, the last one in the list takes effect. + */ +void +SetColumnarRelOptions(RangeVar *rv, List *reloptions) +{ + ColumnarOptions options = { 0 }; + + if (reloptions == NIL) + { + return; + } + + Relation rel = relation_openrv(rv, AccessShareLock); + Oid relid = RelationGetRelid(rel); + relation_close(rel, NoLock); + + /* get existing or default options */ + if (!ReadColumnarOptions(relid, &options)) + { + /* if extension doesn't exist, just return */ + return; + } + + ParseColumnarRelOptions(reloptions, &options); + + SetColumnarOptions(relid, &options); +} + + /* * SetColumnarOptions writes the passed table options as the authoritive options to the * table irregardless of the optiones already existing or not. 
This can be used to put a @@ -1433,7 +1583,7 @@ DeleteTupleAndEnforceConstraints(ModifyState *state, HeapTuple heapTuple) simple_heap_delete(state->rel, tid); /* execute AFTER ROW DELETE Triggers to enforce constraints */ - ExecARDeleteTriggers(estate, resultRelInfo, tid, NULL, NULL); + ExecARDeleteTriggers_compat(estate, resultRelInfo, tid, NULL, NULL, false); } @@ -1670,7 +1820,15 @@ ColumnarChunkGroupIndexRelationId(void) static Oid ColumnarNamespaceId(void) { - return get_namespace_oid("columnar", false); + Oid namespace = get_namespace_oid("columnar_internal", true); + + /* if schema is earlier than 11.1-1 */ + if (!OidIsValid(namespace)) + { + namespace = get_namespace_oid("columnar", false); + } + + return namespace; } @@ -1712,6 +1870,13 @@ columnar_relation_storageid(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); Relation relation = relation_open(relationId, AccessShareLock); + + if (!pg_class_ownercheck(relationId, GetUserId())) + { + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE, + get_rel_name(relationId)); + } + if (!IsColumnarTableAmTable(relationId)) { elog(ERROR, "relation \"%s\" is not a columnar table", @@ -1738,11 +1903,10 @@ ColumnarStorageUpdateIfNeeded(Relation rel, bool isUpgrade) return; } - RelationOpenSmgr(rel); - BlockNumber nblocks = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + BlockNumber nblocks = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); if (nblocks < 2) { - ColumnarStorageInit(rel->rd_smgr, ColumnarMetadataNewStorageId()); + ColumnarStorageInit(RelationGetSmgr(rel), ColumnarMetadataNewStorageId()); return; } diff --git a/src/backend/columnar/columnar_storage.c b/src/backend/columnar/columnar_storage.c index 71fc75ccb..9712e7160 100644 --- a/src/backend/columnar/columnar_storage.c +++ b/src/backend/columnar/columnar_storage.c @@ -44,6 +44,8 @@ #include "storage/bufmgr.h" #include "storage/lmgr.h" +#include "pg_version_compat.h" + #include "columnar/columnar.h" #include "columnar/columnar_storage.h" @@ -354,8 +356,7 @@ ColumnarStorageGetReservedOffset(Relation rel, bool force) bool ColumnarStorageIsCurrent(Relation rel) { - RelationOpenSmgr(rel); - BlockNumber nblocks = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + BlockNumber nblocks = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); if (nblocks < 2) { @@ -439,8 +440,7 @@ ColumnarStorageReserveData(Relation rel, uint64 amount) PhysicalAddr final = LogicalToPhysical(nextReservation - 1); /* extend with new pages */ - RelationOpenSmgr(rel); - BlockNumber nblocks = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + BlockNumber nblocks = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); while (nblocks <= final.blockno) { @@ -547,8 +547,7 @@ ColumnarStorageTruncate(Relation rel, uint64 newDataReservation) rel->rd_id, newDataReservation); } - RelationOpenSmgr(rel); - BlockNumber old_rel_pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + BlockNumber old_rel_pages = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); if (old_rel_pages == 0) { /* nothing to do */ @@ -627,8 +626,7 @@ ColumnarOverwriteMetapage(Relation relation, ColumnarMetapage columnarMetapage) static ColumnarMetapage ColumnarMetapageRead(Relation rel, bool force) { - RelationOpenSmgr(rel); - BlockNumber nblocks = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + BlockNumber nblocks = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); if (nblocks == 0) { /* diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 891f937a7..af964402b 100644 --- a/src/backend/columnar/columnar_tableam.c +++ 
b/src/backend/columnar/columnar_tableam.c @@ -20,6 +20,7 @@ #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/index.h" +#include "catalog/namespace.h" #include "catalog/objectaccess.h" #include "catalog/pg_am.h" #include "catalog/pg_publication.h" @@ -104,9 +105,6 @@ typedef struct IndexFetchColumnarData MemoryContext scanContext; } IndexFetchColumnarData; -/* available to other extensions using find_rendezvous_variable() */ -static ColumnarTableSetOptions_hook_type ColumnarTableSetOptions_hook = NULL; - static object_access_hook_type PrevObjectAccessHook = NULL; static ProcessUtility_hook_type PrevProcessUtilityHook = NULL; @@ -117,6 +115,8 @@ static void ColumnarTriggerCreateHook(Oid tgid); static void ColumnarTableAMObjectAccessHook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg); +static RangeVar * ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, + List **columnarOptions); static void ColumnarProcessUtility(PlannedStmt *pstmt, const char *queryString, #if PG_VERSION_NUM >= PG_VERSION_14 @@ -882,7 +882,7 @@ columnar_relation_set_new_filenode(Relation rel, *freezeXid = RecentXmin; *minmulti = GetOldestMultiXactId(); - SMgrRelation srel = RelationCreateStorage(*newrnode, persistence); + SMgrRelation srel = RelationCreateStorage_compat(*newrnode, persistence, true); ColumnarStorageInit(srel, ColumnarMetadataNewStorageId()); InitColumnarOptions(rel->rd_id); @@ -914,8 +914,7 @@ columnar_relation_nontransactional_truncate(Relation rel) RelationTruncate(rel, 0); uint64 storageId = ColumnarMetadataNewStorageId(); - RelationOpenSmgr(rel); - ColumnarStorageInit(rel->rd_smgr, storageId); + ColumnarStorageInit(RelationGetSmgr(rel), storageId); } @@ -1034,6 +1033,27 @@ NeededColumnsList(TupleDesc tupdesc, Bitmapset *attr_needed) } +/* + * ColumnarTableTupleCount returns the number of tuples that columnar + * table with relationId has by using stripe metadata. + */ +static uint64 +ColumnarTableTupleCount(Relation relation) +{ + List *stripeList = StripesForRelfilenode(relation->rd_node); + uint64 tupleCount = 0; + + ListCell *lc = NULL; + foreach(lc, stripeList) + { + StripeMetadata *stripe = lfirst(lc); + tupleCount += stripe->rowCount; + } + + return tupleCount; +} + + /* * columnar_vacuum_rel implements VACUUM without FULL option. */ @@ -1050,6 +1070,9 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params, return; } + pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM, + RelationGetRelid(rel)); + /* * If metapage version of relation is older, then we hint users to VACUUM * the relation in ColumnarMetapageCheckVersion. 
So if needed, upgrade @@ -1073,6 +1096,78 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params, { TruncateColumnar(rel, elevel); } + + BlockNumber new_rel_pages = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); + + /* get the number of indexes */ + List *indexList = RelationGetIndexList(rel); + int nindexes = list_length(indexList); + + TransactionId oldestXmin; + TransactionId freezeLimit; + MultiXactId multiXactCutoff; + + /* initialize xids */ +#if PG_VERSION_NUM >= PG_VERSION_15 + MultiXactId oldestMxact; + vacuum_set_xid_limits(rel, + params->freeze_min_age, + params->freeze_table_age, + params->multixact_freeze_min_age, + params->multixact_freeze_table_age, + &oldestXmin, &oldestMxact, + &freezeLimit, &multiXactCutoff); + + Assert(MultiXactIdPrecedesOrEquals(multiXactCutoff, oldestMxact)); +#else + TransactionId xidFullScanLimit; + MultiXactId mxactFullScanLimit; + vacuum_set_xid_limits(rel, + params->freeze_min_age, + params->freeze_table_age, + params->multixact_freeze_min_age, + params->multixact_freeze_table_age, + &oldestXmin, &freezeLimit, &xidFullScanLimit, + &multiXactCutoff, &mxactFullScanLimit); +#endif + + Assert(TransactionIdPrecedesOrEquals(freezeLimit, oldestXmin)); + + /* + * Columnar storage doesn't hold any transaction IDs, so we can always + * just advance to the most aggressive value. + */ + TransactionId newRelFrozenXid = oldestXmin; +#if PG_VERSION_NUM >= PG_VERSION_15 + MultiXactId newRelminMxid = oldestMxact; +#else + MultiXactId newRelminMxid = multiXactCutoff; +#endif + + double new_live_tuples = ColumnarTableTupleCount(rel); + + /* all visible pages are always 0 */ + BlockNumber new_rel_allvisible = 0; + +#if PG_VERSION_NUM >= PG_VERSION_15 + bool frozenxid_updated; + bool minmulti_updated; + + vac_update_relstats(rel, new_rel_pages, new_live_tuples, + new_rel_allvisible, nindexes > 0, + newRelFrozenXid, newRelminMxid, + &frozenxid_updated, &minmulti_updated, false); +#else + vac_update_relstats(rel, new_rel_pages, new_live_tuples, + new_rel_allvisible, nindexes > 0, + newRelFrozenXid, newRelminMxid, false); +#endif + + pgstat_report_vacuum(RelationGetRelid(rel), + rel->rd_rel->relisshared, + Max(new_live_tuples, 0), + 0); + pgstat_progress_end_command(); } @@ -1137,8 +1232,7 @@ LogRelationStats(Relation rel, int elevel) totalStripeLength += stripe->dataLength; } - RelationOpenSmgr(rel); - uint64 relPages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + uint64 relPages = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); RelationCloseSmgr(rel); Datum storageId = DirectFunctionCall1(columnar_relation_storageid, @@ -1240,8 +1334,7 @@ TruncateColumnar(Relation rel, int elevel) uint64 newDataReservation = Max(GetHighestUsedAddress(rel->rd_node) + 1, ColumnarFirstLogicalOffset); - RelationOpenSmgr(rel); - BlockNumber old_rel_pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + BlockNumber old_rel_pages = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); if (!ColumnarStorageTruncate(rel, newDataReservation)) { @@ -1249,8 +1342,7 @@ TruncateColumnar(Relation rel, int elevel) return; } - RelationOpenSmgr(rel); - BlockNumber new_rel_pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + BlockNumber new_rel_pages = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); /* * We can release the exclusive lock as soon as we have truncated. 
@@ -1785,20 +1877,17 @@ columnar_relation_size(Relation rel, ForkNumber forkNumber) uint64 nblocks = 0; - /* Open it at the smgr level if not already done */ - RelationOpenSmgr(rel); - /* InvalidForkNumber indicates returning the size for all forks */ if (forkNumber == InvalidForkNumber) { for (int i = 0; i < MAX_FORKNUM; i++) { - nblocks += smgrnblocks(rel->rd_smgr, i); + nblocks += smgrnblocks(RelationGetSmgr(rel), i); } } else { - nblocks = smgrnblocks(rel->rd_smgr, forkNumber); + nblocks = smgrnblocks(RelationGetSmgr(rel), forkNumber); } return nblocks * BLCKSZ; @@ -1820,8 +1909,7 @@ columnar_estimate_rel_size(Relation rel, int32 *attr_widths, double *allvisfrac) { CheckCitusColumnarVersion(ERROR); - RelationOpenSmgr(rel); - *pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); + *pages = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM); *tuples = ColumnarTableRowCount(rel); /* @@ -1911,11 +1999,6 @@ ColumnarSubXactCallback(SubXactEvent event, SubTransactionId mySubid, void columnar_tableam_init() { - ColumnarTableSetOptions_hook_type **ColumnarTableSetOptions_hook_ptr = - (ColumnarTableSetOptions_hook_type **) find_rendezvous_variable( - COLUMNAR_SETOPTIONS_HOOK_SYM); - *ColumnarTableSetOptions_hook_ptr = &ColumnarTableSetOptions_hook; - RegisterXactCallback(ColumnarXactCallback, NULL); RegisterSubXactCallback(ColumnarSubXactCallback, NULL); @@ -2089,6 +2172,71 @@ ColumnarTableAMObjectAccessHook(ObjectAccessType access, Oid classId, Oid object } +/* + * ColumnarProcessAlterTable - if modifying a columnar table, extract columnar + * options and return the table's RangeVar. + */ +static RangeVar * +ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions) +{ + RangeVar *columnarRangeVar = NULL; + Relation rel = relation_openrv_extended(alterTableStmt->relation, AccessShareLock, + alterTableStmt->missing_ok); + + if (rel == NULL) + { + return NULL; + } + + /* track separately in case of ALTER TABLE ... SET ACCESS METHOD */ + bool srcIsColumnar = rel->rd_tableam == GetColumnarTableAmRoutine(); + bool destIsColumnar = srcIsColumnar; + + ListCell *lc = NULL; + foreach(lc, alterTableStmt->cmds) + { + AlterTableCmd *alterTableCmd = castNode(AlterTableCmd, lfirst(lc)); + + if (alterTableCmd->subtype == AT_SetRelOptions || + alterTableCmd->subtype == AT_ResetRelOptions) + { + List *options = castNode(List, alterTableCmd->def); + + alterTableCmd->def = (Node *) ExtractColumnarRelOptions( + options, columnarOptions); + + if (destIsColumnar) + { + columnarRangeVar = alterTableStmt->relation; + } + } +#if PG_VERSION_NUM >= PG_VERSION_15 + else if (alterTableCmd->subtype == AT_SetAccessMethod) + { + if (columnarRangeVar || *columnarOptions) + { + ereport(ERROR, (errmsg( + "ALTER TABLE cannot alter the access method after altering storage parameters"), + errhint( + "Specify SET ACCESS METHOD before storage parameters, or use separate ALTER TABLE commands."))); + } + + destIsColumnar = (strcmp(alterTableCmd->name, COLUMNAR_AM_NAME) == 0); + + if (srcIsColumnar && !destIsColumnar) + { + DeleteColumnarTableOptions(RelationGetRelid(rel), true); + } + } +#endif /* PG_VERSION_15 */ + } + + relation_close(rel, NoLock); + + return columnarRangeVar; +} + + /* * Utility hook for columnar tables. 
*/ @@ -2113,27 +2261,160 @@ ColumnarProcessUtility(PlannedStmt *pstmt, Node *parsetree = pstmt->utilityStmt; - if (IsA(parsetree, IndexStmt)) + RangeVar *columnarRangeVar = NULL; + List *columnarOptions = NIL; + + switch (nodeTag(parsetree)) { - IndexStmt *indexStmt = (IndexStmt *) parsetree; - - Relation rel = relation_openrv(indexStmt->relation, - indexStmt->concurrent ? ShareUpdateExclusiveLock : - ShareLock); - - if (rel->rd_tableam == GetColumnarTableAmRoutine()) + case T_IndexStmt: { - CheckCitusColumnarVersion(ERROR); - if (!ColumnarSupportsIndexAM(indexStmt->accessMethod)) + IndexStmt *indexStmt = (IndexStmt *) parsetree; + + Relation rel = relation_openrv(indexStmt->relation, + indexStmt->concurrent ? + ShareUpdateExclusiveLock : + ShareLock); + + if (rel->rd_tableam == GetColumnarTableAmRoutine()) { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unsupported access method for the " - "index on columnar table %s", - RelationGetRelationName(rel)))); + CheckCitusColumnarVersion(ERROR); + if (!ColumnarSupportsIndexAM(indexStmt->accessMethod)) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported access method for the " + "index on columnar table %s", + RelationGetRelationName(rel)))); + } } + + RelationClose(rel); + break; } - RelationClose(rel); + case T_CreateStmt: + { + CreateStmt *createStmt = castNode(CreateStmt, parsetree); + bool no_op = false; + + if (createStmt->if_not_exists) + { + Oid existing_relid; + + /* use same check as transformCreateStmt */ + (void) RangeVarGetAndCheckCreationNamespace( + createStmt->relation, AccessShareLock, &existing_relid); + + no_op = OidIsValid(existing_relid); + } + + if (!no_op && createStmt->accessMethod != NULL && + !strcmp(createStmt->accessMethod, COLUMNAR_AM_NAME)) + { + columnarRangeVar = createStmt->relation; + createStmt->options = ExtractColumnarRelOptions(createStmt->options, + &columnarOptions); + } + break; + } + + case T_CreateTableAsStmt: + { + CreateTableAsStmt *createTableAsStmt = castNode(CreateTableAsStmt, parsetree); + IntoClause *into = createTableAsStmt->into; + bool no_op = false; + + if (createTableAsStmt->if_not_exists) + { + Oid existing_relid; + + /* use same check as transformCreateStmt */ + (void) RangeVarGetAndCheckCreationNamespace( + into->rel, AccessShareLock, &existing_relid); + + no_op = OidIsValid(existing_relid); + } + + if (!no_op && into->accessMethod != NULL && + !strcmp(into->accessMethod, COLUMNAR_AM_NAME)) + { + columnarRangeVar = into->rel; + into->options = ExtractColumnarRelOptions(into->options, + &columnarOptions); + } + break; + } + + case T_AlterTableStmt: + { + AlterTableStmt *alterTableStmt = castNode(AlterTableStmt, parsetree); + columnarRangeVar = ColumnarProcessAlterTable(alterTableStmt, + &columnarOptions); + break; + } + + default: + + /* FALL THROUGH */ + break; + } + + if (columnarOptions != NIL && columnarRangeVar == NULL) + { + ereport(ERROR, + (errmsg("columnar storage parameters specified on non-columnar table"))); + } + + if (IsA(parsetree, CreateExtensionStmt)) + { + CreateExtensionStmt *createExtensionStmt = castNode(CreateExtensionStmt, + parsetree); + + if (get_extension_oid("citus_columnar", true) == InvalidOid) + { + if (strcmp(createExtensionStmt->extname, "citus_columnar") == 0) + { + DefElem *newVersionValue = GetExtensionOption( + createExtensionStmt->options, + "new_version"); + if (newVersionValue) + { + const char *newVersion = defGetString(newVersionValue); + if (strcmp(newVersion, "11.1-0") == 0) + { + 
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg( + "unsupported citus_columnar version 11.1-0"))); + } + } + + /*latest citus requires install columnar first, existing citus can only be an older version */ + if (get_extension_oid("citus", true) != InvalidOid) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg( + "must upgrade citus to version 11.1-1 first"))); + } + } + } + } + + if (IsA(parsetree, AlterExtensionStmt)) + { + AlterExtensionStmt *alterExtensionStmt = castNode(AlterExtensionStmt, parsetree); + if (strcmp(alterExtensionStmt->extname, "citus_columnar") == 0) + { + DefElem *newVersionValue = GetExtensionOption(alterExtensionStmt->options, + "new_version"); + if (newVersionValue) + { + const char *newVersion = defGetString(newVersionValue); + if (strcmp(newVersion, "11.1-0") == 0) + { + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported citus_columnar version 11.1-0"))); + } + } + } } if (IsA(parsetree, CreateExtensionStmt)) @@ -2191,6 +2472,11 @@ ColumnarProcessUtility(PlannedStmt *pstmt, PrevProcessUtilityHook_compat(pstmt, queryString, false, context, params, queryEnv, dest, completionTag); + + if (columnarOptions != NIL) + { + SetColumnarRelOptions(columnarRangeVar, columnarOptions); + } } @@ -2354,18 +2640,30 @@ detoast_values(TupleDesc tupleDesc, Datum *orig_values, bool *isnull) static void ColumnarCheckLogicalReplication(Relation rel) { + bool pubActionInsert = false; + if (!is_publishable_relation(rel)) { return; } +#if PG_VERSION_NUM >= PG_VERSION_15 + { + PublicationDesc pubdesc; + + RelationBuildPublicationDesc(rel, &pubdesc); + pubActionInsert = pubdesc.pubactions.pubinsert; + } +#else if (rel->rd_pubactions == NULL) { GetRelationPublicationActions(rel); Assert(rel->rd_pubactions != NULL); } + pubActionInsert = rel->rd_pubactions->pubinsert; +#endif - if (rel->rd_pubactions->pubinsert) + if (pubActionInsert) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg( @@ -2375,219 +2673,36 @@ ColumnarCheckLogicalReplication(Relation rel) /* - * alter_columnar_table_set is a UDF exposed in postgres to change settings on a columnar - * table. Calling this function on a non-columnar table gives an error. + * alter_columnar_table_set() * - * sql syntax: - * pg_catalog.alter_columnar_table_set( - * table_name regclass, - * chunk_group_row_limit int DEFAULT NULL, - * stripe_row_limit int DEFAULT NULL, - * compression name DEFAULT null) + * Deprecated in 11.1-1: should issue ALTER TABLE ... SET instead. Function + * still available, but implemented in PL/pgSQL instead of C. * - * All arguments except the table name are optional. The UDF is supposed to be called - * like: - * SELECT alter_columnar_table_set('table', compression => 'pglz'); - * - * This will only update the compression of the table, keeping all other settings the - * same. Multiple settings can be changed at the same time by providing multiple - * arguments. Calling the argument with the NULL value will be interperted as not having - * provided the argument. + * C code is removed -- the symbol may still be required in some + * upgrade/downgrade paths, but it should not be called. 
*/ PG_FUNCTION_INFO_V1(alter_columnar_table_set); Datum alter_columnar_table_set(PG_FUNCTION_ARGS) { - CheckCitusColumnarVersion(ERROR); - - Oid relationId = PG_GETARG_OID(0); - - Relation rel = table_open(relationId, AccessExclusiveLock); /* ALTER TABLE LOCK */ - if (!IsColumnarTableAmTable(relationId)) - { - ereport(ERROR, (errmsg("table %s is not a columnar table", - quote_identifier(RelationGetRelationName(rel))))); - } - - if (!pg_class_ownercheck(relationId, GetUserId())) - { - aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE, - get_rel_name(relationId)); - } - - ColumnarOptions options = { 0 }; - if (!ReadColumnarOptions(relationId, &options)) - { - ereport(ERROR, (errmsg("unable to read current options for table"))); - } - - /* chunk_group_row_limit => not null */ - if (!PG_ARGISNULL(1)) - { - options.chunkRowCount = PG_GETARG_INT32(1); - if (options.chunkRowCount < CHUNK_ROW_COUNT_MINIMUM || - options.chunkRowCount > CHUNK_ROW_COUNT_MAXIMUM) - { - ereport(ERROR, (errmsg("chunk group row count limit out of range"), - errhint("chunk group row count limit must be between " - UINT64_FORMAT " and " UINT64_FORMAT, - (uint64) CHUNK_ROW_COUNT_MINIMUM, - (uint64) CHUNK_ROW_COUNT_MAXIMUM))); - } - ereport(DEBUG1, - (errmsg("updating chunk row count to %d", options.chunkRowCount))); - } - - /* stripe_row_limit => not null */ - if (!PG_ARGISNULL(2)) - { - options.stripeRowCount = PG_GETARG_INT32(2); - if (options.stripeRowCount < STRIPE_ROW_COUNT_MINIMUM || - options.stripeRowCount > STRIPE_ROW_COUNT_MAXIMUM) - { - ereport(ERROR, (errmsg("stripe row count limit out of range"), - errhint("stripe row count limit must be between " - UINT64_FORMAT " and " UINT64_FORMAT, - (uint64) STRIPE_ROW_COUNT_MINIMUM, - (uint64) STRIPE_ROW_COUNT_MAXIMUM))); - } - ereport(DEBUG1, (errmsg( - "updating stripe row count to " UINT64_FORMAT, - options.stripeRowCount))); - } - - /* compression => not null */ - if (!PG_ARGISNULL(3)) - { - Name compressionName = PG_GETARG_NAME(3); - options.compressionType = ParseCompressionType(NameStr(*compressionName)); - if (options.compressionType == COMPRESSION_TYPE_INVALID) - { - ereport(ERROR, (errmsg("unknown compression type for columnar table: %s", - quote_identifier(NameStr(*compressionName))))); - } - ereport(DEBUG1, (errmsg("updating compression to %s", - CompressionTypeStr(options.compressionType)))); - } - - /* compression_level => not null */ - if (!PG_ARGISNULL(4)) - { - options.compressionLevel = PG_GETARG_INT32(4); - if (options.compressionLevel < COMPRESSION_LEVEL_MIN || - options.compressionLevel > COMPRESSION_LEVEL_MAX) - { - ereport(ERROR, (errmsg("compression level out of range"), - errhint("compression level must be between %d and %d", - COMPRESSION_LEVEL_MIN, - COMPRESSION_LEVEL_MAX))); - } - - ereport(DEBUG1, (errmsg("updating compression level to %d", - options.compressionLevel))); - } - - if (ColumnarTableSetOptions_hook != NULL) - { - ColumnarTableSetOptions_hook(relationId, options); - } - - SetColumnarOptions(relationId, &options); - - table_close(rel, NoLock); - - PG_RETURN_VOID(); + elog(ERROR, "alter_columnar_table_set is deprecated"); } /* - * alter_columnar_table_reset is a UDF exposed in postgres to reset the settings on a - * columnar table. Calling this function on a non-columnar table gives an error. 
+ * alter_columnar_table_reset() * - * sql syntax: - * pg_catalog.alter_columnar_table_re - * teset( - * table_name regclass, - * chunk_group_row_limit bool DEFAULT FALSE, - * stripe_row_limit bool DEFAULT FALSE, - * compression bool DEFAULT FALSE) + * Deprecated in 11.1-1: should issue ALTER TABLE ... RESET instead. Function + * still available, but implemented in PL/pgSQL instead of C. * - * All arguments except the table name are optional. The UDF is supposed to be called - * like: - * SELECT alter_columnar_table_set('table', compression => true); - * - * All options set to true will be reset to the default system value. + * C code is removed -- the symbol may still be required in some + * upgrade/downgrade paths, but it should not be called. */ PG_FUNCTION_INFO_V1(alter_columnar_table_reset); Datum alter_columnar_table_reset(PG_FUNCTION_ARGS) { - CheckCitusColumnarVersion(ERROR); - - Oid relationId = PG_GETARG_OID(0); - - Relation rel = table_open(relationId, AccessExclusiveLock); /* ALTER TABLE LOCK */ - if (!IsColumnarTableAmTable(relationId)) - { - ereport(ERROR, (errmsg("table %s is not a columnar table", - quote_identifier(RelationGetRelationName(rel))))); - } - - if (!pg_class_ownercheck(relationId, GetUserId())) - { - aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE, - get_rel_name(relationId)); - } - - ColumnarOptions options = { 0 }; - if (!ReadColumnarOptions(relationId, &options)) - { - ereport(ERROR, (errmsg("unable to read current options for table"))); - } - - /* chunk_group_row_limit => true */ - if (!PG_ARGISNULL(1) && PG_GETARG_BOOL(1)) - { - options.chunkRowCount = columnar_chunk_group_row_limit; - ereport(DEBUG1, - (errmsg("resetting chunk row count to %d", options.chunkRowCount))); - } - - /* stripe_row_limit => true */ - if (!PG_ARGISNULL(2) && PG_GETARG_BOOL(2)) - { - options.stripeRowCount = columnar_stripe_row_limit; - ereport(DEBUG1, - (errmsg("resetting stripe row count to " UINT64_FORMAT, - options.stripeRowCount))); - } - - /* compression => true */ - if (!PG_ARGISNULL(3) && PG_GETARG_BOOL(3)) - { - options.compressionType = columnar_compression; - ereport(DEBUG1, (errmsg("resetting compression to %s", - CompressionTypeStr(options.compressionType)))); - } - - /* compression_level => true */ - if (!PG_ARGISNULL(4) && PG_GETARG_BOOL(4)) - { - options.compressionLevel = columnar_compression_level; - ereport(DEBUG1, (errmsg("reseting compression level to %d", - columnar_compression_level))); - } - - if (ColumnarTableSetOptions_hook != NULL) - { - ColumnarTableSetOptions_hook(relationId, options); - } - - SetColumnarOptions(relationId, &options); - - table_close(rel, NoLock); - - PG_RETURN_VOID(); + elog(ERROR, "alter_columnar_table_reset is deprecated"); } diff --git a/src/backend/columnar/sql/citus_columnar--11.1-0--11.1-1.sql b/src/backend/columnar/sql/citus_columnar--11.1-0--11.1-1.sql index a70d883df..ee04f5edc 100644 --- a/src/backend/columnar/sql/citus_columnar--11.1-0--11.1-1.sql +++ b/src/backend/columnar/sql/citus_columnar--11.1-0--11.1-1.sql @@ -6,18 +6,233 @@ ALTER EXTENSION citus_columnar ADD TABLE columnar.options; ALTER EXTENSION citus_columnar ADD TABLE columnar.stripe; ALTER EXTENSION citus_columnar ADD TABLE columnar.chunk_group; ALTER EXTENSION citus_columnar ADD TABLE columnar.chunk; -DO $proc$ -BEGIN --- columnar functions -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE $$ - ALTER EXTENSION citus_columnar ADD FUNCTION columnar.columnar_handler; - ALTER EXTENSION citus_columnar ADD ACCESS METHOD columnar; - 
ALTER EXTENSION citus_columnar ADD FUNCTION pg_catalog.alter_columnar_table_set; - ALTER EXTENSION citus_columnar ADD FUNCTION pg_catalog.alter_columnar_table_reset; - $$; -END IF; -END$proc$; + +ALTER EXTENSION citus_columnar ADD FUNCTION columnar.columnar_handler; +ALTER EXTENSION citus_columnar ADD ACCESS METHOD columnar; +ALTER EXTENSION citus_columnar ADD FUNCTION pg_catalog.alter_columnar_table_set; +ALTER EXTENSION citus_columnar ADD FUNCTION pg_catalog.alter_columnar_table_reset; + ALTER EXTENSION citus_columnar ADD FUNCTION citus_internal.upgrade_columnar_storage; ALTER EXTENSION citus_columnar ADD FUNCTION citus_internal.downgrade_columnar_storage; ALTER EXTENSION citus_columnar ADD FUNCTION citus_internal.columnar_ensure_am_depends_catalog; + +CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_set( + table_name regclass, + chunk_group_row_limit int DEFAULT NULL, + stripe_row_limit int DEFAULT NULL, + compression name DEFAULT null, + compression_level int DEFAULT NULL) + RETURNS void + LANGUAGE plpgsql AS +$alter_columnar_table_set$ +declare + noop BOOLEAN := true; + cmd TEXT := 'ALTER TABLE ' || table_name::text || ' SET ('; +begin + if (chunk_group_row_limit is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.chunk_group_row_limit=' || chunk_group_row_limit; + noop := false; + end if; + if (stripe_row_limit is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.stripe_row_limit=' || stripe_row_limit; + noop := false; + end if; + if (compression is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression=' || compression; + noop := false; + end if; + if (compression_level is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression_level=' || compression_level; + noop := false; + end if; + cmd := cmd || ')'; + if (not noop) then + execute cmd; + end if; + return; +end; +$alter_columnar_table_set$; + +COMMENT ON FUNCTION pg_catalog.alter_columnar_table_set( + table_name regclass, + chunk_group_row_limit int, + stripe_row_limit int, + compression name, + compression_level int) +IS 'set one or more options on a columnar table, when set to NULL no change is made'; + +CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_reset( + table_name regclass, + chunk_group_row_limit bool DEFAULT false, + stripe_row_limit bool DEFAULT false, + compression bool DEFAULT false, + compression_level bool DEFAULT false) + RETURNS void + LANGUAGE plpgsql AS +$alter_columnar_table_reset$ +declare + noop BOOLEAN := true; + cmd TEXT := 'ALTER TABLE ' || table_name::text || ' RESET ('; +begin + if (chunk_group_row_limit) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.chunk_group_row_limit'; + noop := false; + end if; + if (stripe_row_limit) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.stripe_row_limit'; + noop := false; + end if; + if (compression) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression'; + noop := false; + end if; + if (compression_level) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression_level'; + noop := false; + end if; + cmd := cmd || ')'; + if (not noop) then + execute cmd; + end if; + return; +end; +$alter_columnar_table_reset$; + +COMMENT ON FUNCTION pg_catalog.alter_columnar_table_reset( + table_name regclass, + chunk_group_row_limit bool, + stripe_row_limit 
bool, + compression bool, + compression_level bool) +IS 'reset on or more options on a columnar table to the system defaults'; + +-- rename columnar schema to columnar_internal and tighten security + +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA columnar FROM PUBLIC; +ALTER SCHEMA columnar RENAME TO columnar_internal; +REVOKE ALL PRIVILEGES ON SCHEMA columnar_internal FROM PUBLIC; + +-- move citus_internal functions to columnar_internal + +ALTER FUNCTION citus_internal.upgrade_columnar_storage(regclass) SET SCHEMA columnar_internal; +ALTER FUNCTION citus_internal.downgrade_columnar_storage(regclass) SET SCHEMA columnar_internal; +ALTER FUNCTION citus_internal.columnar_ensure_am_depends_catalog() SET SCHEMA columnar_internal; + +-- create columnar schema with public usage privileges + +CREATE SCHEMA columnar; +GRANT USAGE ON SCHEMA columnar TO PUBLIC; + +-- update UDF to account for columnar_internal schema +CREATE OR REPLACE FUNCTION columnar_internal.columnar_ensure_am_depends_catalog() + RETURNS void + LANGUAGE plpgsql + SET search_path = pg_catalog +AS $func$ +BEGIN + INSERT INTO pg_depend + WITH columnar_schema_members(relid) AS ( + SELECT pg_class.oid AS relid FROM pg_class + WHERE relnamespace = + COALESCE( + (SELECT pg_namespace.oid FROM pg_namespace WHERE nspname = 'columnar_internal'), + (SELECT pg_namespace.oid FROM pg_namespace WHERE nspname = 'columnar') + ) + AND relname IN ('chunk', + 'chunk_group', + 'chunk_group_pkey', + 'chunk_pkey', + 'options', + 'options_pkey', + 'storageid_seq', + 'stripe', + 'stripe_first_row_number_idx', + 'stripe_pkey') + ) + SELECT -- Define a dependency edge from "columnar table access method" .. + 'pg_am'::regclass::oid as classid, + (select oid from pg_am where amname = 'columnar') as objid, + 0 as objsubid, + -- ... to each object that is registered to pg_class and that lives + -- in "columnar" schema. That contains catalog tables, indexes + -- created on them and the sequences created in "columnar" schema. + -- + -- Given the possibility of user might have created their own objects + -- in columnar schema, we explicitly specify list of objects that we + -- are interested in. + 'pg_class'::regclass::oid as refclassid, + columnar_schema_members.relid as refobjid, + 0 as refobjsubid, + 'n' as deptype + FROM columnar_schema_members + -- Avoid inserting duplicate entries into pg_depend. 
+ EXCEPT TABLE pg_depend; +END; +$func$; +COMMENT ON FUNCTION columnar_internal.columnar_ensure_am_depends_catalog() + IS 'internal function responsible for creating dependencies from columnar ' + 'table access method to the rel objects in columnar schema'; + +-- add utility function + +CREATE FUNCTION columnar.get_storage_id(regclass) RETURNS bigint + LANGUAGE C STRICT + AS 'citus_columnar', $$columnar_relation_storageid$$; + +-- create views for columnar table information + +CREATE VIEW columnar.storage WITH (security_barrier) AS + SELECT c.oid::regclass AS relation, + columnar.get_storage_id(c.oid) AS storage_id + FROM pg_class c, pg_am am + WHERE c.relam = am.oid AND am.amname = 'columnar' + AND pg_has_role(c.relowner, 'USAGE'); +COMMENT ON VIEW columnar.storage IS 'Columnar relation ID to storage ID mapping.'; +GRANT SELECT ON columnar.storage TO PUBLIC; + +CREATE VIEW columnar.options WITH (security_barrier) AS + SELECT regclass AS relation, chunk_group_row_limit, + stripe_row_limit, compression, compression_level + FROM columnar_internal.options o, pg_class c + WHERE o.regclass = c.oid + AND pg_has_role(c.relowner, 'USAGE'); +COMMENT ON VIEW columnar.options + IS 'Columnar options for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.options TO PUBLIC; + +CREATE VIEW columnar.stripe WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, file_offset, data_length, + column_count, chunk_row_count, row_count, chunk_group_count, first_row_number + FROM columnar_internal.stripe stripe, columnar.storage storage + WHERE stripe.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.stripe + IS 'Columnar stripe information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.stripe TO PUBLIC; + +CREATE VIEW columnar.chunk_group WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, chunk_group_num, row_count + FROM columnar_internal.chunk_group cg, columnar.storage storage + WHERE cg.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.chunk_group + IS 'Columnar chunk group information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.chunk_group TO PUBLIC; + +CREATE VIEW columnar.chunk WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, attr_num, chunk_group_num, + minimum_value, maximum_value, value_stream_offset, value_stream_length, + exists_stream_offset, exists_stream_length, value_compression_type, + value_compression_level, value_decompressed_length, value_count + FROM columnar_internal.chunk chunk, columnar.storage storage + WHERE chunk.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.chunk + IS 'Columnar chunk information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.chunk TO PUBLIC; + diff --git a/src/backend/columnar/sql/citus_columnar--11.1-1.sql b/src/backend/columnar/sql/citus_columnar--11.1-1.sql index e1266876e..c1081aba2 100644 --- a/src/backend/columnar/sql/citus_columnar--11.1-1.sql +++ b/src/backend/columnar/sql/citus_columnar--11.1-1.sql @@ -134,9 +134,6 @@ IS 'reset on or more options on a columnar table to the system defaults'; END IF; END$proc$; --- add citus_internal schema -CREATE SCHEMA IF NOT EXISTS citus_internal; - -- (this function being dropped in 10.0.3)->#include "udfs/columnar_ensure_objects_exist/10.0-1.sql" RESET search_path; @@ -175,30 +172,6 @@ BEGIN END; $$; ---#include 
"udfs/upgrade_columnar_storage/10.2-1.sql" -CREATE OR REPLACE FUNCTION citus_internal.upgrade_columnar_storage(rel regclass) - RETURNS VOID - STRICT - LANGUAGE c AS 'MODULE_PATHNAME', $$upgrade_columnar_storage$$; - -COMMENT ON FUNCTION citus_internal.upgrade_columnar_storage(regclass) - IS 'function to upgrade the columnar storage, if necessary'; - - ---#include "udfs/downgrade_columnar_storage/10.2-1.sql" - -CREATE OR REPLACE FUNCTION citus_internal.downgrade_columnar_storage(rel regclass) - RETURNS VOID - STRICT - LANGUAGE c AS 'MODULE_PATHNAME', $$downgrade_columnar_storage$$; - -COMMENT ON FUNCTION citus_internal.downgrade_columnar_storage(regclass) - IS 'function to downgrade the columnar storage, if necessary'; - --- upgrade storage for all columnar relations -SELECT citus_internal.upgrade_columnar_storage(c.oid) FROM pg_class c, pg_am a - WHERE c.relam = a.oid AND amname = 'columnar'; - -- columnar--10.2-1--10.2-2.sql -- revoke read access for columnar.chunk from unprivileged @@ -221,13 +194,164 @@ REVOKE SELECT ON columnar.chunk FROM PUBLIC; -- columnar--10.2-3--10.2-4.sql -CREATE OR REPLACE FUNCTION citus_internal.columnar_ensure_am_depends_catalog() + +-- columnar--11.0-2--11.1-1.sql + +CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_set( + table_name regclass, + chunk_group_row_limit int DEFAULT NULL, + stripe_row_limit int DEFAULT NULL, + compression name DEFAULT null, + compression_level int DEFAULT NULL) + RETURNS void + LANGUAGE plpgsql AS +$alter_columnar_table_set$ +declare + noop BOOLEAN := true; + cmd TEXT := 'ALTER TABLE ' || table_name::text || ' SET ('; +begin + if (chunk_group_row_limit is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.chunk_group_row_limit=' || chunk_group_row_limit; + noop := false; + end if; + if (stripe_row_limit is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.stripe_row_limit=' || stripe_row_limit; + noop := false; + end if; + if (compression is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression=' || compression; + noop := false; + end if; + if (compression_level is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression_level=' || compression_level; + noop := false; + end if; + cmd := cmd || ')'; + if (not noop) then + execute cmd; + end if; + return; +end; +$alter_columnar_table_set$; + +COMMENT ON FUNCTION pg_catalog.alter_columnar_table_set( + table_name regclass, + chunk_group_row_limit int, + stripe_row_limit int, + compression name, + compression_level int) +IS 'set one or more options on a columnar table, when set to NULL no change is made'; + +CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_reset( + table_name regclass, + chunk_group_row_limit bool DEFAULT false, + stripe_row_limit bool DEFAULT false, + compression bool DEFAULT false, + compression_level bool DEFAULT false) + RETURNS void + LANGUAGE plpgsql AS +$alter_columnar_table_reset$ +declare + noop BOOLEAN := true; + cmd TEXT := 'ALTER TABLE ' || table_name::text || ' RESET ('; +begin + if (chunk_group_row_limit) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.chunk_group_row_limit'; + noop := false; + end if; + if (stripe_row_limit) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.stripe_row_limit'; + noop := false; + end if; + if (compression) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 
'columnar.compression'; + noop := false; + end if; + if (compression_level) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression_level'; + noop := false; + end if; + cmd := cmd || ')'; + if (not noop) then + execute cmd; + end if; + return; +end; +$alter_columnar_table_reset$; + +COMMENT ON FUNCTION pg_catalog.alter_columnar_table_reset( + table_name regclass, + chunk_group_row_limit bool, + stripe_row_limit bool, + compression bool, + compression_level bool) +IS 'reset on or more options on a columnar table to the system defaults'; + +-- rename columnar schema to columnar_internal and tighten security + +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA columnar FROM PUBLIC; +ALTER SCHEMA columnar RENAME TO columnar_internal; +REVOKE ALL PRIVILEGES ON SCHEMA columnar_internal FROM PUBLIC; + +-- create columnar schema with public usage privileges + +CREATE SCHEMA columnar; +GRANT USAGE ON SCHEMA columnar TO PUBLIC; + +--#include "udfs/upgrade_columnar_storage/10.2-1.sql" +CREATE OR REPLACE FUNCTION columnar_internal.upgrade_columnar_storage(rel regclass) + RETURNS VOID + STRICT + LANGUAGE c AS 'MODULE_PATHNAME', $$upgrade_columnar_storage$$; + +COMMENT ON FUNCTION columnar_internal.upgrade_columnar_storage(regclass) + IS 'function to upgrade the columnar storage, if necessary'; + + +--#include "udfs/downgrade_columnar_storage/10.2-1.sql" + +CREATE OR REPLACE FUNCTION columnar_internal.downgrade_columnar_storage(rel regclass) + RETURNS VOID + STRICT + LANGUAGE c AS 'MODULE_PATHNAME', $$downgrade_columnar_storage$$; + +COMMENT ON FUNCTION columnar_internal.downgrade_columnar_storage(regclass) + IS 'function to downgrade the columnar storage, if necessary'; + +-- update UDF to account for columnar_internal schema +CREATE OR REPLACE FUNCTION columnar_internal.columnar_ensure_am_depends_catalog() RETURNS void LANGUAGE plpgsql SET search_path = pg_catalog AS $func$ BEGIN INSERT INTO pg_depend + WITH columnar_schema_members(relid) AS ( + SELECT pg_class.oid AS relid FROM pg_class + WHERE relnamespace = + COALESCE( + (SELECT pg_namespace.oid FROM pg_namespace WHERE nspname = 'columnar_internal'), + (SELECT pg_namespace.oid FROM pg_namespace WHERE nspname = 'columnar') + ) + AND relname IN ('chunk', + 'chunk_group', + 'chunk_group_pkey', + 'chunk_pkey', + 'options', + 'options_pkey', + 'storageid_seq', + 'stripe', + 'stripe_first_row_number_idx', + 'stripe_pkey') + ) SELECT -- Define a dependency edge from "columnar table access method" .. 'pg_am'::regclass::oid as classid, (select oid from pg_am where amname = 'columnar') as objid, @@ -240,27 +364,72 @@ BEGIN -- in columnar schema, we explicitly specify list of objects that we -- are interested in. 'pg_class'::regclass::oid as refclassid, - columnar_schema_members.relname::regclass::oid as refobjid, + columnar_schema_members.relid as refobjid, 0 as refobjsubid, 'n' as deptype - FROM (VALUES ('columnar.chunk'), - ('columnar.chunk_group'), - ('columnar.chunk_group_pkey'), - ('columnar.chunk_pkey'), - ('columnar.options'), - ('columnar.options_pkey'), - ('columnar.storageid_seq'), - ('columnar.stripe'), - ('columnar.stripe_first_row_number_idx'), - ('columnar.stripe_pkey') - ) columnar_schema_members(relname) + FROM columnar_schema_members -- Avoid inserting duplicate entries into pg_depend. 
EXCEPT TABLE pg_depend; END; $func$; -COMMENT ON FUNCTION citus_internal.columnar_ensure_am_depends_catalog() +COMMENT ON FUNCTION columnar_internal.columnar_ensure_am_depends_catalog() IS 'internal function responsible for creating dependencies from columnar ' 'table access method to the rel objects in columnar schema'; +SELECT columnar_internal.columnar_ensure_am_depends_catalog(); + +-- add utility function + +CREATE FUNCTION columnar.get_storage_id(regclass) RETURNS bigint + LANGUAGE C STRICT + AS 'citus_columnar', $$columnar_relation_storageid$$; + +-- create views for columnar table information + +CREATE VIEW columnar.storage WITH (security_barrier) AS + SELECT c.oid::regclass AS relation, + columnar.get_storage_id(c.oid) AS storage_id + FROM pg_class c, pg_am am + WHERE c.relam = am.oid AND am.amname = 'columnar' + AND pg_has_role(c.relowner, 'USAGE'); +COMMENT ON VIEW columnar.storage IS 'Columnar relation ID to storage ID mapping.'; +GRANT SELECT ON columnar.storage TO PUBLIC; + +CREATE VIEW columnar.options WITH (security_barrier) AS + SELECT regclass AS relation, chunk_group_row_limit, + stripe_row_limit, compression, compression_level + FROM columnar_internal.options o, pg_class c + WHERE o.regclass = c.oid + AND pg_has_role(c.relowner, 'USAGE'); +COMMENT ON VIEW columnar.options + IS 'Columnar options for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.options TO PUBLIC; + +CREATE VIEW columnar.stripe WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, file_offset, data_length, + column_count, chunk_row_count, row_count, chunk_group_count, first_row_number + FROM columnar_internal.stripe stripe, columnar.storage storage + WHERE stripe.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.stripe + IS 'Columnar stripe information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.stripe TO PUBLIC; + +CREATE VIEW columnar.chunk_group WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, chunk_group_num, row_count + FROM columnar_internal.chunk_group cg, columnar.storage storage + WHERE cg.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.chunk_group + IS 'Columnar chunk group information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.chunk_group TO PUBLIC; + +CREATE VIEW columnar.chunk WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, attr_num, chunk_group_num, + minimum_value, maximum_value, value_stream_offset, value_stream_length, + exists_stream_offset, exists_stream_length, value_compression_type, + value_compression_level, value_decompressed_length, value_count + FROM columnar_internal.chunk chunk, columnar.storage storage + WHERE chunk.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.chunk + IS 'Columnar chunk information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.chunk TO PUBLIC; -SELECT citus_internal.columnar_ensure_am_depends_catalog(); diff --git a/src/backend/columnar/sql/columnar--11.0-2--11.1-1.sql b/src/backend/columnar/sql/columnar--11.0-2--11.1-1.sql new file mode 100644 index 000000000..85ec6ceb0 --- /dev/null +++ b/src/backend/columnar/sql/columnar--11.0-2--11.1-1.sql @@ -0,0 +1,71 @@ +#include "udfs/alter_columnar_table_set/11.1-1.sql" +#include "udfs/alter_columnar_table_reset/11.1-1.sql" + +-- rename columnar schema to columnar_internal and tighten security + +REVOKE ALL PRIVILEGES ON ALL 
TABLES IN SCHEMA columnar FROM PUBLIC; +ALTER SCHEMA columnar RENAME TO columnar_internal; +REVOKE ALL PRIVILEGES ON SCHEMA columnar_internal FROM PUBLIC; + +-- create columnar schema with public usage privileges + +CREATE SCHEMA columnar; +GRANT USAGE ON SCHEMA columnar TO PUBLIC; + +-- update UDF to account for columnar_internal schema +#include "udfs/columnar_ensure_am_depends_catalog/11.1-1.sql" + +-- add utility function + +CREATE FUNCTION columnar.get_storage_id(regclass) RETURNS bigint + LANGUAGE C STRICT + AS 'citus_columnar', $$columnar_relation_storageid$$; + +-- create views for columnar table information + +CREATE VIEW columnar.storage WITH (security_barrier) AS + SELECT c.oid::regclass AS relation, + columnar.get_storage_id(c.oid) AS storage_id + FROM pg_class c, pg_am am + WHERE c.relam = am.oid AND am.amname = 'columnar' + AND pg_has_role(c.relowner, 'USAGE'); +COMMENT ON VIEW columnar.storage IS 'Columnar relation ID to storage ID mapping.'; +GRANT SELECT ON columnar.storage TO PUBLIC; + +CREATE VIEW columnar.options WITH (security_barrier) AS + SELECT regclass AS relation, chunk_group_row_limit, + stripe_row_limit, compression, compression_level + FROM columnar_internal.options o, pg_class c + WHERE o.regclass = c.oid + AND pg_has_role(c.relowner, 'USAGE'); +COMMENT ON VIEW columnar.options + IS 'Columnar options for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.options TO PUBLIC; + +CREATE VIEW columnar.stripe WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, file_offset, data_length, + column_count, chunk_row_count, row_count, chunk_group_count, first_row_number + FROM columnar_internal.stripe stripe, columnar.storage storage + WHERE stripe.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.stripe + IS 'Columnar stripe information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.stripe TO PUBLIC; + +CREATE VIEW columnar.chunk_group WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, chunk_group_num, row_count + FROM columnar_internal.chunk_group cg, columnar.storage storage + WHERE cg.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.chunk_group + IS 'Columnar chunk group information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.chunk_group TO PUBLIC; + +CREATE VIEW columnar.chunk WITH (security_barrier) AS + SELECT relation, storage.storage_id, stripe_num, attr_num, chunk_group_num, + minimum_value, maximum_value, value_stream_offset, value_stream_length, + exists_stream_offset, exists_stream_length, value_compression_type, + value_compression_level, value_decompressed_length, value_count + FROM columnar_internal.chunk chunk, columnar.storage storage + WHERE chunk.storage_id = storage.storage_id; +COMMENT ON VIEW columnar.chunk + IS 'Columnar chunk information for tables on which the current user has ownership privileges.'; +GRANT SELECT ON columnar.chunk TO PUBLIC; diff --git a/src/backend/columnar/sql/downgrades/citus_columnar--11.1-1--11.1-0.sql b/src/backend/columnar/sql/downgrades/citus_columnar--11.1-1--11.1-0.sql index 81f3c7d3f..cd454a2e5 100644 --- a/src/backend/columnar/sql/downgrades/citus_columnar--11.1-1--11.1-0.sql +++ b/src/backend/columnar/sql/downgrades/citus_columnar--11.1-1--11.1-0.sql @@ -1,3 +1,100 @@ +CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_set( + table_name regclass, + chunk_group_row_limit int DEFAULT NULL, + stripe_row_limit int 
DEFAULT NULL, + compression name DEFAULT null, + compression_level int DEFAULT NULL) + RETURNS void + LANGUAGE C +AS 'MODULE_PATHNAME', 'alter_columnar_table_set'; + +COMMENT ON FUNCTION pg_catalog.alter_columnar_table_set( + table_name regclass, + chunk_group_row_limit int, + stripe_row_limit int, + compression name, + compression_level int) +IS 'set one or more options on a columnar table, when set to NULL no change is made'; +CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_reset( + table_name regclass, + chunk_group_row_limit bool DEFAULT false, + stripe_row_limit bool DEFAULT false, + compression bool DEFAULT false, + compression_level bool DEFAULT false) + RETURNS void + LANGUAGE C +AS 'MODULE_PATHNAME', 'alter_columnar_table_reset'; + +COMMENT ON FUNCTION pg_catalog.alter_columnar_table_reset( + table_name regclass, + chunk_group_row_limit bool, + stripe_row_limit bool, + compression bool, + compression_level bool) +IS 'reset on or more options on a columnar table to the system defaults'; + +CREATE OR REPLACE FUNCTION columnar_internal.columnar_ensure_am_depends_catalog() + RETURNS void + LANGUAGE plpgsql + SET search_path = pg_catalog +AS $func$ +BEGIN + INSERT INTO pg_depend + SELECT -- Define a dependency edge from "columnar table access method" .. + 'pg_am'::regclass::oid as classid, + (select oid from pg_am where amname = 'columnar') as objid, + 0 as objsubid, + -- ... to each object that is registered to pg_class and that lives + -- in "columnar" schema. That contains catalog tables, indexes + -- created on them and the sequences created in "columnar" schema. + -- + -- Given the possibility of user might have created their own objects + -- in columnar schema, we explicitly specify list of objects that we + -- are interested in. + 'pg_class'::regclass::oid as refclassid, + columnar_schema_members.relname::regclass::oid as refobjid, + 0 as refobjsubid, + 'n' as deptype + FROM (VALUES ('columnar.chunk'), + ('columnar.chunk_group'), + ('columnar.chunk_group_pkey'), + ('columnar.chunk_pkey'), + ('columnar.options'), + ('columnar.options_pkey'), + ('columnar.storageid_seq'), + ('columnar.stripe'), + ('columnar.stripe_first_row_number_idx'), + ('columnar.stripe_pkey') + ) columnar_schema_members(relname) + -- Avoid inserting duplicate entries into pg_depend. 
+ EXCEPT TABLE pg_depend; +END; +$func$; +COMMENT ON FUNCTION columnar_internal.columnar_ensure_am_depends_catalog() + IS 'internal function responsible for creating dependencies from columnar ' + 'table access method to the rel objects in columnar schema'; + +DROP VIEW columnar.options; +DROP VIEW columnar.stripe; +DROP VIEW columnar.chunk_group; +DROP VIEW columnar.chunk; +DROP VIEW columnar.storage; +DROP FUNCTION columnar.get_storage_id(regclass); + +DROP SCHEMA columnar; + +-- move columnar_internal functions back to citus_internal + +ALTER FUNCTION columnar_internal.upgrade_columnar_storage(regclass) SET SCHEMA citus_internal; +ALTER FUNCTION columnar_internal.downgrade_columnar_storage(regclass) SET SCHEMA citus_internal; +ALTER FUNCTION columnar_internal.columnar_ensure_am_depends_catalog() SET SCHEMA citus_internal; + +ALTER SCHEMA columnar_internal RENAME TO columnar; +GRANT USAGE ON SCHEMA columnar TO PUBLIC; +GRANT SELECT ON columnar.options TO PUBLIC; +GRANT SELECT ON columnar.stripe TO PUBLIC; +GRANT SELECT ON columnar.chunk_group TO PUBLIC; + -- detach relations from citus_columnar ALTER EXTENSION citus_columnar DROP SCHEMA columnar; @@ -8,18 +105,10 @@ ALTER EXTENSION citus_columnar DROP TABLE columnar.stripe; ALTER EXTENSION citus_columnar DROP TABLE columnar.chunk_group; ALTER EXTENSION citus_columnar DROP TABLE columnar.chunk; -DO $proc$ -BEGIN --- columnar functions -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE $$ - ALTER EXTENSION citus_columnar DROP FUNCTION columnar.columnar_handler; - ALTER EXTENSION citus_columnar DROP ACCESS METHOD columnar; - ALTER EXTENSION citus_columnar DROP FUNCTION pg_catalog.alter_columnar_table_set; - ALTER EXTENSION citus_columnar DROP FUNCTION pg_catalog.alter_columnar_table_reset; - $$; -END IF; -END$proc$; +ALTER EXTENSION citus_columnar DROP FUNCTION columnar.columnar_handler; +ALTER EXTENSION citus_columnar DROP ACCESS METHOD columnar; +ALTER EXTENSION citus_columnar DROP FUNCTION pg_catalog.alter_columnar_table_set; +ALTER EXTENSION citus_columnar DROP FUNCTION pg_catalog.alter_columnar_table_reset; -- functions under citus_internal for columnar ALTER EXTENSION citus_columnar DROP FUNCTION citus_internal.upgrade_columnar_storage; diff --git a/src/backend/columnar/sql/downgrades/columnar--11.1-1--11.0-2.sql b/src/backend/columnar/sql/downgrades/columnar--11.1-1--11.0-2.sql new file mode 100644 index 000000000..ae763e62d --- /dev/null +++ b/src/backend/columnar/sql/downgrades/columnar--11.1-1--11.0-2.sql @@ -0,0 +1,19 @@ +#include "../udfs/alter_columnar_table_set/10.0-1.sql" +#include "../udfs/alter_columnar_table_reset/10.0-1.sql" + +#include "../udfs/columnar_ensure_am_depends_catalog/10.2-4.sql" + +DROP VIEW columnar.options; +DROP VIEW columnar.stripe; +DROP VIEW columnar.chunk_group; +DROP VIEW columnar.chunk; +DROP VIEW columnar.storage; +DROP FUNCTION columnar.get_storage_id(regclass); + +DROP SCHEMA columnar; + +ALTER SCHEMA columnar_internal RENAME TO columnar; +GRANT USAGE ON SCHEMA columnar TO PUBLIC; +GRANT SELECT ON columnar.options TO PUBLIC; +GRANT SELECT ON columnar.stripe TO PUBLIC; +GRANT SELECT ON columnar.chunk_group TO PUBLIC; diff --git a/src/backend/columnar/sql/udfs/alter_columnar_table_reset/11.1-1.sql b/src/backend/columnar/sql/udfs/alter_columnar_table_reset/11.1-1.sql new file mode 100644 index 000000000..7c636dd6e --- /dev/null +++ b/src/backend/columnar/sql/udfs/alter_columnar_table_reset/11.1-1.sql @@ -0,0 +1,48 @@ +CREATE OR REPLACE FUNCTION 
pg_catalog.alter_columnar_table_reset( + table_name regclass, + chunk_group_row_limit bool DEFAULT false, + stripe_row_limit bool DEFAULT false, + compression bool DEFAULT false, + compression_level bool DEFAULT false) + RETURNS void + LANGUAGE plpgsql AS +$alter_columnar_table_reset$ +declare + noop BOOLEAN := true; + cmd TEXT := 'ALTER TABLE ' || table_name::text || ' RESET ('; +begin + if (chunk_group_row_limit) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.chunk_group_row_limit'; + noop := false; + end if; + if (stripe_row_limit) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.stripe_row_limit'; + noop := false; + end if; + if (compression) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression'; + noop := false; + end if; + if (compression_level) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression_level'; + noop := false; + end if; + cmd := cmd || ')'; + if (not noop) then + execute cmd; + end if; + return; +end; +$alter_columnar_table_reset$; + +COMMENT ON FUNCTION pg_catalog.alter_columnar_table_reset( + table_name regclass, + chunk_group_row_limit bool, + stripe_row_limit bool, + compression bool, + compression_level bool) +IS 'reset on or more options on a columnar table to the system defaults'; diff --git a/src/backend/columnar/sql/udfs/alter_columnar_table_reset/latest.sql b/src/backend/columnar/sql/udfs/alter_columnar_table_reset/latest.sql index bfee6feb4..7c636dd6e 100644 --- a/src/backend/columnar/sql/udfs/alter_columnar_table_reset/latest.sql +++ b/src/backend/columnar/sql/udfs/alter_columnar_table_reset/latest.sql @@ -5,8 +5,39 @@ CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_reset( compression bool DEFAULT false, compression_level bool DEFAULT false) RETURNS void - LANGUAGE C -AS 'MODULE_PATHNAME', 'alter_columnar_table_reset'; + LANGUAGE plpgsql AS +$alter_columnar_table_reset$ +declare + noop BOOLEAN := true; + cmd TEXT := 'ALTER TABLE ' || table_name::text || ' RESET ('; +begin + if (chunk_group_row_limit) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.chunk_group_row_limit'; + noop := false; + end if; + if (stripe_row_limit) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.stripe_row_limit'; + noop := false; + end if; + if (compression) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression'; + noop := false; + end if; + if (compression_level) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression_level'; + noop := false; + end if; + cmd := cmd || ')'; + if (not noop) then + execute cmd; + end if; + return; +end; +$alter_columnar_table_reset$; COMMENT ON FUNCTION pg_catalog.alter_columnar_table_reset( table_name regclass, diff --git a/src/backend/columnar/sql/udfs/alter_columnar_table_set/11.1-1.sql b/src/backend/columnar/sql/udfs/alter_columnar_table_set/11.1-1.sql new file mode 100644 index 000000000..9e9fe1be4 --- /dev/null +++ b/src/backend/columnar/sql/udfs/alter_columnar_table_set/11.1-1.sql @@ -0,0 +1,48 @@ +CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_set( + table_name regclass, + chunk_group_row_limit int DEFAULT NULL, + stripe_row_limit int DEFAULT NULL, + compression name DEFAULT null, + compression_level int DEFAULT NULL) + RETURNS void + LANGUAGE plpgsql AS +$alter_columnar_table_set$ +declare + noop BOOLEAN := true; + cmd TEXT := 'ALTER TABLE ' || 
table_name::text || ' SET ('; +begin + if (chunk_group_row_limit is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.chunk_group_row_limit=' || chunk_group_row_limit; + noop := false; + end if; + if (stripe_row_limit is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.stripe_row_limit=' || stripe_row_limit; + noop := false; + end if; + if (compression is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression=' || compression; + noop := false; + end if; + if (compression_level is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression_level=' || compression_level; + noop := false; + end if; + cmd := cmd || ')'; + if (not noop) then + execute cmd; + end if; + return; +end; +$alter_columnar_table_set$; + +COMMENT ON FUNCTION pg_catalog.alter_columnar_table_set( + table_name regclass, + chunk_group_row_limit int, + stripe_row_limit int, + compression name, + compression_level int) +IS 'set one or more options on a columnar table, when set to NULL no change is made'; diff --git a/src/backend/columnar/sql/udfs/alter_columnar_table_set/latest.sql b/src/backend/columnar/sql/udfs/alter_columnar_table_set/latest.sql index 39ddfb9dd..9e9fe1be4 100644 --- a/src/backend/columnar/sql/udfs/alter_columnar_table_set/latest.sql +++ b/src/backend/columnar/sql/udfs/alter_columnar_table_set/latest.sql @@ -5,8 +5,39 @@ CREATE OR REPLACE FUNCTION pg_catalog.alter_columnar_table_set( compression name DEFAULT null, compression_level int DEFAULT NULL) RETURNS void - LANGUAGE C -AS 'MODULE_PATHNAME', 'alter_columnar_table_set'; + LANGUAGE plpgsql AS +$alter_columnar_table_set$ +declare + noop BOOLEAN := true; + cmd TEXT := 'ALTER TABLE ' || table_name::text || ' SET ('; +begin + if (chunk_group_row_limit is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.chunk_group_row_limit=' || chunk_group_row_limit; + noop := false; + end if; + if (stripe_row_limit is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.stripe_row_limit=' || stripe_row_limit; + noop := false; + end if; + if (compression is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression=' || compression; + noop := false; + end if; + if (compression_level is not null) then + if (not noop) then cmd := cmd || ', '; end if; + cmd := cmd || 'columnar.compression_level=' || compression_level; + noop := false; + end if; + cmd := cmd || ')'; + if (not noop) then + execute cmd; + end if; + return; +end; +$alter_columnar_table_set$; COMMENT ON FUNCTION pg_catalog.alter_columnar_table_set( table_name regclass, diff --git a/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/11.1-1.sql b/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/11.1-1.sql new file mode 100644 index 000000000..ade15390a --- /dev/null +++ b/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/11.1-1.sql @@ -0,0 +1,48 @@ +CREATE OR REPLACE FUNCTION citus_internal.columnar_ensure_am_depends_catalog() + RETURNS void + LANGUAGE plpgsql + SET search_path = pg_catalog +AS $func$ +BEGIN + INSERT INTO pg_depend + WITH columnar_schema_members(relid) AS ( + SELECT pg_class.oid AS relid FROM pg_class + WHERE relnamespace = + COALESCE( + (SELECT pg_namespace.oid FROM pg_namespace WHERE nspname = 'columnar_internal'), + (SELECT pg_namespace.oid FROM pg_namespace WHERE 
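-- Editorial sketch (not part of the patch): the plpgsql wrappers above simply rewrite the
-- legacy UDF calls into the new reloption syntax. 'my_columnar_table' is hypothetical.
SELECT alter_columnar_table_set('my_columnar_table',
                                compression => 'zstd',
                                stripe_row_limit => 100000);
-- which is equivalent to the command the wrapper builds and executes:
ALTER TABLE my_columnar_table
    SET (columnar.compression = zstd, columnar.stripe_row_limit = 100000);
SELECT alter_columnar_table_reset('my_columnar_table', compression => true);
ALTER TABLE my_columnar_table RESET (columnar.compression);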
nspname = 'columnar') + ) + AND relname IN ('chunk', + 'chunk_group', + 'chunk_group_pkey', + 'chunk_pkey', + 'options', + 'options_pkey', + 'storageid_seq', + 'stripe', + 'stripe_first_row_number_idx', + 'stripe_pkey') + ) + SELECT -- Define a dependency edge from "columnar table access method" .. + 'pg_am'::regclass::oid as classid, + (select oid from pg_am where amname = 'columnar') as objid, + 0 as objsubid, + -- ... to each object that is registered to pg_class and that lives + -- in "columnar" schema. That contains catalog tables, indexes + -- created on them and the sequences created in "columnar" schema. + -- + -- Given the possibility of user might have created their own objects + -- in columnar schema, we explicitly specify list of objects that we + -- are interested in. + 'pg_class'::regclass::oid as refclassid, + columnar_schema_members.relid as refobjid, + 0 as refobjsubid, + 'n' as deptype + FROM columnar_schema_members + -- Avoid inserting duplicate entries into pg_depend. + EXCEPT TABLE pg_depend; +END; +$func$; +COMMENT ON FUNCTION citus_internal.columnar_ensure_am_depends_catalog() + IS 'internal function responsible for creating dependencies from columnar ' + 'table access method to the rel objects in columnar schema'; diff --git a/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/latest.sql b/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/latest.sql index 754e03fb1..ade15390a 100644 --- a/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/latest.sql +++ b/src/backend/columnar/sql/udfs/columnar_ensure_am_depends_catalog/latest.sql @@ -5,6 +5,24 @@ CREATE OR REPLACE FUNCTION citus_internal.columnar_ensure_am_depends_catalog() AS $func$ BEGIN INSERT INTO pg_depend + WITH columnar_schema_members(relid) AS ( + SELECT pg_class.oid AS relid FROM pg_class + WHERE relnamespace = + COALESCE( + (SELECT pg_namespace.oid FROM pg_namespace WHERE nspname = 'columnar_internal'), + (SELECT pg_namespace.oid FROM pg_namespace WHERE nspname = 'columnar') + ) + AND relname IN ('chunk', + 'chunk_group', + 'chunk_group_pkey', + 'chunk_pkey', + 'options', + 'options_pkey', + 'storageid_seq', + 'stripe', + 'stripe_first_row_number_idx', + 'stripe_pkey') + ) SELECT -- Define a dependency edge from "columnar table access method" .. 'pg_am'::regclass::oid as classid, (select oid from pg_am where amname = 'columnar') as objid, @@ -17,20 +35,10 @@ BEGIN -- in columnar schema, we explicitly specify list of objects that we -- are interested in. 'pg_class'::regclass::oid as refclassid, - columnar_schema_members.relname::regclass::oid as refobjid, + columnar_schema_members.relid as refobjid, 0 as refobjsubid, 'n' as deptype - FROM (VALUES ('columnar.chunk'), - ('columnar.chunk_group'), - ('columnar.chunk_group_pkey'), - ('columnar.chunk_pkey'), - ('columnar.options'), - ('columnar.options_pkey'), - ('columnar.storageid_seq'), - ('columnar.stripe'), - ('columnar.stripe_first_row_number_idx'), - ('columnar.stripe_pkey') - ) columnar_schema_members(relname) + FROM columnar_schema_members -- Avoid inserting duplicate entries into pg_depend. 
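-- Editorial sketch (not part of the patch): the pg_depend rows inserted by
-- columnar_ensure_am_depends_catalog() can be inspected with a query of this shape.
SELECT refobjid::regclass AS columnar_catalog_object
FROM pg_depend
WHERE classid = 'pg_am'::regclass
  AND objid = (SELECT oid FROM pg_am WHERE amname = 'columnar')
  AND refclassid = 'pg_class'::regclass
  AND deptype = 'n';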
EXCEPT TABLE pg_depend; END; diff --git a/src/backend/distributed/citus--11.1-1.control b/src/backend/distributed/citus--11.1-1.control new file mode 100644 index 000000000..93c69fc63 --- /dev/null +++ b/src/backend/distributed/citus--11.1-1.control @@ -0,0 +1 @@ +requires = 'citus_columnar' diff --git a/src/backend/distributed/commands/aggregate.c b/src/backend/distributed/commands/aggregate.c deleted file mode 100644 index 3e6de88e5..000000000 --- a/src/backend/distributed/commands/aggregate.c +++ /dev/null @@ -1,89 +0,0 @@ -/*------------------------------------------------------------------------- - * - * aggregate.c - * Commands for distributing AGGREGATE statements. - * - * Copyright (c) Citus Data, Inc. - * - *------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "distributed/commands.h" -#include "distributed/commands/utility_hook.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" -#include "distributed/metadata/dependency.h" -#include "distributed/metadata_sync.h" -#include "distributed/metadata/distobject.h" -#include "distributed/multi_executor.h" -#include "nodes/parsenodes.h" -#include "utils/lsyscache.h" - - -/* - * PreprocessDefineAggregateStmt only qualifies the node with schema name. - * We will handle the rest in the Postprocess phase. - */ -List * -PreprocessDefineAggregateStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - QualifyTreeNode((Node *) node); - - return NIL; -} - - -/* - * PostprocessDefineAggregateStmt actually creates the plan we need to execute for - * aggregate propagation. - * This is the downside of using the locally created aggregate to get the sql statement. - * - * If the aggregate depends on any non-distributed relation, Citus can not distribute it. - * In order to not to prevent users from creating local aggregates on the coordinator, - * a WARNING message will be sent to the user about the case instead of erroring out. - * - * Besides creating the plan we also make sure all (new) dependencies of the aggregate - * are created on all nodes. 
- */ -List * -PostprocessDefineAggregateStmt(Node *node, const char *queryString) -{ - DefineStmt *stmt = castNode(DefineStmt, node); - - if (!ShouldPropagate()) - { - return NIL; - } - - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return NIL; - } - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - - EnsureCoordinator(); - - EnsureSequentialMode(OBJECT_AGGREGATE); - - /* If the aggregate has any unsupported dependency, create it locally */ - DeferredErrorMessage *depError = DeferErrorIfHasUnsupportedDependency(&address); - - if (depError != NULL) - { - RaiseDeferredError(depError, WARNING); - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&address); - - List *commands = CreateFunctionDDLCommandsIdempotent(&address); - - commands = lcons(DISABLE_DDL_PROPAGATION, commands); - commands = lappend(commands, ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index 17e84dfaa..ca22f929b 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -206,6 +206,7 @@ static char * CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaNa char *sourceName, char *targetSchemaName, char *targetName); +static char * CreateMaterializedViewDDLCommand(Oid matViewOid); static char * GetAccessMethodForMatViewIfExists(Oid viewOid); static bool WillRecreateForeignKeyToReferenceTable(Oid relationId, CascadeToColocatedOption cascadeOption); @@ -216,6 +217,9 @@ PG_FUNCTION_INFO_V1(alter_distributed_table); PG_FUNCTION_INFO_V1(alter_table_set_access_method); PG_FUNCTION_INFO_V1(worker_change_sequence_dependency); +/* global variable keeping track of whether we are in a table type conversion function */ +bool InTableTypeConversionFunctionCall = false; + /* * undistribute_table gets a distributed table name and @@ -504,10 +508,16 @@ AlterTableSetAccessMethod(TableConversionParameters *params) * * The function returns a TableConversionReturn object that can stores variables that * can be used at the caller operations. + * + * To be able to provide more meaningful messages while converting a table type, + * Citus keeps InTableTypeConversionFunctionCall flag. Don't forget to set it properly + * in case you add a new way to return from this function. */ TableConversionReturn * ConvertTable(TableConversionState *con) { + InTableTypeConversionFunctionCall = true; + /* * We undistribute citus local tables that are not chained with any reference * tables via foreign keys at the end of the utility hook. @@ -536,6 +546,7 @@ ConvertTable(TableConversionState *con) * subgraph including itself, so return here. */ SetLocalEnableLocalReferenceForeignKeys(oldEnableLocalReferenceForeignKeys); + InTableTypeConversionFunctionCall = false; return NULL; } char *newAccessMethod = con->accessMethod ? 
con->accessMethod : @@ -701,7 +712,7 @@ ConvertTable(TableConversionState *con) char *columnarOptionsSql = GetShardedTableDDLCommandColumnar(con->hashOfName, context); - ExecuteQueryViaSPI(columnarOptionsSql, SPI_OK_SELECT); + ExecuteQueryViaSPI(columnarOptionsSql, SPI_OK_UTILITY); } con->newRelationId = get_relname_relid(con->tempName, con->schemaId); @@ -820,6 +831,7 @@ ConvertTable(TableConversionState *con) SetLocalEnableLocalReferenceForeignKeys(oldEnableLocalReferenceForeignKeys); + InTableTypeConversionFunctionCall = false; return ret; } @@ -1252,33 +1264,22 @@ GetViewCreationCommandsOfTable(Oid relationId) Oid viewOid = InvalidOid; foreach_oid(viewOid, views) { - Datum viewDefinitionDatum = DirectFunctionCall1(pg_get_viewdef, - ObjectIdGetDatum(viewOid)); - char *viewDefinition = TextDatumGetCString(viewDefinitionDatum); StringInfo query = makeStringInfo(); - char *viewName = get_rel_name(viewOid); - char *schemaName = get_namespace_name(get_rel_namespace(viewOid)); - char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); - bool isMatView = get_rel_relkind(viewOid) == RELKIND_MATVIEW; - /* here we need to get the access method of the view to recreate it */ - char *accessMethodName = GetAccessMethodForMatViewIfExists(viewOid); - - appendStringInfoString(query, "CREATE "); - - if (isMatView) + /* See comments on CreateMaterializedViewDDLCommand for its limitations */ + if (get_rel_relkind(viewOid) == RELKIND_MATVIEW) { - appendStringInfoString(query, "MATERIALIZED "); + char *matViewCreateCommands = CreateMaterializedViewDDLCommand(viewOid); + appendStringInfoString(query, matViewCreateCommands); + } + else + { + char *viewCreateCommand = CreateViewDDLCommand(viewOid); + appendStringInfoString(query, viewCreateCommand); } - appendStringInfo(query, "VIEW %s ", qualifiedViewName); - - if (accessMethodName) - { - appendStringInfo(query, "USING %s ", accessMethodName); - } - - appendStringInfo(query, "AS %s", viewDefinition); + char *alterViewCommmand = AlterViewOwnerCommand(viewOid); + appendStringInfoString(query, alterViewCommmand); commands = lappend(commands, makeTableDDLCommandString(query->data)); } @@ -1287,6 +1288,64 @@ GetViewCreationCommandsOfTable(Oid relationId) } +/* + * CreateMaterializedViewDDLCommand creates the command to create materialized view. + * Note that this function doesn't support + * - Aliases + * - Storage parameters + * - Tablespace + * - WITH [NO] DATA + * options for the given materialized view. Parser functions for materialized views + * should be added to handle them. + * + * Related issue: https://github.com/citusdata/citus/issues/5968 + */ +static char * +CreateMaterializedViewDDLCommand(Oid matViewOid) +{ + StringInfo query = makeStringInfo(); + + char *viewName = get_rel_name(matViewOid); + char *schemaName = get_namespace_name(get_rel_namespace(matViewOid)); + char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + + /* here we need to get the access method of the view to recreate it */ + char *accessMethodName = GetAccessMethodForMatViewIfExists(matViewOid); + + appendStringInfo(query, "CREATE MATERIALIZED VIEW %s ", qualifiedViewName); + + if (accessMethodName) + { + appendStringInfo(query, "USING %s ", accessMethodName); + } + + /* + * Set search_path to NIL so that all objects outside of pg_catalog will be + * schema-prefixed. 
+ */ + OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext); + overridePath->schemas = NIL; + overridePath->addCatalog = true; + PushOverrideSearchPath(overridePath); + + /* + * Push the transaction snapshot to be able to get vief definition with pg_get_viewdef + */ + PushActiveSnapshot(GetTransactionSnapshot()); + + Datum viewDefinitionDatum = DirectFunctionCall1(pg_get_viewdef, + ObjectIdGetDatum(matViewOid)); + char *viewDefinition = TextDatumGetCString(viewDefinitionDatum); + + PopActiveSnapshot(); + PopOverrideSearchPath(); + + appendStringInfo(query, "AS %s", viewDefinition); + + return query->data; +} + + /* * ReplaceTable replaces the source table with the target table. * It moves all the rows of the source table to target table with INSERT SELECT. diff --git a/src/backend/distributed/commands/call.c b/src/backend/distributed/commands/call.c index 91260a07e..1572ba8fe 100644 --- a/src/backend/distributed/commands/call.c +++ b/src/backend/distributed/commands/call.c @@ -17,6 +17,7 @@ #include "catalog/pg_proc.h" #include "commands/defrem.h" +#include "distributed/backend_data.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" diff --git a/src/backend/distributed/commands/citus_global_signal.c b/src/backend/distributed/commands/citus_global_signal.c index 64bb67f0d..8f33f91b7 100644 --- a/src/backend/distributed/commands/citus_global_signal.c +++ b/src/backend/distributed/commands/citus_global_signal.c @@ -15,6 +15,7 @@ #include "distributed/backend_data.h" #include "distributed/metadata_cache.h" +#include "distributed/remote_commands.h" #include "distributed/worker_manager.h" #include "lib/stringinfo.h" #include "signal.h" @@ -111,18 +112,39 @@ CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig) #endif } - StringInfo queryResult = makeStringInfo(); + int connectionFlags = 0; + MultiConnection *connection = GetNodeConnection(connectionFlags, + workerNode->workerName, + workerNode->workerPort); - bool reportResultError = true; - - bool success = ExecuteRemoteQueryOrCommand(workerNode->workerName, - workerNode->workerPort, cancelQuery->data, - queryResult, reportResultError); - - if (success && queryResult && strcmp(queryResult->data, "f") == 0) + if (!SendRemoteCommand(connection, cancelQuery->data)) { + /* if we cannot connect, we warn and report false */ + ReportConnectionError(connection, WARNING); + return false; + } + + bool raiseInterrupts = true; + PGresult *queryResult = GetRemoteCommandResult(connection, raiseInterrupts); + + /* if remote node throws an error, we also throw an error */ + if (!IsResponseOK(queryResult)) + { + ReportResultError(connection, queryResult, ERROR); + } + + StringInfo queryResultString = makeStringInfo(); + bool success = EvaluateSingleQueryResult(connection, queryResult, queryResultString); + if (success && strcmp(queryResultString->data, "f") == 0) + { + /* worker node returned "f" */ success = false; } + PQclear(queryResult); + + bool raiseErrors = false; + ClearResults(connection, raiseErrors); + return success; } diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index c284404ce..f9b977447 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -10,6 +10,8 @@ */ #include "postgres.h" +#include "pg_version_compat.h" + #include "access/htup_details.h" #include "access/xact.h" #include "catalog/pg_collation.h" @@ -36,9 +38,6 @@ static 
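-- Editorial sketch (not part of the patch), referring to the alter_table.c changes above:
-- during a table conversion, dependent materialized views are recreated from the DDL
-- produced by CreateMaterializedViewDDLCommand. Object names here are hypothetical.
CREATE TABLE events (event_id bigint, payload jsonb) USING heap;
CREATE MATERIALIZED VIEW events_summary AS
    SELECT count(*) AS event_count FROM events;
SELECT alter_table_set_access_method('events', 'columnar');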
char * CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollationName); -static List * FilterNameListForDistributedCollations(List *objects, bool missing_ok, - List **addresses); -static bool ShouldPropagateDefineCollationStmt(void); /* * GetCreateCollationDDLInternal returns a CREATE COLLATE sql string for the @@ -60,12 +59,30 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati Form_pg_collation collationForm = (Form_pg_collation) GETSTRUCT(heapTuple); char collprovider = collationForm->collprovider; - const char *collcollate = NameStr(collationForm->collcollate); - const char *collctype = NameStr(collationForm->collctype); Oid collnamespace = collationForm->collnamespace; const char *collname = NameStr(collationForm->collname); bool collisdeterministic = collationForm->collisdeterministic; +#if PG_VERSION_NUM >= PG_VERSION_15 + bool isnull; + Datum datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collcollate, + &isnull); + Assert(!isnull); + char *collcollate = TextDatumGetCString(datum); + datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collctype, &isnull); + Assert(!isnull); + char *collctype = TextDatumGetCString(datum); +#else + + /* + * In versions before 15, collcollate and collctype were type "name". Use + * pstrdup() to match the interface of 15 so that we consistently free the + * result later. + */ + char *collcollate = pstrdup(NameStr(collationForm->collcollate)); + char *collctype = pstrdup(NameStr(collationForm->collctype)); +#endif + if (collowner != NULL) { *collowner = collationForm->collowner; @@ -103,6 +120,9 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati quote_literal_cstr(collctype)); } + pfree(collcollate); + pfree(collctype); + if (!collisdeterministic) { appendStringInfoString(&collationNameDef, ", deterministic = false"); @@ -162,267 +182,6 @@ AlterCollationOwnerObjectAddress(Node *node, bool missing_ok) } -/* - * FilterNameListForDistributedCollations takes a list of objects to delete. - * This list is filtered against the collations that are distributed. - * - * The original list will not be touched, a new list will be created with only the objects - * in there. - * - * objectAddresses is replaced with a list of object addresses for the filtered objects. 
- */ -static List * -FilterNameListForDistributedCollations(List *objects, bool missing_ok, - List **objectAddresses) -{ - List *result = NIL; - - *objectAddresses = NIL; - - List *collName = NULL; - foreach_ptr(collName, objects) - { - Oid collOid = get_collation_oid(collName, true); - ObjectAddress collAddress = { 0 }; - - if (!OidIsValid(collOid)) - { - continue; - } - - ObjectAddressSet(collAddress, CollationRelationId, collOid); - if (IsObjectDistributed(&collAddress)) - { - ObjectAddress *address = palloc0(sizeof(ObjectAddress)); - *address = collAddress; - *objectAddresses = lappend(*objectAddresses, address); - result = lappend(result, collName); - } - } - return result; -} - - -List * -PreprocessDropCollationStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - DropStmt *stmt = castNode(DropStmt, node); - - /* - * We swap the list of objects to remove during deparse so we need a reference back to - * the old list to put back - */ - List *distributedTypeAddresses = NIL; - - if (!ShouldPropagate()) - { - return NIL; - } - - QualifyTreeNode((Node *) stmt); - - List *oldCollations = stmt->objects; - List *distributedCollations = - FilterNameListForDistributedCollations(oldCollations, stmt->missing_ok, - &distributedTypeAddresses); - if (list_length(distributedCollations) <= 0) - { - /* no distributed types to drop */ - return NIL; - } - - /* - * managing collations can only be done on the coordinator if ddl propagation is on. when - * it is off we will never get here. MX workers don't have a notion of distributed - * collations, so we block the call. - */ - EnsureCoordinator(); - - /* - * remove the entries for the distributed objects on dropping - */ - ObjectAddress *addressItem = NULL; - foreach_ptr(addressItem, distributedTypeAddresses) - { - UnmarkObjectDistributed(addressItem); - } - - /* - * temporary swap the lists of objects to delete with the distributed objects and - * deparse to an executable sql statement for the workers - */ - stmt->objects = distributedCollations; - char *dropStmtSql = DeparseTreeNode((Node *) stmt); - stmt->objects = oldCollations; - - EnsureSequentialMode(OBJECT_COLLATION); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterCollationOwnerStmt is called for change of ownership of collations - * before the ownership is changed on the local instance. - * - * If the type for which the owner is changed is distributed we execute the change on all - * the workers to keep the type in sync across the cluster. - */ -List * -PreprocessAlterCollationOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_COLLATION); - - ObjectAddress collationAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&collationAddress)) - { - return NIL; - } - - EnsureCoordinator(); - - QualifyTreeNode((Node *) stmt); - char *sql = DeparseTreeNode((Node *) stmt); - - EnsureSequentialMode(OBJECT_COLLATION); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterCollationOwnerStmt is invoked after the owner has been changed locally. 
- * Since changing the owner could result in new dependencies being found for this object - * we re-ensure all the dependencies for the collation do exist. - * - * This is solely to propagate the new owner (and all its dependencies) if it was not - * already distributed in the cluster. - */ -List * -PostprocessAlterCollationOwnerStmt(Node *node, const char *queryString) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_COLLATION); - - ObjectAddress collationAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&collationAddress)) - { - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&collationAddress); - - return NIL; -} - - -/* - * PreprocessRenameCollationStmt is called when the user is renaming the collation. The invocation happens - * before the statement is applied locally. - * - * As the collation already exists we have access to the ObjectAddress for the collation, this is - * used to check if the collation is distributed. If the collation is distributed the rename is - * executed on all the workers to keep the collation in sync across the cluster. - */ -List * -PreprocessRenameCollationStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - RenameStmt *stmt = castNode(RenameStmt, node); - ObjectAddress collationAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&collationAddress)) - { - return NIL; - } - - EnsureCoordinator(); - - /* fully qualify */ - QualifyTreeNode((Node *) stmt); - - /* deparse sql*/ - char *renameStmtSql = DeparseTreeNode((Node *) stmt); - - EnsureSequentialMode(OBJECT_COLLATION); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) renameStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterCollationSchemaStmt is executed before the statement is applied to the local - * postgres instance. - * - * In this stage we can prepare the commands that need to be run on all workers. - */ -List * -PreprocessAlterCollationSchemaStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_COLLATION); - - ObjectAddress collationAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&collationAddress)) - { - return NIL; - } - - EnsureCoordinator(); - - QualifyTreeNode((Node *) stmt); - char *sql = DeparseTreeNode((Node *) stmt); - - EnsureSequentialMode(OBJECT_COLLATION); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterCollationSchemaStmt is executed after the change has been applied locally, we - * can now use the new dependencies of the type to ensure all its dependencies exist on - * the workers before we apply the commands remotely. 
- */ -List * -PostprocessAlterCollationSchemaStmt(Node *node, const char *queryString) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_COLLATION); - - ObjectAddress collationAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&collationAddress)) - { - return NIL; - } - - /* dependencies have changed (schema) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&collationAddress); - - return NIL; -} - - /* * RenameCollationStmtObjectAddress returns the ObjectAddress of the type that is the object * of the RenameStmt. Errors if missing_ok is false. @@ -500,7 +259,7 @@ GenerateBackupNameForCollationCollision(const ObjectAddress *address) return NULL; } Form_pg_collation collationForm = (Form_pg_collation) GETSTRUCT(collationTuple); - Value *namespace = makeString(get_namespace_name(collationForm->collnamespace)); + String *namespace = makeString(get_namespace_name(collationForm->collnamespace)); ReleaseSysCache(collationTuple); while (true) @@ -544,89 +303,3 @@ DefineCollationStmtObjectAddress(Node *node, bool missing_ok) return address; } - - -/* - * PreprocessDefineCollationStmt executed before the collation has been - * created locally to ensure that if the collation create statement will - * be propagated, the node is a coordinator node - */ -List * -PreprocessDefineCollationStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION); - - if (!ShouldPropagateDefineCollationStmt()) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_COLLATION); - - return NIL; -} - - -/* - * PostprocessDefineCollationStmt executed after the collation has been - * created locally and before we create it on the worker nodes. - * As we now have access to ObjectAddress of the collation that is just - * created, we can mark it as distributed to make sure that its - * dependencies exist on all nodes. - */ -List * -PostprocessDefineCollationStmt(Node *node, const char *queryString) -{ - Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION); - - if (!ShouldPropagateDefineCollationStmt()) - { - return NIL; - } - - ObjectAddress collationAddress = - DefineCollationStmtObjectAddress(node, false); - - DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency( - &collationAddress); - if (errMsg != NULL) - { - RaiseDeferredError(errMsg, WARNING); - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&collationAddress); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make1(DISABLE_DDL_PROPAGATION); - commands = list_concat(commands, CreateCollationDDLsIdempotent( - collationAddress.objectId)); - commands = lappend(commands, ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * ShouldPropagateDefineCollationStmt checks if collation define - * statement should be propagated. 
Don't propagate if: - * - metadata syncing if off - * - create statement should be propagated according the the ddl propagation policy - */ -static bool -ShouldPropagateDefineCollationStmt() -{ - if (!ShouldPropagate()) - { - return false; - } - - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return false; - } - - return true; -} diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c new file mode 100644 index 000000000..1c6a71de3 --- /dev/null +++ b/src/backend/distributed/commands/common.c @@ -0,0 +1,274 @@ +/*------------------------------------------------------------------------- + * + * common.c + * + * Most of the object propagation code consists of mostly the same + * operations, varying slightly in parameters passed around. This + * file contains most of the reusable logic in object propagation. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/objectaddress.h" +#include "nodes/parsenodes.h" +#include "tcop/utility.h" + +#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata/dependency.h" +#include "distributed/metadata/distobject.h" +#include "distributed/multi_executor.h" +#include "distributed/worker_transaction.h" + + +/* + * PostprocessCreateDistributedObjectFromCatalogStmt is a common function that can be used + * for most objects during their creation phase. After the creation has happened locally + * this function creates idempotent statements to recreate the object addressed by the + * ObjectAddress of resolved from the creation statement. + * + * Since object already need to be able to create idempotent creation sql to support + * scaleout operations we can reuse this logic during the initial creation of the objects + * to reduce the complexity of implementation of new DDL commands. + */ +List * +PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryString) +{ + const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); + Assert(ops != NULL); + + if (!ShouldPropagate()) + { + return NIL; + } + + /* check creation against multi-statement transaction policy */ + if (!ShouldPropagateCreateInCoordinatedTransction()) + { + return NIL; + } + + if (ops->featureFlag && *ops->featureFlag == false) + { + /* not propagating when a configured feature flag is turned off by the user */ + return NIL; + } + + ObjectAddress address = GetObjectAddressFromParseTree(stmt, false); + + EnsureCoordinator(); + EnsureSequentialMode(ops->objectType); + + /* If the object has any unsupported dependency warn, and only create locally */ + DeferredErrorMessage *depError = DeferErrorIfHasUnsupportedDependency(&address); + if (depError != NULL) + { + RaiseDeferredError(depError, WARNING); + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&address); + + List *commands = GetDependencyCreateDDLCommands(&address); + + commands = lcons(DISABLE_DDL_PROPAGATION, commands); + commands = lappend(commands, ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessAlterDistributedObjectStmt handles any updates to distributed objects by + * creating the fully qualified sql to apply to all workers after checking all + * predconditions that apply to propagating changes. 
+ * + * Preconditions are (in order): + * - not in a CREATE/ALTER EXTENSION code block + * - citus.enable_metadata_sync is turned on + * - object being altered is distributed + * - any object specific feature flag is turned on when a feature flag is available + * + * Once we conclude to propagate the changes to the workers we make sure that the command + * has been executed on the coordinator and force any ongoing transaction to run in + * sequential mode. If any of these steps fail we raise an error to inform the user. + * + * Lastly we recreate a fully qualified version of the original sql and prepare the tasks + * to send these sql commands to the workers. These tasks include instructions to prevent + * recursion of propagation with Citus' MX functionality. + */ +List * +PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); + Assert(ops != NULL); + + ObjectAddress address = GetObjectAddressFromParseTree(stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + if (ops->featureFlag && *ops->featureFlag == false) + { + /* not propagating when a configured feature flag is turned off by the user */ + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(ops->objectType); + + QualifyTreeNode(stmt); + const char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PostprocessAlterDistributedObjectStmt is the counter part of + * PreprocessAlterDistributedObjectStmt that should be executed after the object has been + * changed locally. + * + * We perform the same precondition checks as before to skip this operation if any of the + * failed during preprocessing. Since we already raised an error on other checks we don't + * have to repeat them here, as they will never fail during postprocessing. + * + * When objects get altered they can start depending on undistributed objects. Now that + * the objects has been changed locally we can find these new dependencies and make sure + * they get created on the workers before we send the command list to the workers. + */ +List * +PostprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString) +{ + const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); + Assert(ops != NULL); + + ObjectAddress address = GetObjectAddressFromParseTree(stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + if (ops->featureFlag && *ops->featureFlag == false) + { + /* not propagating when a configured feature flag is turned off by the user */ + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + +/* + * PreprocessDropDistributedObjectStmt is a general purpose hook that can propagate any + * DROP statement. + * + * DROP statements are one of the few DDL statements that can work on many different + * objects at once. Instead of resolving just one ObjectAddress and check it is + * distributed we will need to lookup many different object addresses. Only if an object + * was _not_ distributed we will need to remove it from the list of objects before we + * recreate the sql statement. + * + * Given that we actually _do_ need to drop them locally we can't simply remove them from + * the object list. Instead we create a new list where we only add distributed objects to. 
+ * Before we recreate the sql statement we put this list on the drop statement, so that + * the SQL created will only contain the objects that are actually distributed in the + * cluster. After we have the SQL we restore the old list so that all objects get deleted + * locally. + * + * The reason we need to go through all this effort is that we can't resolve the object + * addresses anymore after the objects have been removed locally. Meaning during the + * postprocessing we cannot understand which objects were distributed to begin with. + */ +List * +PreprocessDropDistributedObjectStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + DropStmt *stmt = castNode(DropStmt, node); + + /* + * We swap the list of objects to remove during deparse so we need a reference back to + * the old list to put back + */ + List *originalObjects = stmt->objects; + + if (!ShouldPropagate()) + { + return NIL; + } + + QualifyTreeNode(node); + + List *distributedObjects = NIL; + List *distributedObjectAddresses = NIL; + Node *object = NULL; + foreach_ptr(object, stmt->objects) + { + /* TODO understand if the lock should be sth else */ + Relation rel = NULL; /* not used, but required to pass to get_object_address */ + ObjectAddress address = get_object_address(stmt->removeType, object, &rel, + AccessShareLock, stmt->missing_ok); + if (IsObjectDistributed(&address)) + { + ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress)); + *addressPtr = address; + + distributedObjects = lappend(distributedObjects, object); + distributedObjectAddresses = lappend(distributedObjectAddresses, addressPtr); + } + } + + if (list_length(distributedObjects) <= 0) + { + /* no distributed objects to drop */ + return NIL; + } + + /* + * managing objects can only be done on the coordinator if ddl propagation is on. when + * it is off we will never get here. MX workers don't have a notion of distributed + * types, so we block the call. + */ + EnsureCoordinator(); + + /* + * remove the entries for the distributed objects on dropping + */ + ObjectAddress *address = NULL; + foreach_ptr(address, distributedObjectAddresses) + { + UnmarkObjectDistributed(address); + } + + /* + * temporarily swap the lists of objects to delete with the distributed objects and + * deparse to an executable sql statement for the workers + */ + stmt->objects = distributedObjects; + char *dropStmtSql = DeparseTreeNode((Node *) stmt); + stmt->objects = originalObjects; + + EnsureSequentialMode(stmt->removeType); + + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + dropStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 59902b038..2abb70da8 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -36,75 +36,6 @@ static Oid get_database_owner(Oid db_oid); bool EnableAlterDatabaseOwner = false; -/* - * PreprocessAlterDatabaseOwnerStmt is called during the utility hook before the alter - * command is applied locally on the coordinator. This will verify if the command needs to - * be propagated to the workers and if so prepares a list of ddl commands to execute.
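-- Editorial sketch (not part of the patch): PreprocessDropDistributedObjectStmt above is
-- the generic drop path now used by, for example, collations and aggregates; only the
-- objects that are actually distributed are deparsed and sent to the workers.
-- Hypothetical session, assuming DDL propagation is enabled:
CREATE COLLATION german_phonebook (provider = icu, locale = 'de-u-co-phonebk');
DROP COLLATION IF EXISTS german_phonebook, some_local_collation;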
- */ -List * -PreprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_DATABASE); - - ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&typeAddress)) - { - return NIL; - } - - if (!EnableAlterDatabaseOwner) - { - /* don't propagate if GUC is turned off */ - return NIL; - } - - EnsureCoordinator(); - - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - EnsureSequentialMode(OBJECT_DATABASE); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterDatabaseOwnerStmt is called during the utility hook after the alter - * database command has been applied locally. - * - * Its main purpose is to propagate the newly formed dependencies onto the nodes before - * applying the change of owner of the databse. This ensures, for systems that have role - * management, that the roles will be created before applying the alter owner command. - */ -List * -PostprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_DATABASE); - - ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&typeAddress)) - { - return NIL; - } - - if (!EnableAlterDatabaseOwner) - { - /* don't propagate if GUC is turned off */ - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&typeAddress); - return NIL; -} - - /* * AlterDatabaseOwnerObjectAddress returns the ObjectAddress of the database that is the * object of the AlterOwnerStmt. Errors if missing_ok is false. @@ -115,7 +46,7 @@ AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok) AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); Assert(stmt->objectType == OBJECT_DATABASE); - Oid databaseOid = get_database_oid(strVal((Value *) stmt->object), missing_ok); + Oid databaseOid = get_database_oid(strVal((String *) stmt->object), missing_ok); ObjectAddress address = { 0 }; ObjectAddressSet(address, DatabaseRelationId, databaseOid); diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index bf35a13b7..e1e51fc5d 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -34,7 +34,6 @@ typedef bool (*AddressPredicate)(const ObjectAddress *); static void EnsureDependenciesCanBeDistributed(const ObjectAddress *relationAddress); static void ErrorIfCircularDependencyExists(const ObjectAddress *objectAddress); static int ObjectAddressComparator(const void *a, const void *b); -static List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency); static List * FilterObjectAddressListByPredicate(List *objectAddressList, AddressPredicate predicate); @@ -166,11 +165,28 @@ EnsureDependenciesCanBeDistributed(const ObjectAddress *objectAddress) /* - * ErrorIfCircularDependencyExists checks whether given object has circular dependency - * with itself via existing objects of pg_dist_object. + * ErrorIfCircularDependencyExists is a wrapper around + * DeferErrorIfCircularDependencyExists(), and throws error + * if circular dependency exists. 
*/ static void ErrorIfCircularDependencyExists(const ObjectAddress *objectAddress) +{ + DeferredErrorMessage *depError = + DeferErrorIfCircularDependencyExists(objectAddress); + if (depError != NULL) + { + RaiseDeferredError(depError, ERROR); + } +} + + +/* + * DeferErrorIfCircularDependencyExists checks whether given object has + * circular dependency with itself via existing objects of pg_dist_object. + */ +DeferredErrorMessage * +DeferErrorIfCircularDependencyExists(const ObjectAddress *objectAddress) { List *dependencies = GetAllSupportedDependenciesForObject(objectAddress); @@ -189,13 +205,18 @@ ErrorIfCircularDependencyExists(const ObjectAddress *objectAddress) objectDescription = getObjectDescription(objectAddress); #endif - ereport(ERROR, (errmsg("Citus can not handle circular dependencies " - "between distributed objects"), - errdetail("\"%s\" circularly depends itself, resolve " - "circular dependency first", - objectDescription))); + StringInfo detailInfo = makeStringInfo(); + appendStringInfo(detailInfo, "\"%s\" circularly depends itself, resolve " + "circular dependency first", objectDescription); + + return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, + "Citus can not handle circular dependencies " + "between distributed objects", detailInfo->data, + NULL); } } + + return NULL; } @@ -289,7 +310,7 @@ GetDistributableDependenciesForObject(const ObjectAddress *target) * GetDependencyCreateDDLCommands returns a list (potentially empty or NIL) of ddl * commands to execute on a worker to create the object. */ -static List * +List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency) { switch (getObjectClass(dependency)) @@ -349,6 +370,14 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return DDLCommandsForSequence(dependency->objectId, sequenceOwnerName); } + if (relKind == RELKIND_VIEW) + { + char *createViewCommand = CreateViewDDLCommand(dependency->objectId); + char *alterViewOwnerCommand = AlterViewOwnerCommand(dependency->objectId); + + return list_make2(createViewCommand, alterViewOwnerCommand); + } + /* if this relation is not supported, break to the error at the end */ break; } diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index c677b5615..4e4c5aa82 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -16,6 +16,7 @@ #include "distributed/deparser.h" #include "distributed/pg_version_constants.h" #include "distributed/version_compat.h" +#include "distributed/commands/utility_hook.h" static DistributeObjectOps NoDistributeOps = { .deparse = NULL, @@ -28,31 +29,34 @@ static DistributeObjectOps NoDistributeOps = { static DistributeObjectOps Aggregate_AlterObjectSchema = { .deparse = DeparseAlterFunctionSchemaStmt, .qualify = QualifyAlterFunctionSchemaStmt, - .preprocess = PreprocessAlterFunctionSchemaStmt, - .postprocess = PostprocessAlterFunctionSchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FUNCTION, .address = AlterFunctionSchemaStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Aggregate_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, - .preprocess = PreprocessAlterFunctionOwnerStmt, - .postprocess = PostprocessAlterFunctionOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = 
PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FUNCTION, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps Aggregate_Define = { .deparse = NULL, .qualify = QualifyDefineAggregateStmt, - .preprocess = PreprocessDefineAggregateStmt, - .postprocess = PostprocessDefineAggregateStmt, + .preprocess = NULL, + .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, + .objectType = OBJECT_AGGREGATE, .address = DefineAggregateStmtObjectAddress, .markDistributed = true, }; static DistributeObjectOps Aggregate_Drop = { .deparse = DeparseDropFunctionStmt, .qualify = NULL, - .preprocess = PreprocessDropFunctionStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -60,16 +64,18 @@ static DistributeObjectOps Aggregate_Drop = { static DistributeObjectOps Aggregate_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, - .preprocess = PreprocessRenameFunctionStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_FUNCTION, .address = RenameFunctionStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Any_AlterEnum = { .deparse = DeparseAlterEnumStmt, .qualify = QualifyAlterEnumStmt, - .preprocess = PreprocessAlterEnumStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TYPE, .address = AlterEnumStmtObjectAddress, .markDistributed = false, }; @@ -92,9 +98,10 @@ static DistributeObjectOps Any_AlterExtensionContents = { static DistributeObjectOps Any_AlterForeignServer = { .deparse = DeparseAlterForeignServerStmt, .qualify = NULL, - .preprocess = PreprocessAlterForeignServerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, - .address = NULL, + .objectType = OBJECT_FOREIGN_SERVER, + .address = AlterForeignServerStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Any_AlterFunction = { @@ -148,24 +155,29 @@ static DistributeObjectOps Any_Cluster = { static DistributeObjectOps Any_CompositeType = { .deparse = DeparseCompositeTypeStmt, .qualify = QualifyCompositeTypeStmt, - .preprocess = PreprocessCompositeTypeStmt, - .postprocess = PostprocessCompositeTypeStmt, + .preprocess = NULL, + .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, + .objectType = OBJECT_TYPE, + .featureFlag = &EnableCreateTypePropagation, .address = CompositeTypeStmtObjectAddress, .markDistributed = true, }; static DistributeObjectOps Any_CreateDomain = { .deparse = DeparseCreateDomainStmt, .qualify = QualifyCreateDomainStmt, - .preprocess = PreprocessCreateDomainStmt, - .postprocess = PostprocessCreateDomainStmt, + .preprocess = NULL, + .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, + .objectType = OBJECT_DOMAIN, .address = CreateDomainStmtObjectAddress, .markDistributed = true, }; static DistributeObjectOps Any_CreateEnum = { .deparse = DeparseCreateEnumStmt, .qualify = QualifyCreateEnumStmt, - .preprocess = PreprocessCreateEnumStmt, - .postprocess = PostprocessCreateEnumStmt, + .preprocess = NULL, + .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, + .objectType = OBJECT_TYPE, + .featureFlag = &EnableCreateTypePropagation, .address = CreateEnumStmtObjectAddress, .markDistributed = true, }; @@ -185,6 +197,14 @@ static DistributeObjectOps Any_CreateFunction = { .address = CreateFunctionStmtObjectAddress, .markDistributed = true, }; +static 
DistributeObjectOps Any_View = { + .deparse = NULL, + .qualify = NULL, + .preprocess = PreprocessViewStmt, + .postprocess = PostprocessViewStmt, + .address = ViewStmtObjectAddress, + .markDistributed = true, +}; static DistributeObjectOps Any_CreatePolicy = { .deparse = NULL, .qualify = NULL, @@ -196,8 +216,9 @@ static DistributeObjectOps Any_CreatePolicy = { static DistributeObjectOps Any_CreateForeignServer = { .deparse = DeparseCreateForeignServerStmt, .qualify = NULL, - .preprocess = PreprocessCreateForeignServerStmt, - .postprocess = PostprocessCreateForeignServerStmt, + .preprocess = NULL, + .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, + .objectType = OBJECT_FOREIGN_SERVER, .address = CreateForeignServerStmtObjectAddress, .markDistributed = true, }; @@ -268,31 +289,34 @@ static DistributeObjectOps Attribute_Rename = { static DistributeObjectOps Collation_AlterObjectSchema = { .deparse = DeparseAlterCollationSchemaStmt, .qualify = QualifyAlterCollationSchemaStmt, - .preprocess = PreprocessAlterCollationSchemaStmt, - .postprocess = PostprocessAlterCollationSchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_COLLATION, .address = AlterCollationSchemaStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Collation_AlterOwner = { .deparse = DeparseAlterCollationOwnerStmt, .qualify = QualifyAlterCollationOwnerStmt, - .preprocess = PreprocessAlterCollationOwnerStmt, - .postprocess = PostprocessAlterCollationOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_COLLATION, .address = AlterCollationOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps Collation_Define = { .deparse = NULL, .qualify = NULL, - .preprocess = PreprocessDefineCollationStmt, - .postprocess = PostprocessDefineCollationStmt, + .preprocess = NULL, + .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, + .objectType = OBJECT_COLLATION, .address = DefineCollationStmtObjectAddress, .markDistributed = true, }; static DistributeObjectOps Collation_Drop = { .deparse = DeparseDropCollationStmt, .qualify = QualifyDropCollationStmt, - .preprocess = PreprocessDropCollationStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -300,47 +324,53 @@ static DistributeObjectOps Collation_Drop = { static DistributeObjectOps Collation_Rename = { .deparse = DeparseRenameCollationStmt, .qualify = QualifyRenameCollationStmt, - .preprocess = PreprocessRenameCollationStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_COLLATION, .address = RenameCollationStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Database_AlterOwner = { .deparse = DeparseAlterDatabaseOwnerStmt, .qualify = NULL, - .preprocess = PreprocessAlterDatabaseOwnerStmt, - .postprocess = PostprocessAlterDatabaseOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_DATABASE, + .featureFlag = &EnableAlterDatabaseOwner, .address = AlterDatabaseOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps Domain_Alter = { .deparse = DeparseAlterDomainStmt, .qualify = QualifyAlterDomainStmt, - .preprocess = PreprocessAlterDomainStmt, - .postprocess = PostprocessAlterDomainStmt, + 
.preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_DOMAIN, .address = AlterDomainStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Domain_AlterObjectSchema = { .deparse = DeparseAlterDomainSchemaStmt, .qualify = QualifyAlterDomainSchemaStmt, - .preprocess = PreprocessAlterDomainSchemaStmt, - .postprocess = PostprocessAlterDomainSchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_DOMAIN, .address = AlterTypeSchemaStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Domain_AlterOwner = { .deparse = DeparseAlterDomainOwnerStmt, .qualify = QualifyAlterDomainOwnerStmt, - .preprocess = PreprocessAlterDomainOwnerStmt, - .postprocess = PostprocessAlterDomainOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_DOMAIN, .address = AlterDomainOwnerStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Domain_Drop = { .deparse = DeparseDropDomainStmt, .qualify = QualifyDropDomainStmt, - .preprocess = PreprocessDropDomainStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -348,8 +378,9 @@ static DistributeObjectOps Domain_Drop = { static DistributeObjectOps Domain_Rename = { .deparse = DeparseRenameDomainStmt, .qualify = QualifyRenameDomainStmt, - .preprocess = PreprocessRenameDomainStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_DOMAIN, .address = RenameDomainStmtObjectAddress, .markDistributed = false, }; @@ -357,8 +388,9 @@ static DistributeObjectOps Domain_Rename = { static DistributeObjectOps Domain_RenameConstraint = { .deparse = DeparseDomainRenameConstraintStmt, .qualify = QualifyDomainRenameConstraintStmt, - .preprocess = PreprocessDomainRenameConstraintStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_DOMAIN, .address = DomainRenameConstraintStmtObjectAddress, .markDistributed = false, }; @@ -381,7 +413,7 @@ static DistributeObjectOps Extension_Drop = { static DistributeObjectOps ForeignServer_Drop = { .deparse = DeparseDropForeignServerStmt, .qualify = NULL, - .preprocess = PreprocessDropForeignServerStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -389,16 +421,18 @@ static DistributeObjectOps ForeignServer_Drop = { static DistributeObjectOps ForeignServer_Rename = { .deparse = DeparseAlterForeignServerRenameStmt, .qualify = NULL, - .preprocess = PreprocessRenameForeignServerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, - .address = NULL, + .objectType = OBJECT_FOREIGN_SERVER, + .address = RenameForeignServerStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps ForeignServer_AlterOwner = { .deparse = DeparseAlterForeignServerOwnerStmt, .qualify = NULL, - .preprocess = PreprocessAlterForeignServerOwnerStmt, - .postprocess = PostprocessAlterForeignServerOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FOREIGN_SERVER, .address = AlterForeignServerOwnerStmtObjectAddress, .markDistributed = false, }; @@ -421,23 +455,33 @@ static DistributeObjectOps 
Function_AlterObjectDepends = { static DistributeObjectOps Function_AlterObjectSchema = { .deparse = DeparseAlterFunctionSchemaStmt, .qualify = QualifyAlterFunctionSchemaStmt, - .preprocess = PreprocessAlterFunctionSchemaStmt, - .postprocess = PostprocessAlterFunctionSchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FUNCTION, .address = AlterFunctionSchemaStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Function_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, - .preprocess = PreprocessAlterFunctionOwnerStmt, - .postprocess = PostprocessAlterFunctionOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FUNCTION, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps Function_Drop = { .deparse = DeparseDropFunctionStmt, .qualify = NULL, - .preprocess = PreprocessDropFunctionStmt, + .preprocess = PreprocessDropDistributedObjectStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; +static DistributeObjectOps View_Drop = { + .deparse = DeparseDropViewStmt, + .qualify = QualifyDropViewStmt, + .preprocess = PreprocessDropViewStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -445,8 +489,9 @@ static DistributeObjectOps Function_Drop = { static DistributeObjectOps Function_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, - .preprocess = PreprocessRenameFunctionStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_FUNCTION, .address = RenameFunctionStmtObjectAddress, .markDistributed = false, }; @@ -485,23 +530,25 @@ static DistributeObjectOps Procedure_AlterObjectDepends = { static DistributeObjectOps Procedure_AlterObjectSchema = { .deparse = DeparseAlterFunctionSchemaStmt, .qualify = QualifyAlterFunctionSchemaStmt, - .preprocess = PreprocessAlterFunctionSchemaStmt, - .postprocess = PostprocessAlterFunctionSchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FUNCTION, .address = AlterFunctionSchemaStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Procedure_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, - .preprocess = PreprocessAlterFunctionOwnerStmt, - .postprocess = PostprocessAlterFunctionOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FUNCTION, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps Procedure_Drop = { .deparse = DeparseDropFunctionStmt, .qualify = NULL, - .preprocess = PreprocessDropFunctionStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -509,8 +556,9 @@ static DistributeObjectOps Procedure_Drop = { static DistributeObjectOps Procedure_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, - .preprocess = PreprocessRenameFunctionStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_FUNCTION, .address = RenameFunctionStmtObjectAddress, .markDistributed = false, }; @@ -548,7 +596,7 @@ 
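Across this file the per-object `Preprocess…`/`Postprocess…` callbacks are collapsed into the shared `PreprocessAlterDistributedObjectStmt`/`PostprocessAlterDistributedObjectStmt` pair, parameterized by the new `objectType` (and, for some ops, `featureFlag`) fields. A rough sketch of the preprocess shape this implies, assembled from the per-object handlers deleted elsewhere in this patch; the function below is illustrative rather than the actual implementation, and how it receives the `DistributeObjectOps` fields is assumed:

```c
/* illustrative sketch: the common shape of the removed per-object handlers */
static List *
AlterDistributedObjectPreprocessSketch(Node *node, ObjectType objectType,
									   bool *featureFlag)
{
	if (featureFlag != NULL && !(*featureFlag))
	{
		/* propagation for this object class is disabled by GUC */
		return NIL;
	}

	ObjectAddress address = GetObjectAddressFromParseTree(node, false);
	if (!ShouldPropagateObject(&address))
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureSequentialMode(objectType);

	QualifyTreeNode(node);
	const char *sql = DeparseTreeNode(node);

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
```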
static DistributeObjectOps Sequence_AlterOwner = { }; static DistributeObjectOps Sequence_Drop = { .deparse = DeparseDropSequenceStmt, - .qualify = NULL, + .qualify = QualifyDropSequenceStmt, .preprocess = PreprocessDropSequenceStmt, .postprocess = NULL, .address = NULL, @@ -565,32 +613,36 @@ static DistributeObjectOps Sequence_Rename = { static DistributeObjectOps TextSearchConfig_Alter = { .deparse = DeparseAlterTextSearchConfigurationStmt, .qualify = QualifyAlterTextSearchConfigurationStmt, - .preprocess = PreprocessAlterTextSearchConfigurationStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TSCONFIGURATION, .address = AlterTextSearchConfigurationStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchConfig_AlterObjectSchema = { .deparse = DeparseAlterTextSearchConfigurationSchemaStmt, .qualify = QualifyAlterTextSearchConfigurationSchemaStmt, - .preprocess = PreprocessAlterTextSearchConfigurationSchemaStmt, - .postprocess = PostprocessAlterTextSearchConfigurationSchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_TSCONFIGURATION, .address = AlterTextSearchConfigurationSchemaStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchConfig_AlterOwner = { .deparse = DeparseAlterTextSearchConfigurationOwnerStmt, .qualify = QualifyAlterTextSearchConfigurationOwnerStmt, - .preprocess = PreprocessAlterTextSearchConfigurationOwnerStmt, - .postprocess = PostprocessAlterTextSearchConfigurationOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_TSCONFIGURATION, .address = AlterTextSearchConfigurationOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchConfig_Comment = { .deparse = DeparseTextSearchConfigurationCommentStmt, .qualify = QualifyTextSearchConfigurationCommentStmt, - .preprocess = PreprocessTextSearchConfigurationCommentStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TSCONFIGURATION, .address = TextSearchConfigurationCommentObjectAddress, .markDistributed = false, }; @@ -598,14 +650,15 @@ static DistributeObjectOps TextSearchConfig_Define = { .deparse = DeparseCreateTextSearchConfigurationStmt, .qualify = NULL, .preprocess = NULL, - .postprocess = PostprocessCreateTextSearchConfigurationStmt, + .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, + .objectType = OBJECT_TSCONFIGURATION, .address = CreateTextSearchConfigurationObjectAddress, .markDistributed = true, }; static DistributeObjectOps TextSearchConfig_Drop = { .deparse = DeparseDropTextSearchConfigurationStmt, .qualify = QualifyDropTextSearchConfigurationStmt, - .preprocess = PreprocessDropTextSearchConfigurationStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -613,40 +666,45 @@ static DistributeObjectOps TextSearchConfig_Drop = { static DistributeObjectOps TextSearchConfig_Rename = { .deparse = DeparseRenameTextSearchConfigurationStmt, .qualify = QualifyRenameTextSearchConfigurationStmt, - .preprocess = PreprocessRenameTextSearchConfigurationStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TSCONFIGURATION, .address = RenameTextSearchConfigurationStmtObjectAddress, .markDistributed = false, }; static 
DistributeObjectOps TextSearchDict_Alter = { .deparse = DeparseAlterTextSearchDictionaryStmt, .qualify = QualifyAlterTextSearchDictionaryStmt, - .preprocess = PreprocessAlterTextSearchDictionaryStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TSDICTIONARY, .address = AlterTextSearchDictionaryStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchDict_AlterObjectSchema = { .deparse = DeparseAlterTextSearchDictionarySchemaStmt, .qualify = QualifyAlterTextSearchDictionarySchemaStmt, - .preprocess = PreprocessAlterTextSearchDictionarySchemaStmt, - .postprocess = PostprocessAlterTextSearchDictionarySchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_TSDICTIONARY, .address = AlterTextSearchDictionarySchemaStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchDict_AlterOwner = { .deparse = DeparseAlterTextSearchDictionaryOwnerStmt, .qualify = QualifyAlterTextSearchDictionaryOwnerStmt, - .preprocess = PreprocessAlterTextSearchDictionaryOwnerStmt, - .postprocess = PostprocessAlterTextSearchDictionaryOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_TSDICTIONARY, .address = AlterTextSearchDictOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchDict_Comment = { .deparse = DeparseTextSearchDictionaryCommentStmt, .qualify = QualifyTextSearchDictionaryCommentStmt, - .preprocess = PreprocessTextSearchDictionaryCommentStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TSDICTIONARY, .address = TextSearchDictCommentObjectAddress, .markDistributed = false, }; @@ -654,14 +712,15 @@ static DistributeObjectOps TextSearchDict_Define = { .deparse = DeparseCreateTextSearchDictionaryStmt, .qualify = NULL, .preprocess = NULL, - .postprocess = PostprocessCreateTextSearchDictionaryStmt, + .postprocess = PostprocessCreateDistributedObjectFromCatalogStmt, + .objectType = OBJECT_TSDICTIONARY, .address = CreateTextSearchDictObjectAddress, .markDistributed = true, }; static DistributeObjectOps TextSearchDict_Drop = { .deparse = DeparseDropTextSearchDictionaryStmt, .qualify = QualifyDropTextSearchDictionaryStmt, - .preprocess = PreprocessDropTextSearchDictionaryStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -669,8 +728,9 @@ static DistributeObjectOps TextSearchDict_Drop = { static DistributeObjectOps TextSearchDict_Rename = { .deparse = DeparseRenameTextSearchDictionaryStmt, .qualify = QualifyRenameTextSearchDictionaryStmt, - .preprocess = PreprocessRenameTextSearchDictionaryStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TSDICTIONARY, .address = RenameTextSearchDictionaryStmtObjectAddress, .markDistributed = false, }; @@ -685,23 +745,25 @@ static DistributeObjectOps Trigger_AlterObjectDepends = { static DistributeObjectOps Routine_AlterObjectSchema = { .deparse = DeparseAlterFunctionSchemaStmt, .qualify = QualifyAlterFunctionSchemaStmt, - .preprocess = PreprocessAlterFunctionSchemaStmt, - .postprocess = PostprocessAlterFunctionSchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FUNCTION, .address = 
AlterFunctionSchemaStmtObjectAddress, .markDistributed = false, }; static DistributeObjectOps Routine_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, - .preprocess = PreprocessAlterFunctionOwnerStmt, - .postprocess = PostprocessAlterFunctionOwnerStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_FUNCTION, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps Routine_Drop = { .deparse = DeparseDropFunctionStmt, .qualify = NULL, - .preprocess = PreprocessDropFunctionStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -709,8 +771,9 @@ static DistributeObjectOps Routine_Drop = { static DistributeObjectOps Routine_Rename = { .deparse = DeparseRenameFunctionStmt, .qualify = QualifyRenameFunctionStmt, - .preprocess = PreprocessRenameFunctionStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_FUNCTION, .address = RenameFunctionStmtObjectAddress, .markDistributed = false, }; @@ -733,8 +796,9 @@ static DistributeObjectOps Schema_Grant = { static DistributeObjectOps Schema_Rename = { .deparse = DeparseAlterSchemaRenameStmt, .qualify = NULL, - .preprocess = PreprocessAlterSchemaRenameStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_SCHEMA, .address = AlterSchemaRenameStmtObjectAddress, .markDistributed = false, }; @@ -807,31 +871,66 @@ static DistributeObjectOps Table_Drop = { static DistributeObjectOps Type_AlterObjectSchema = { .deparse = DeparseAlterTypeSchemaStmt, .qualify = QualifyAlterTypeSchemaStmt, - .preprocess = PreprocessAlterTypeSchemaStmt, - .postprocess = PostprocessAlterTypeSchemaStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_TYPE, .address = AlterTypeSchemaStmtObjectAddress, .markDistributed = false, }; + +/* + * PreprocessAlterViewSchemaStmt and PostprocessAlterViewSchemaStmt functions can be called + * internally by ALTER TABLE view_name SET SCHEMA ... if the ALTER TABLE command targets a + * view. In other words ALTER VIEW view_name SET SCHEMA will use the View_AlterObjectSchema + * but ALTER TABLE view_name SET SCHEMA will use Table_AlterObjectSchema but call process + * functions of View_AlterObjectSchema internally. 
+ */ +static DistributeObjectOps View_AlterObjectSchema = { + .deparse = DeparseAlterViewSchemaStmt, + .qualify = QualifyAlterViewSchemaStmt, + .preprocess = PreprocessAlterViewSchemaStmt, + .postprocess = PostprocessAlterViewSchemaStmt, + .address = AlterViewSchemaStmtObjectAddress, + .markDistributed = false, +}; static DistributeObjectOps Type_AlterOwner = { .deparse = DeparseAlterTypeOwnerStmt, .qualify = QualifyAlterTypeOwnerStmt, - .preprocess = PreprocessAlterTypeOwnerStmt, - .postprocess = NULL, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = PostprocessAlterDistributedObjectStmt, + .objectType = OBJECT_TYPE, .address = AlterTypeOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps Type_AlterTable = { .deparse = DeparseAlterTypeStmt, .qualify = QualifyAlterTypeStmt, - .preprocess = PreprocessAlterTypeStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TYPE, .address = AlterTypeStmtObjectAddress, .markDistributed = false, }; + +/* + * PreprocessAlterViewStmt and PostprocessAlterViewStmt functions can be called internally + * by ALTER TABLE view_name SET/RESET ... if the ALTER TABLE command targets a view. In + * other words ALTER VIEW view_name SET/RESET will use the View_AlterView + * but ALTER TABLE view_name SET/RESET will use Table_AlterTable but call process + * functions of View_AlterView internally. + */ +static DistributeObjectOps View_AlterView = { + .deparse = DeparseAlterViewStmt, + .qualify = QualifyAlterViewStmt, + .preprocess = PreprocessAlterViewStmt, + .postprocess = PostprocessAlterViewStmt, + .address = AlterViewStmtObjectAddress, + .markDistributed = false, +}; static DistributeObjectOps Type_Drop = { .deparse = DeparseDropTypeStmt, .qualify = NULL, - .preprocess = PreprocessDropTypeStmt, + .preprocess = PreprocessDropDistributedObjectStmt, .postprocess = NULL, .address = NULL, .markDistributed = false, @@ -847,11 +946,27 @@ static DistributeObjectOps Trigger_Drop = { static DistributeObjectOps Type_Rename = { .deparse = DeparseRenameTypeStmt, .qualify = QualifyRenameTypeStmt, - .preprocess = PreprocessRenameTypeStmt, + .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, + .objectType = OBJECT_TYPE, .address = RenameTypeStmtObjectAddress, .markDistributed = false, }; + +/* + * PreprocessRenameViewStmt function can be called internally by ALTER TABLE view_name + * RENAME ... if the ALTER TABLE command targets a view or a view's column. In other words + * ALTER VIEW view_name RENAME will use the View_Rename but ALTER TABLE view_name RENAME + * will use Any_Rename but call process functions of View_Rename internally. 
+ */ +static DistributeObjectOps View_Rename = { + .deparse = DeparseRenameViewStmt, + .qualify = QualifyRenameViewStmt, + .preprocess = PreprocessRenameViewStmt, + .postprocess = NULL, + .address = RenameViewStmtObjectAddress, + .markDistributed = false, +}; static DistributeObjectOps Trigger_Rename = { .deparse = NULL, .qualify = NULL, @@ -1005,6 +1120,11 @@ GetDistributeObjectOps(Node *node) return &Type_AlterObjectSchema; } + case OBJECT_VIEW: + { + return &View_AlterObjectSchema; + } + default: { return &NoDistributeOps; @@ -1141,6 +1261,11 @@ GetDistributeObjectOps(Node *node) return &Sequence_AlterOwner; } + case OBJECT_VIEW: + { + return &View_AlterView; + } + default: { return &NoDistributeOps; @@ -1367,6 +1492,11 @@ GetDistributeObjectOps(Node *node) return &Trigger_Drop; } + case OBJECT_VIEW: + { + return &View_Drop; + } + default: { return &NoDistributeOps; @@ -1396,6 +1526,11 @@ GetDistributeObjectOps(Node *node) return &Any_Index; } + case T_ViewStmt: + { + return &Any_View; + } + case T_ReindexStmt: { return &Any_Reindex; @@ -1486,6 +1621,27 @@ GetDistributeObjectOps(Node *node) return &Trigger_Rename; } + case OBJECT_VIEW: + { + return &View_Rename; + } + + case OBJECT_COLUMN: + { + switch (stmt->relationType) + { + case OBJECT_VIEW: + { + return &View_Rename; + } + + default: + { + return &Any_Rename; + } + } + } + default: { return &Any_Rename; diff --git a/src/backend/distributed/commands/domain.c b/src/backend/distributed/commands/domain.c index 7e4d5c080..c75af0024 100644 --- a/src/backend/distributed/commands/domain.c +++ b/src/backend/distributed/commands/domain.c @@ -37,382 +37,8 @@ static CollateClause * MakeCollateClauseFromOid(Oid collationOid); -static List * FilterNameListForDistributedDomains(List *domainNames, bool missing_ok, - List **distributedDomainAddresses); static ObjectAddress GetDomainAddressByName(TypeName *domainName, bool missing_ok); -/* - * PreprocessCreateDomainStmt handles the propagation of the create domain statements. - */ -List * -PreprocessCreateDomainStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - if (!ShouldPropagate()) - { - return NIL; - } - - /* check creation against multi-statement transaction policy */ - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_DOMAIN); - - QualifyTreeNode(node); - const char *sql = DeparseTreeNode(node); - sql = WrapCreateOrReplace(sql); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessCreateDomainStmt gets called after the domain has been created locally. When - * the domain is decided to be propagated we make sure all the domains dependencies exist - * on all workers. - */ -List * -PostprocessCreateDomainStmt(Node *node, const char *queryString) -{ - if (!ShouldPropagate()) - { - return NIL; - } - - /* check creation against multi-statement transaction policy */ - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return NIL; - } - - /* - * find object address of the just created object, because the domain has been created - * locally it can't be missing - */ - ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false); - EnsureDependenciesExistOnAllNodes(&typeAddress); - - return NIL; -} - - -/* - * PreprocessDropDomainStmt gets called before dropping the domain locally. 
For - * distributed domains it will make sure the fully qualified statement is forwarded to all - * the workers reflecting the drop of the domain. - */ -List * -PreprocessDropDomainStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - DropStmt *stmt = castNode(DropStmt, node); - - - if (!ShouldPropagate()) - { - return NIL; - } - - QualifyTreeNode((Node *) stmt); - - List *oldDomains = stmt->objects; - List *distributedDomainAddresses = NIL; - List *distributedDomains = FilterNameListForDistributedDomains( - oldDomains, - stmt->missing_ok, - &distributedDomainAddresses); - if (list_length(distributedDomains) <= 0) - { - /* no distributed domains to drop */ - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_DOMAIN); - - ObjectAddress *addressItem = NULL; - foreach_ptr(addressItem, distributedDomainAddresses) - { - UnmarkObjectDistributed(addressItem); - } - - /* - * temporary swap the lists of objects to delete with the distributed objects and - * deparse to an executable sql statement for the workers - */ - stmt->objects = distributedDomains; - char *dropStmtSql = DeparseTreeNode((Node *) stmt); - stmt->objects = oldDomains; - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterDomainStmt gets called for all domain specific alter statements. When - * the change happens on a distributed domain we reflect the changes on the workers. - */ -List * -PreprocessAlterDomainStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterDomainStmt *stmt = castNode(AlterDomainStmt, node); - - ObjectAddress domainAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&domainAddress)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_DOMAIN); - - QualifyTreeNode((Node *) stmt); - char *sqlStmt = DeparseTreeNode((Node *) stmt); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sqlStmt, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterDomainStmt gets called after the domain has been altered locally. A - * change on the constraints could cause new (undistributed) objects to be dependencies of - * the domain. Here we recreate any new dependencies on the workers before we forward the - * alter statement to the workers. - */ -List * -PostprocessAlterDomainStmt(Node *node, const char *queryString) -{ - AlterDomainStmt *stmt = castNode(AlterDomainStmt, node); - - ObjectAddress domainAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&domainAddress)) - { - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&domainAddress); - return NIL; -} - - -/* - * PreprocessDomainRenameConstraintStmt gets called locally when a constraint on a domain - * is renamed. When the constraint is on a distributed domain we forward the statement - * appropriately. 
- */ -List * -PreprocessDomainRenameConstraintStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - RenameStmt *stmt = castNode(RenameStmt, node); - Assert(stmt->renameType == OBJECT_DOMCONSTRAINT); - - ObjectAddress domainAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&domainAddress)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_DOMAIN); - - QualifyTreeNode((Node *) stmt); - char *sqlStmt = DeparseTreeNode((Node *) stmt); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sqlStmt, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterDomainOwnerStmt called locally when the owner of a constraint is - * changed. For distributed domains the statement is forwarded to all the workers. - */ -List * -PreprocessAlterDomainOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_DOMAIN); - - ObjectAddress domainAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&domainAddress)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_DOMAIN); - - QualifyTreeNode((Node *) stmt); - char *sqlStmt = DeparseTreeNode((Node *) stmt); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sqlStmt, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterDomainOwnerStmt given the change of ownership could cause new - * dependencies to exist for the domain we make sure all dependencies for the domain are - * created before we forward the statement to the workers. - */ -List * -PostprocessAlterDomainOwnerStmt(Node *node, const char *queryString) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - - ObjectAddress domainAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&domainAddress)) - { - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&domainAddress); - return NIL; -} - - -/* - * PreprocessRenameDomainStmt creates the statements to execute on the workers when the - * domain being renamed is distributed. - */ -List * -PreprocessRenameDomainStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - RenameStmt *stmt = castNode(RenameStmt, node); - Assert(stmt->renameType == OBJECT_DOMAIN); - - ObjectAddress domainAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&domainAddress)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_DOMAIN); - - QualifyTreeNode((Node *) stmt); - char *sqlStmt = DeparseTreeNode((Node *) stmt); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sqlStmt, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterDomainSchemaStmt cretes the statements to execute on the workers when - * the domain being moved to a new schema has been distributed. 
- */ -List * -PreprocessAlterDomainSchemaStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_DOMAIN); - - ObjectAddress domainAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&domainAddress)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_DOMAIN); - - QualifyTreeNode((Node *) stmt); - char *sqlStmt = DeparseTreeNode((Node *) stmt); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sqlStmt, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterDomainSchemaStmt makes sure any new dependencies (as result of the - * schema move) are created on the workers before we forward the statement. - */ -List * -PostprocessAlterDomainSchemaStmt(Node *node, const char *queryString) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - - ObjectAddress domainAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&domainAddress)) - { - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&domainAddress); - return NIL; -} - - -/* - * FilterNameListForDistributedDomains given a liost of domain names we will return a list - * filtered with only the names of distributed domains remaining. The pointer to the list - * distributedDomainAddresses is populated with a list of ObjectAddresses of the domains - * that are distributed. Indices between the returned list and the object addresses are - * synchronizes. - * Note: the pointer in distributedDomainAddresses should not be omitted - * - * When missing_ok is false this function will raise an error if a domain identified by a - * domain name cannot be found. - */ -static List * -FilterNameListForDistributedDomains(List *domainNames, bool missing_ok, - List **distributedDomainAddresses) -{ - Assert(distributedDomainAddresses != NULL); - - List *distributedDomainNames = NIL; - TypeName *domainName = NULL; - foreach_ptr(domainName, domainNames) - { - ObjectAddress objectAddress = GetDomainAddressByName(domainName, missing_ok); - if (IsObjectDistributed(&objectAddress)) - { - distributedDomainNames = lappend(distributedDomainNames, domainName); - if (distributedDomainAddresses) - { - ObjectAddress *allocatedAddress = palloc0(sizeof(ObjectAddress)); - *allocatedAddress = objectAddress; - *distributedDomainAddresses = lappend(*distributedDomainAddresses, - allocatedAddress); - } - } - } - - return distributedDomainNames; -} - - /* * GetDomainAddressByName returns the ObjectAddress of the domain identified by * domainName. 
When missing_ok is true the object id part of the ObjectAddress can be diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index efcdab15d..567bc7021 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -9,6 +9,8 @@ */ #include "postgres.h" + +#include "access/genam.h" #include "access/xact.h" #include "citus_version.h" #include "catalog/pg_extension_d.h" @@ -37,6 +39,8 @@ static void AddSchemaFieldIfMissing(CreateExtensionStmt *stmt); static List * FilterDistributedExtensions(List *extensionObjectList); static List * ExtensionNameListToObjectAddressList(List *extensionObjectList); static void MarkExistingObjectDependenciesDistributedIfSupported(void); +static List * GetAllViews(void); +static bool ShouldMarkRelationDistributedOnUpgrade(Oid relationId); static bool ShouldPropagateExtensionCommand(Node *parseTree); static bool IsAlterExtensionSetSchemaCitus(Node *parseTree); static Node * RecreateExtensionStmt(Oid extensionOid); @@ -295,7 +299,7 @@ FilterDistributedExtensions(List *extensionObjectList) { List *extensionNameList = NIL; - Value *objectName = NULL; + String *objectName = NULL; foreach_ptr(objectName, extensionObjectList) { const char *extensionName = strVal(objectName); @@ -334,7 +338,7 @@ ExtensionNameListToObjectAddressList(List *extensionObjectList) { List *extensionObjectAddressList = NIL; - Value *objectName; + String *objectName; foreach_ptr(objectName, extensionObjectList) { /* @@ -510,26 +514,78 @@ MarkExistingObjectDependenciesDistributedIfSupported() Oid citusTableId = InvalidOid; foreach_oid(citusTableId, citusTableIdList) { - ObjectAddress tableAddress = { 0 }; - ObjectAddressSet(tableAddress, RelationRelationId, citusTableId); - - if (ShouldSyncTableMetadata(citusTableId)) + if (!ShouldMarkRelationDistributedOnUpgrade(citusTableId)) { - /* we need to pass pointer allocated in the heap */ - ObjectAddress *addressPointer = palloc0(sizeof(ObjectAddress)); - *addressPointer = tableAddress; - - /* as of Citus 11, tables that should be synced are also considered object */ - resultingObjectAddresses = lappend(resultingObjectAddresses, addressPointer); + continue; } - List *distributableDependencyObjectAddresses = - GetDistributableDependenciesForObject(&tableAddress); + /* refrain reading the metadata cache for all tables */ + if (ShouldSyncTableMetadataViaCatalog(citusTableId)) + { + ObjectAddress tableAddress = { 0 }; + ObjectAddressSet(tableAddress, RelationRelationId, citusTableId); - resultingObjectAddresses = list_concat(resultingObjectAddresses, - distributableDependencyObjectAddresses); + /* + * We mark tables distributed immediately because we also need to mark + * views as distributed. We check whether the views that depend on + * the table has any auto-distirbutable dependencies below. Citus + * currently cannot "auto" distribute tables as dependencies, so we + * mark them distributed immediately. + */ + MarkObjectDistributedLocally(&tableAddress); + + /* + * All the distributable dependencies of a table should be marked as + * distributed. + */ + List *distributableDependencyObjectAddresses = + GetDistributableDependenciesForObject(&tableAddress); + + resultingObjectAddresses = + list_concat(resultingObjectAddresses, + distributableDependencyObjectAddresses); + } } + /* + * As of Citus 11, views on Citus tables that do not have any unsupported + * dependency should also be distributed. 
+ * + * In general, we mark views distributed as long as it does not have + * any unsupported dependencies. + */ + List *viewList = GetAllViews(); + Oid viewOid = InvalidOid; + foreach_oid(viewOid, viewList) + { + if (!ShouldMarkRelationDistributedOnUpgrade(viewOid)) + { + continue; + } + + ObjectAddress viewAddress = { 0 }; + ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + + /* + * If a view depends on multiple views, that view will be marked + * as distributed while it is processed for the last view + * table. + */ + MarkObjectDistributedLocally(&viewAddress); + + /* we need to pass pointer allocated in the heap */ + ObjectAddress *addressPointer = palloc0(sizeof(ObjectAddress)); + *addressPointer = viewAddress; + + List *distributableDependencyObjectAddresses = + GetDistributableDependenciesForObject(&viewAddress); + + resultingObjectAddresses = + list_concat(resultingObjectAddresses, + distributableDependencyObjectAddresses); + } + + /* resolve dependencies of the objects in pg_dist_object*/ List *distributedObjectAddressList = GetDistributedObjectAddressList(); @@ -565,6 +621,85 @@ MarkExistingObjectDependenciesDistributedIfSupported() } +/* + * GetAllViews returns list of view oids that exists on this server. + */ +static List * +GetAllViews(void) +{ + List *viewOidList = NIL; + + Relation pgClass = table_open(RelationRelationId, AccessShareLock); + + SysScanDesc scanDescriptor = systable_beginscan(pgClass, InvalidOid, false, NULL, + 0, NULL); + + HeapTuple heapTuple = systable_getnext(scanDescriptor); + while (HeapTupleIsValid(heapTuple)) + { + Form_pg_class relationForm = (Form_pg_class) GETSTRUCT(heapTuple); + + /* we're only interested in views */ + if (relationForm->relkind == RELKIND_VIEW) + { + viewOidList = lappend_oid(viewOidList, relationForm->oid); + } + + heapTuple = systable_getnext(scanDescriptor); + } + + systable_endscan(scanDescriptor); + table_close(pgClass, NoLock); + + return viewOidList; +} + + +/* + * ShouldMarkRelationDistributedOnUpgrade is a helper function that + * decides whether the input relation should be marked as distributed + * during the upgrade. + */ +static bool +ShouldMarkRelationDistributedOnUpgrade(Oid relationId) +{ + if (!EnableMetadataSync) + { + /* + * Just in case anything goes wrong, we should still be able + * to continue to the version upgrade. + */ + return false; + } + + ObjectAddress relationAddress = { 0 }; + ObjectAddressSet(relationAddress, RelationRelationId, relationId); + + bool pgObject = (relationId < FirstNormalObjectId); + bool ownedByExtension = IsTableOwnedByExtension(relationId); + bool alreadyDistributed = IsObjectDistributed(&relationAddress); + bool hasUnsupportedDependency = + DeferErrorIfHasUnsupportedDependency(&relationAddress) != NULL; + bool hasCircularDependency = + DeferErrorIfCircularDependencyExists(&relationAddress) != NULL; + + /* + * pgObject: Citus never marks pg objects as distributed + * ownedByExtension: let extensions manage its own objects + * alreadyDistributed: most likely via earlier versions + * hasUnsupportedDependency: Citus doesn't know how to distribute its dependencies + * hasCircularDependency: Citus cannot handle circular dependencies + */ + if (pgObject || ownedByExtension || alreadyDistributed || + hasUnsupportedDependency || hasCircularDependency) + { + return false; + } + + return true; +} + + /* * PreprocessAlterExtensionContentsStmt issues a notice. It does not propagate. 
*/ @@ -671,7 +806,7 @@ IsDropCitusExtensionStmt(Node *parseTree) } /* now that we have a DropStmt, check if citus extension is among the objects to dropped */ - Value *objectName; + String *objectName; foreach_ptr(objectName, dropStmt->objects) { const char *extensionName = strVal(objectName); @@ -878,6 +1013,19 @@ CreateExtensionWithVersion(char *extname, char *extVersion) } +/* + * GetExtensionVersionNumber convert extension version to real value + */ +double +GetExtensionVersionNumber(char *extVersion) +{ + char *strtokPosition = NULL; + char *versionVal = strtok_r(extVersion, "-", &strtokPosition); + double versionNumber = strtod(versionVal, NULL); + return versionNumber; +} + + /* * AlterExtensionUpdateStmt builds and execute Alter extension statements * per given extension name and updates extension verision diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c index 0777814df..a5fa21c0e 100644 --- a/src/backend/distributed/commands/foreign_server.c +++ b/src/backend/distributed/commands/foreign_server.c @@ -25,238 +25,9 @@ #include "nodes/primnodes.h" static Node * RecreateForeignServerStmt(Oid serverId); -static bool NameListHasDistributedServer(List *serverNames); static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing_ok); -/* - * PreprocessCreateForeignServerStmt is called during the planning phase for - * CREATE SERVER. - */ -List * -PreprocessCreateForeignServerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - if (!ShouldPropagate()) - { - return NIL; - } - - /* check creation against multi-statement transaction policy */ - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_FOREIGN_SERVER); - - char *sql = DeparseTreeNode(node); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterForeignServerStmt is called during the planning phase for - * ALTER SERVER .. OPTIONS .. - */ -List * -PreprocessAlterForeignServerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterForeignServerStmt *stmt = castNode(AlterForeignServerStmt, node); - - ObjectAddress address = GetObjectAddressByServerName(stmt->servername, false); - - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - - char *sql = DeparseTreeNode(node); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessRenameForeignServerStmt is called during the planning phase for - * ALTER SERVER RENAME. 
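`GetExtensionVersionNumber`, added in extension.c above, splits the version string on the first `-` with `strtok_r` and converts the leading part with `strtod`. Because `strtok_r` writes into its argument, callers should hand it a writable copy; a small usage sketch, where the literal version string is only an example of the assumed `major.minor-rev` format:

```c
/* illustrative usage inside some function: yields 11.0 from a string shaped like "11.0-1" */
char extVersion[] = "11.0-1";
double versionNumber = GetExtensionVersionNumber(extVersion);
```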
- */ -List * -PreprocessRenameForeignServerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - RenameStmt *stmt = castNode(RenameStmt, node); - Assert(stmt->renameType == OBJECT_FOREIGN_SERVER); - - ObjectAddress address = GetObjectAddressByServerName(strVal(stmt->object), false); - - /* filter distributed servers */ - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - - char *sql = DeparseTreeNode(node); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterForeignServerOwnerStmt is called during the planning phase for - * ALTER SERVER .. OWNER TO. - */ -List * -PreprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_FOREIGN_SERVER); - - ObjectAddress address = GetObjectAddressByServerName(strVal(stmt->object), false); - - /* filter distributed servers */ - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - - char *sql = DeparseTreeNode(node); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessDropForeignServerStmt is called during the planning phase for - * DROP SERVER. - */ -List * -PreprocessDropForeignServerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - DropStmt *stmt = castNode(DropStmt, node); - Assert(stmt->removeType == OBJECT_FOREIGN_SERVER); - - bool includesDistributedServer = NameListHasDistributedServer(stmt->objects); - - if (!includesDistributedServer) - { - return NIL; - } - - if (list_length(stmt->objects) > 1) - { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot drop distributed server with other servers"), - errhint("Try dropping each object in a separate DROP command"))); - } - - if (!ShouldPropagate()) - { - return NIL; - } - - EnsureCoordinator(); - - Assert(list_length(stmt->objects) == 1); - - Value *serverValue = linitial(stmt->objects); - ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false); - - /* unmark distributed server */ - UnmarkObjectDistributed(&address); - - const char *deparsedStmt = DeparseTreeNode((Node *) stmt); - - /* - * To prevent recursive propagation in mx architecture, we disable ddl - * propagation before sending the command to workers. - */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) deparsedStmt, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessCreateForeignServerStmt is called after a CREATE SERVER command has - * been executed by standard process utility. 
- */ -List * -PostprocessCreateForeignServerStmt(Node *node, const char *queryString) -{ - if (!ShouldPropagate()) - { - return NIL; - } - - /* check creation against multi-statement transaction policy */ - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return NIL; - } - - const bool missingOk = false; - ObjectAddress address = GetObjectAddressFromParseTree(node, missingOk); - EnsureDependenciesExistOnAllNodes(&address); - - return NIL; -} - - -/* - * PostprocessAlterForeignServerOwnerStmt is called after a ALTER SERVER OWNER command - * has been executed by standard process utility. - */ -List * -PostprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString) -{ - const bool missingOk = false; - ObjectAddress address = GetObjectAddressFromParseTree(node, missingOk); - - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&address); - - return NIL; -} - - /* * CreateForeignServerStmtObjectAddress finds the ObjectAddress for the server * that is created by given CreateForeignServerStmt. If missingOk is false and if @@ -274,6 +45,41 @@ CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok) } +/* + * AlterForeignServerStmtObjectAddress finds the ObjectAddress for the server that is + * changed by given AlterForeignServerStmt. If missingOk is false and if + * the server does not exist, then it errors out. + * + * Never returns NULL, but the objid in the address can be invalid if missingOk + * was set to true. + */ +ObjectAddress +AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterForeignServerStmt *stmt = castNode(AlterForeignServerStmt, node); + + return GetObjectAddressByServerName(stmt->servername, missing_ok); +} + + +/* + * RenameForeignServerStmtObjectAddress finds the ObjectAddress for the server that is + * renamed by given RenmaeStmt. If missingOk is false and if the server does not exist, + * then it errors out. + * + * Never returns NULL, but the objid in the address can be invalid if missingOk + * was set to true. + */ +ObjectAddress +RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_FOREIGN_SERVER); + + return GetObjectAddressByServerName(strVal(stmt->object), missing_ok); +} + + /* * AlterForeignServerOwnerStmtObjectAddress finds the ObjectAddress for the server * given in AlterOwnerStmt. If missingOk is false and if @@ -355,28 +161,6 @@ RecreateForeignServerStmt(Oid serverId) } -/* - * NameListHasDistributedServer takes a namelist of servers and returns true if at least - * one of them is distributed. Returns false otherwise. 
- */ -static bool -NameListHasDistributedServer(List *serverNames) -{ - Value *serverValue = NULL; - foreach_ptr(serverValue, serverNames) - { - ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false); - - if (IsObjectDistributed(&address)) - { - return true; - } - } - - return false; -} - - static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing_ok) { diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 879aa4770..c8947b351 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -490,7 +490,7 @@ GetDistributionArgIndex(Oid functionOid, char *distributionArgumentName, distributionArgumentName++; /* throws error if the input is not an integer */ - distributionArgumentIndex = pg_atoi(distributionArgumentName, 4, 0); + distributionArgumentIndex = pg_strtoint32(distributionArgumentName); if (distributionArgumentIndex < 1 || distributionArgumentIndex > numberOfArgs) { @@ -1502,234 +1502,6 @@ PreprocessAlterFunctionStmt(Node *node, const char *queryString, } -/* - * PreprocessRenameFunctionStmt is called when the user is renaming a function. The invocation - * happens before the statement is applied locally. - * - * As the function already exists we have access to the ObjectAddress, this is used to - * check if it is distributed. If so the rename is executed on all the workers to keep the - * types in sync across the cluster. - */ -List * -PreprocessRenameFunctionStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - RenameStmt *stmt = castNode(RenameStmt, node); - AssertObjectTypeIsFunctional(stmt->renameType); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateAlterFunction(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_FUNCTION); - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterFunctionSchemaStmt is executed before the statement is applied to the local - * postgres instance. - * - * In this stage we can prepare the commands that need to be run on all workers. - */ -List * -PreprocessAlterFunctionSchemaStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - AssertObjectTypeIsFunctional(stmt->objectType); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateAlterFunction(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_FUNCTION); - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterFunctionOwnerStmt is called for change of owner ship of functions before the owner - * ship is changed on the local instance. - * - * If the function for which the owner is changed is distributed we execute the change on - * all the workers to keep the type in sync across the cluster. 
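The switch from `pg_atoi(distributionArgumentName, 4, 0)` to `pg_strtoint32(distributionArgumentName)` in `GetDistributionArgIndex` above keeps the same behaviour for well-formed input: both parse a 32-bit integer and error out on anything else, with `pg_strtoint32` presumably chosen because `pg_atoi` is no longer available in newer PostgreSQL versions. A trivial sketch of the equivalence being relied on:

```c
/* illustrative: both spellings parse the same int32 value and error on non-integers */
int32 argumentIndex = pg_strtoint32("3");   /* previously: pg_atoi("3", 4, 0) */
```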
- */ -List * -PreprocessAlterFunctionOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - AssertObjectTypeIsFunctional(stmt->objectType); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateAlterFunction(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_FUNCTION); - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterFunctionOwnerStmt is invoked after the owner has been changed locally. - * Since changing the owner could result in new dependencies being found for this object - * we re-ensure all the dependencies for the function do exist. - * - * This is solely to propagate the new owner (and all its dependencies) if it was not - * already distributed in the cluster. - */ -List * -PostprocessAlterFunctionOwnerStmt(Node *node, const char *queryString) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - AssertObjectTypeIsFunctional(stmt->objectType); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateAlterFunction(&address)) - { - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&address); - - return NIL; -} - - -/* - * PreprocessDropFunctionStmt gets called during the planning phase of a DROP FUNCTION statement - * and returns a list of DDLJob's that will drop any distributed functions from the - * workers. - * - * The DropStmt could have multiple objects to drop, the list of objects will be filtered - * to only keep the distributed functions for deletion on the workers. Non-distributed - * functions will still be dropped locally but not on the workers. - */ -List * -PreprocessDropFunctionStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - DropStmt *stmt = castNode(DropStmt, node); - List *deletingObjectWithArgsList = stmt->objects; - List *distributedObjectWithArgsList = NIL; - List *distributedFunctionAddresses = NIL; - - AssertObjectTypeIsFunctional(stmt->removeType); - - if (creating_extension) - { - /* - * extensions should be created separately on the workers, types cascading from an - * extension should therefore not be propagated here. - */ - return NIL; - } - - if (!EnableMetadataSync) - { - /* - * we are configured to disable object propagation, should not propagate anything - */ - return NIL; - } - - - /* - * Our statements need to be fully qualified so we can drop them from the right schema - * on the workers - */ - QualifyTreeNode((Node *) stmt); - - /* - * iterate over all functions to be dropped and filter to keep only distributed - * functions. 
- */ - ObjectWithArgs *func = NULL; - foreach_ptr(func, deletingObjectWithArgsList) - { - ObjectAddress address = FunctionToObjectAddress(stmt->removeType, func, - stmt->missing_ok); - - if (!IsObjectDistributed(&address)) - { - continue; - } - - /* collect information for all distributed functions */ - ObjectAddress *addressp = palloc(sizeof(ObjectAddress)); - *addressp = address; - distributedFunctionAddresses = lappend(distributedFunctionAddresses, addressp); - distributedObjectWithArgsList = lappend(distributedObjectWithArgsList, func); - } - - if (list_length(distributedObjectWithArgsList) <= 0) - { - /* no distributed functions to drop */ - return NIL; - } - - /* - * managing types can only be done on the coordinator if ddl propagation is on. when - * it is off we will never get here. MX workers don't have a notion of distributed - * types, so we block the call. - */ - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_FUNCTION); - - /* remove the entries for the distributed objects on dropping */ - ObjectAddress *address = NULL; - foreach_ptr(address, distributedFunctionAddresses) - { - UnmarkObjectDistributed(address); - } - - /* - * Swap the list of objects before deparsing and restore the old list after. This - * ensures we only have distributed functions in the deparsed drop statement. - */ - DropStmt *stmtCopy = copyObject(stmt); - stmtCopy->objects = distributedObjectWithArgsList; - const char *dropStmtSql = DeparseTreeNode((Node *) stmtCopy); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - /* * PreprocessAlterFunctionDependsStmt is called during the planning phase of an * ALTER FUNCION ... DEPENDS ON EXTENSION ... statement. Since functions depending on @@ -1803,30 +1575,6 @@ AlterFunctionDependsStmtObjectAddress(Node *node, bool missing_ok) } -/* - * PostprocessAlterFunctionSchemaStmt is executed after the change has been applied locally, - * we can now use the new dependencies of the function to ensure all its dependencies - * exist on the workers before we apply the commands remotely. - */ -List * -PostprocessAlterFunctionSchemaStmt(Node *node, const char *queryString) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - AssertObjectTypeIsFunctional(stmt->objectType); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateAlterFunction(&address)) - { - return NIL; - } - - /* dependencies have changed (schema) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&address); - - return NIL; -} - - /* * AlterFunctionStmtObjectAddress returns the ObjectAddress of the subject in the * AlterFunctionStmt. 
If missing_ok is set to false an error will be raised if postgres @@ -1893,7 +1641,7 @@ AlterFunctionSchemaStmtObjectAddress(Node *node, bool missing_ok) */ /* the name of the function is the last in the list of names */ - Value *funcNameStr = lfirst(list_tail(names)); + String *funcNameStr = lfirst(list_tail(names)); List *newNames = list_make2(makeString(stmt->newschema), funcNameStr); /* @@ -1938,8 +1686,8 @@ GenerateBackupNameForProcCollision(const ObjectAddress *address) char *newName = palloc0(NAMEDATALEN); char suffix[NAMEDATALEN] = { 0 }; int count = 0; - Value *namespace = makeString(get_namespace_name(get_func_namespace( - address->objectId))); + String *namespace = makeString(get_namespace_name(get_func_namespace( + address->objectId))); char *baseName = get_func_name(address->objectId); int baseLength = strlen(baseName); Oid *argtypes = NULL; diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 5ff984f66..ac04d9701 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -42,6 +42,7 @@ #include "lib/stringinfo.h" #include "miscadmin.h" #include "nodes/parsenodes.h" +#include "parser/parse_utilcmd.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/fmgroids.h" @@ -184,9 +185,18 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand, */ ErrorIfCreateIndexHasTooManyColumns(createIndexStatement); + /* + * If there are expressions on the index, we should first transform + * the statement as the default index name depends on that. We do + * it on a copy not to interfere with standard process utility. + */ + IndexStmt *copyCreateIndexStatement = + transformIndexStmt(relation->rd_id, copyObject(createIndexStatement), + createIndexCommand); + /* ensure we copy string into proper context */ MemoryContext relationContext = GetMemoryChunkContext(relationRangeVar); - char *defaultIndexName = GenerateDefaultIndexName(createIndexStatement); + char *defaultIndexName = GenerateDefaultIndexName(copyCreateIndexStatement); createIndexStatement->idxname = MemoryContextStrdup(relationContext, defaultIndexName); } @@ -464,7 +474,8 @@ GenerateCreateIndexDDLJob(IndexStmt *createIndexStatement, const char *createInd { DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = CreateIndexStmtGetRelationId(createIndexStatement); + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, + CreateIndexStmtGetRelationId(createIndexStatement)); ddlJob->startNewTransaction = createIndexStatement->concurrent; ddlJob->metadataSyncCommand = createIndexCommand; ddlJob->taskList = CreateIndexTaskList(createIndexStatement); @@ -598,7 +609,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand, } DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->startNewTransaction = IsReindexWithParam_compat(reindexStatement, "concurrently"); ddlJob->metadataSyncCommand = reindexCommand; @@ -695,7 +706,8 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand, MarkInvalidateForeignKeyGraph(); } - ddlJob->targetRelationId = distributedRelationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, + distributedRelationId); /* * We do not want DROP INDEX CONCURRENTLY to commit locally before diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index d2d7d9b23..80e90f88d 100644 
--- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -2009,7 +2009,7 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation, foreach(columnNameCell, columnNameList) { char *columnName = (char *) lfirst(columnNameCell); - Value *columnNameValue = makeString(columnName); + String *columnNameValue = makeString(columnName); attributeList = lappend(attributeList, columnNameValue); } @@ -3430,10 +3430,7 @@ InitializeCopyShardState(CopyShardState *shardState, ereport(ERROR, (errmsg("could not connect to any active placements"))); } - if (hasRemoteCopy) - { - EnsureRemoteTaskExecutionAllowed(); - } + EnsureTaskExecutionAllowed(hasRemoteCopy); /* * We just error out and code execution should never reach to this diff --git a/src/backend/distributed/commands/rename.c b/src/backend/distributed/commands/rename.c index d777c420b..6511aed81 100644 --- a/src/backend/distributed/commands/rename.c +++ b/src/backend/distributed/commands/rename.c @@ -36,11 +36,12 @@ PreprocessRenameStmt(Node *node, const char *renameCommand, /* * We only support some of the PostgreSQL supported RENAME statements, and - * our list include only renaming table and index (related) objects. + * our list include only renaming table, index, policy and view (related) objects. */ if (!IsAlterTableRenameStmt(renameStmt) && !IsIndexRenameStmt(renameStmt) && - !IsPolicyRenameStmt(renameStmt)) + !IsPolicyRenameStmt(renameStmt) && + !IsViewRenameStmt(renameStmt)) { return NIL; } @@ -48,7 +49,7 @@ PreprocessRenameStmt(Node *node, const char *renameCommand, /* * The lock levels here should be same as the ones taken in * RenameRelation(), renameatt() and RenameConstraint(). However, since all - * three statements have identical lock levels, we just use a single statement. + * four statements have identical lock levels, we just use a single statement. */ objectRelationId = RangeVarGetRelid(renameStmt->relation, AccessExclusiveLock, @@ -63,14 +64,31 @@ PreprocessRenameStmt(Node *node, const char *renameCommand, return NIL; } - /* check whether we are dealing with a sequence here */ - if (get_rel_relkind(objectRelationId) == RELKIND_SEQUENCE) + /* + * Check whether we are dealing with a sequence or view here and route queries + * accordingly to the right processor function. We need to check both objects here + * since PG supports targeting sequences and views with ALTER TABLE commands. 
+ */ + char relKind = get_rel_relkind(objectRelationId); + if (relKind == RELKIND_SEQUENCE) { RenameStmt *stmtCopy = copyObject(renameStmt); stmtCopy->renameType = OBJECT_SEQUENCE; return PreprocessRenameSequenceStmt((Node *) stmtCopy, renameCommand, processUtilityContext); } + else if (relKind == RELKIND_VIEW) + { + RenameStmt *stmtCopy = copyObject(renameStmt); + stmtCopy->relationType = OBJECT_VIEW; + if (stmtCopy->renameType == OBJECT_TABLE) + { + stmtCopy->renameType = OBJECT_VIEW; + } + + return PreprocessRenameViewStmt((Node *) stmtCopy, renameCommand, + processUtilityContext); + } /* we have no planning to do unless the table is distributed */ switch (renameStmt->renameType) @@ -127,7 +145,7 @@ PreprocessRenameStmt(Node *node, const char *renameCommand, } DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = tableRelationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, tableRelationId); ddlJob->metadataSyncCommand = renameCommand; ddlJob->taskList = DDLTaskList(tableRelationId, renameCommand); diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 608dc0060..af0a3a856 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -150,7 +150,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) if (encryptedPassword != NULL) { - Value *encryptedPasswordValue = makeString((char *) encryptedPassword); + String *encryptedPasswordValue = makeString((char *) encryptedPassword); option->arg = (Node *) encryptedPasswordValue; } else @@ -741,8 +741,13 @@ makeStringConst(char *str, int location) { A_Const *n = makeNode(A_Const); +#if PG_VERSION_NUM >= PG_VERSION_15 + n->val.sval.type = T_String; + n->val.sval.sval = str; +#else n->val.type = T_String; n->val.val.str = str; +#endif n->location = location; return (Node *) n; @@ -759,8 +764,13 @@ makeIntConst(int val, int location) { A_Const *n = makeNode(A_Const); +#if PG_VERSION_NUM >= PG_VERSION_15 + n->val.ival.type = T_Integer; + n->val.ival.ival = val; +#else n->val.type = T_Integer; n->val.val.ival = val; +#endif n->location = location; return (Node *) n; @@ -777,8 +787,13 @@ makeFloatConst(char *str, int location) { A_Const *n = makeNode(A_Const); +#if PG_VERSION_NUM >= PG_VERSION_15 + n->val.fval.type = T_Float; + n->val.fval.fval = str; +#else n->val.type = T_Float; n->val.val.str = str; +#endif n->location = location; return (Node *) n; diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index cdee81349..6232f12fa 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -107,7 +107,7 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString, EnsureSequentialMode(OBJECT_SCHEMA); - Value *schemaVal = NULL; + String *schemaVal = NULL; foreach_ptr(schemaVal, distributedSchemas) { if (SchemaHasDistributedTableWithFKey(strVal(schemaVal))) @@ -182,43 +182,6 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString, } -/* - * PreprocessAlterSchemaRenameStmt is called when the user is renaming a schema. - * The invocation happens before the statement is applied locally. - * - * As the schema already exists we have access to the ObjectAddress for the schema, this - * is used to check if the schmea is distributed. If the schema is distributed the rename - * is executed on all the workers to keep the schemas in sync across the cluster. 
- */ -List * -PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - ObjectAddress schemaAddress = GetObjectAddressFromParseTree(node, false); - if (!ShouldPropagateObject(&schemaAddress)) - { - return NIL; - } - - EnsureCoordinator(); - - /* fully qualify */ - QualifyTreeNode(node); - - /* deparse sql*/ - const char *renameStmtSql = DeparseTreeNode(node); - - EnsureSequentialMode(OBJECT_SCHEMA); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) renameStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - /* * CreateSchemaStmtObjectAddress returns the ObjectAddress of the schema that is * the object of the CreateSchemaStmt. Errors if missing_ok is false. @@ -288,7 +251,7 @@ FilterDistributedSchemas(List *schemas) { List *distributedSchemas = NIL; - Value *schemaValue = NULL; + String *schemaValue = NULL; foreach_ptr(schemaValue, schemas) { const char *schemaName = strVal(schemaValue); diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 3638ab737..b97ca4215 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -226,7 +226,6 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { DropStmt *stmt = castNode(DropStmt, node); - List *deletingSequencesList = stmt->objects; List *distributedSequencesList = NIL; List *distributedSequenceAddresses = NIL; @@ -259,6 +258,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString, * iterate over all sequences to be dropped and filter to keep only distributed * sequences. 
*/ + List *deletingSequencesList = stmt->objects; List *objectNameList = NULL; foreach_ptr(objectNameList, deletingSequencesList) { diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index 79a758c10..6f8e6df54 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -92,7 +92,7 @@ PreprocessCreateStatisticsStmt(Node *node, const char *queryString, DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->startNewTransaction = false; ddlJob->metadataSyncCommand = ddlCommand; ddlJob->taskList = DDLTaskList(relationId, ddlCommand); @@ -197,7 +197,7 @@ PreprocessDropStatisticsStmt(Node *node, const char *queryString, DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->startNewTransaction = false; ddlJob->metadataSyncCommand = ddlCommand; ddlJob->taskList = DDLTaskList(relationId, ddlCommand); @@ -236,7 +236,7 @@ PreprocessAlterStatisticsRenameStmt(Node *node, const char *queryString, DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->startNewTransaction = false; ddlJob->metadataSyncCommand = ddlCommand; ddlJob->taskList = DDLTaskList(relationId, ddlCommand); @@ -274,7 +274,7 @@ PreprocessAlterStatisticsSchemaStmt(Node *node, const char *queryString, DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->startNewTransaction = false; ddlJob->metadataSyncCommand = ddlCommand; ddlJob->taskList = DDLTaskList(relationId, ddlCommand); @@ -295,7 +295,7 @@ PostprocessAlterStatisticsSchemaStmt(Node *node, const char *queryString) AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); Assert(stmt->objectType == OBJECT_STATISTIC_EXT); - Value *statName = llast((List *) stmt->object); + String *statName = llast((List *) stmt->object); Oid statsOid = get_statistics_object_oid(list_make2(makeString(stmt->newschema), statName), false); Oid relationId = GetRelIdByStatsOid(statsOid); @@ -328,7 +328,7 @@ AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk) AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); ObjectAddress address = { 0 }; - Value *statName = llast((List *) stmt->object); + String *statName = llast((List *) stmt->object); Oid statsOid = get_statistics_object_oid(list_make2(makeString(stmt->newschema), statName), missingOk); ObjectAddressSet(address, StatisticExtRelationId, statsOid); @@ -376,7 +376,7 @@ PreprocessAlterStatisticsStmt(Node *node, const char *queryString, DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->startNewTransaction = false; ddlJob->metadataSyncCommand = ddlCommand; ddlJob->taskList = DDLTaskList(relationId, ddlCommand); @@ -416,7 +416,7 @@ PreprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString, DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->startNewTransaction = false; ddlJob->metadataSyncCommand = 
ddlCommand; ddlJob->taskList = DDLTaskList(relationId, ddlCommand); diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 220a4d049..72d761433 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -651,12 +651,21 @@ PostprocessAlterTableSchemaStmt(Node *node, const char *queryString) */ ObjectAddress tableAddress = GetObjectAddressFromParseTree((Node *) stmt, true); - /* check whether we are dealing with a sequence here */ - if (get_rel_relkind(tableAddress.objectId) == RELKIND_SEQUENCE) + /* + * Check whether we are dealing with a sequence or view here and route queries + * accordingly to the right processor function. + */ + char relKind = get_rel_relkind(tableAddress.objectId); + if (relKind == RELKIND_SEQUENCE) { stmt->objectType = OBJECT_SEQUENCE; return PostprocessAlterSequenceSchemaStmt((Node *) stmt, queryString); } + else if (relKind == RELKIND_VIEW) + { + stmt->objectType = OBJECT_VIEW; + return PostprocessAlterViewSchemaStmt((Node *) stmt, queryString); + } if (!ShouldPropagate() || !IsCitusTable(tableAddress.objectId)) { @@ -699,18 +708,26 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, } /* - * check whether we are dealing with a sequence here + * check whether we are dealing with a sequence or view here * if yes, it must be ALTER TABLE .. OWNER TO .. command - * since this is the only ALTER command of a sequence that + * since this is the only ALTER command of a sequence or view that * passes through an AlterTableStmt */ - if (get_rel_relkind(leftRelationId) == RELKIND_SEQUENCE) + char relKind = get_rel_relkind(leftRelationId); + if (relKind == RELKIND_SEQUENCE) { AlterTableStmt *stmtCopy = copyObject(alterTableStatement); AlterTableStmtObjType_compat(stmtCopy) = OBJECT_SEQUENCE; return PreprocessAlterSequenceOwnerStmt((Node *) stmtCopy, alterTableCommand, processUtilityContext); } + else if (relKind == RELKIND_VIEW) + { + AlterTableStmt *stmtCopy = copyObject(alterTableStatement); + AlterTableStmtObjType_compat(stmtCopy) = OBJECT_VIEW; + return PreprocessAlterViewStmt((Node *) stmtCopy, alterTableCommand, + processUtilityContext); + } /* * AlterTableStmt applies also to INDEX relations, and we have support for @@ -1102,7 +1119,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, /* fill them here as it is possible to use them in some conditional blocks below */ DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = leftRelationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, leftRelationId); const char *sqlForTaskList = alterTableCommand; if (deparseAT) @@ -1758,18 +1775,31 @@ PreprocessAlterTableSchemaStmt(Node *node, const char *queryString, { return NIL; } + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, stmt->missing_ok); Oid relationId = address.objectId; - /* check whether we are dealing with a sequence here */ - if (get_rel_relkind(relationId) == RELKIND_SEQUENCE) + /* + * Check whether we are dealing with a sequence or view here and route queries + * accordingly to the right processor function. We need to check both objects here + * since PG supports targeting sequences and views with ALTER TABLE commands. 
+ */ + char relKind = get_rel_relkind(relationId); + if (relKind == RELKIND_SEQUENCE) { AlterObjectSchemaStmt *stmtCopy = copyObject(stmt); stmtCopy->objectType = OBJECT_SEQUENCE; return PreprocessAlterSequenceSchemaStmt((Node *) stmtCopy, queryString, processUtilityContext); } + else if (relKind == RELKIND_VIEW) + { + AlterObjectSchemaStmt *stmtCopy = copyObject(stmt); + stmtCopy->objectType = OBJECT_VIEW; + return PreprocessAlterViewSchemaStmt((Node *) stmtCopy, queryString, + processUtilityContext); + } /* first check whether a distributed relation is affected */ if (!OidIsValid(relationId) || !IsCitusTable(relationId)) @@ -1779,7 +1809,7 @@ PreprocessAlterTableSchemaStmt(Node *node, const char *queryString, DDLJob *ddlJob = palloc0(sizeof(DDLJob)); QualifyTreeNode((Node *) stmt); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->metadataSyncCommand = DeparseTreeNode((Node *) stmt); ddlJob->taskList = DDLTaskList(relationId, ddlJob->metadataSyncCommand); return list_make1(ddlJob); @@ -1939,12 +1969,19 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) * since this is the only ALTER command of a sequence that * passes through an AlterTableStmt */ - if (get_rel_relkind(relationId) == RELKIND_SEQUENCE) + char relKind = get_rel_relkind(relationId); + if (relKind == RELKIND_SEQUENCE) { AlterTableStmtObjType_compat(alterTableStatement) = OBJECT_SEQUENCE; PostprocessAlterSequenceOwnerStmt((Node *) alterTableStatement, NULL); return; } + else if (relKind == RELKIND_VIEW) + { + AlterTableStmtObjType_compat(alterTableStatement) = OBJECT_VIEW; + PostprocessAlterViewStmt((Node *) alterTableStatement, NULL); + return; + } /* * Before ensuring each dependency exist, update dependent sequences diff --git a/src/backend/distributed/commands/text_search.c b/src/backend/distributed/commands/text_search.c index 1b5e84aa7..05319324d 100644 --- a/src/backend/distributed/commands/text_search.c +++ b/src/backend/distributed/commands/text_search.c @@ -42,8 +42,6 @@ #include "distributed/worker_create_or_replace.h" -static List * GetDistributedTextSearchConfigurationNames(DropStmt *stmt); -static List * GetDistributedTextSearchDictionaryNames(DropStmt *stmt); static DefineStmt * GetTextSearchConfigDefineStmt(Oid tsconfigOid); static DefineStmt * GetTextSearchDictionaryDefineStmt(Oid tsdictOid); static List * GetTextSearchDictionaryInitOptions(HeapTuple tup, Form_pg_ts_dict dict); @@ -59,113 +57,6 @@ static List * get_ts_template_namelist(Oid tstemplateOid); static Oid get_ts_config_parser_oid(Oid tsconfigOid); static char * get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype); -/* - * PostprocessCreateTextSearchConfigurationStmt is called after the TEXT SEARCH - * CONFIGURATION has been created locally. - * - * Contrary to many other objects a text search configuration is often created as a copy - * of an existing configuration. After the copy there is no relation to the configuration - * that has been copied. This prevents our normal approach of ensuring dependencies to - * exist before forwarding a close ressemblance of the statement the user executed. - * - * Instead we recreate the object based on what we find in our own catalog, hence the - * amount of work we perform in the postprocess function, contrary to other objects. 
- */ -List * -PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString) -{ - DefineStmt *stmt = castNode(DefineStmt, node); - Assert(stmt->kind == OBJECT_TSCONFIGURATION); - - if (!ShouldPropagate()) - { - return NIL; - } - - /* check creation against multi-statement transaction policy */ - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSCONFIGURATION); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - - DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&address); - if (errMsg != NULL) - { - RaiseDeferredError(errMsg, WARNING); - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&address); - - /* - * TEXT SEARCH CONFIGURATION objects are more complex with their mappings and the - * possibility of copying from existing templates that we will require the idempotent - * recreation commands to be run for successful propagation - */ - List *commands = CreateTextSearchConfigDDLCommandsIdempotent(&address); - - commands = lcons(DISABLE_DDL_PROPAGATION, commands); - commands = lappend(commands, ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessCreateTextSearchDictionaryStmt is called after the TEXT SEARCH DICTIONARY has been - * created locally. - */ -List * -PostprocessCreateTextSearchDictionaryStmt(Node *node, const char *queryString) -{ - DefineStmt *stmt = castNode(DefineStmt, node); - Assert(stmt->kind == OBJECT_TSDICTIONARY); - - if (!ShouldPropagate()) - { - return NIL; - } - - /* check creation against multi-statement transaction policy */ - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSDICTIONARY); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - - DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&address); - if (errMsg != NULL) - { - RaiseDeferredError(errMsg, WARNING); - return NIL; - } - - EnsureDependenciesExistOnAllNodes(&address); - - QualifyTreeNode(node); - const char *createTSDictionaryStmtSql = DeparseTreeNode(node); - - /* - * To prevent recursive propagation in mx architecture, we disable ddl - * propagation before sending the command to workers. - */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) createTSDictionaryStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - List * GetCreateTextSearchConfigStatements(const ObjectAddress *address) { @@ -234,602 +125,6 @@ CreateTextSearchDictDDLCommandsIdempotent(const ObjectAddress *address) } -/* - * PreprocessDropTextSearchConfigurationStmt prepares the statements we need to send to - * the workers. After we have dropped the configurations locally they also got removed from - * pg_dist_object so it is important to do all distribution checks before the change is - * made locally. 
- */ -List * -PreprocessDropTextSearchConfigurationStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - DropStmt *stmt = castNode(DropStmt, node); - Assert(stmt->removeType == OBJECT_TSCONFIGURATION); - - if (!ShouldPropagate()) - { - return NIL; - } - - List *distributedObjects = GetDistributedTextSearchConfigurationNames(stmt); - if (list_length(distributedObjects) == 0) - { - /* no distributed objects to remove */ - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSCONFIGURATION); - - /* - * Temporarily replace the list of objects being dropped with only the list - * containing the distributed objects. After we have created the sql statement we - * restore the original list of objects to execute on locally. - * - * Because searchpaths on coordinator and workers might not be in sync we fully - * qualify the list before deparsing. This is safe because qualification doesn't - * change the original names in place, but insteads creates new ones. - */ - List *originalObjects = stmt->objects; - stmt->objects = distributedObjects; - QualifyTreeNode((Node *) stmt); - const char *dropStmtSql = DeparseTreeNode((Node *) stmt); - stmt->objects = originalObjects; - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessDropTextSearchDictionaryStmt prepares the statements we need to send to - * the workers. After we have dropped the dictionaries locally they also got removed from - * pg_dist_object so it is important to do all distribution checks before the change is - * made locally. - */ -List * -PreprocessDropTextSearchDictionaryStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - DropStmt *stmt = castNode(DropStmt, node); - Assert(stmt->removeType == OBJECT_TSDICTIONARY); - - if (!ShouldPropagate()) - { - return NIL; - } - - List *distributedObjects = GetDistributedTextSearchDictionaryNames(stmt); - if (list_length(distributedObjects) == 0) - { - /* no distributed objects to remove */ - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSDICTIONARY); - - /* - * Temporarily replace the list of objects being dropped with only the list - * containing the distributed objects. After we have created the sql statement we - * restore the original list of objects to execute on locally. - * - * Because searchpaths on coordinator and workers might not be in sync we fully - * qualify the list before deparsing. This is safe because qualification doesn't - * change the original names in place, but insteads creates new ones. - */ - List *originalObjects = stmt->objects; - stmt->objects = distributedObjects; - QualifyTreeNode((Node *) stmt); - const char *dropStmtSql = DeparseTreeNode((Node *) stmt); - stmt->objects = originalObjects; - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * GetDistributedTextSearchConfigurationNames iterates over all text search configurations - * dropped, and create a list containing all configurations that are distributed. 
- */ -static List * -GetDistributedTextSearchConfigurationNames(DropStmt *stmt) -{ - List *objName = NULL; - List *distributedObjects = NIL; - foreach_ptr(objName, stmt->objects) - { - Oid tsconfigOid = get_ts_config_oid(objName, stmt->missing_ok); - if (!OidIsValid(tsconfigOid)) - { - /* skip missing configuration names, they can't be distributed */ - continue; - } - - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSConfigRelationId, tsconfigOid); - if (!IsObjectDistributed(&address)) - { - continue; - } - distributedObjects = lappend(distributedObjects, objName); - } - return distributedObjects; -} - - -/* - * GetDistributedTextSearchDictionaryNames iterates over all text search dictionaries - * dropped, and create a list containing all dictionaries that are distributed. - */ -static List * -GetDistributedTextSearchDictionaryNames(DropStmt *stmt) -{ - List *objName = NULL; - List *distributedObjects = NIL; - foreach_ptr(objName, stmt->objects) - { - Oid tsdictOid = get_ts_dict_oid(objName, stmt->missing_ok); - if (!OidIsValid(tsdictOid)) - { - /* skip missing dictionary names, they can't be distributed */ - continue; - } - - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSDictionaryRelationId, tsdictOid); - if (!IsObjectDistributed(&address)) - { - continue; - } - distributedObjects = lappend(distributedObjects, objName); - } - return distributedObjects; -} - - -/* - * PreprocessAlterTextSearchConfigurationStmt verifies if the configuration being altered - * is distributed in the cluster. If that is the case it will prepare the list of commands - * to send to the worker to apply the same changes remote. - */ -List * -PreprocessAlterTextSearchConfigurationStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSCONFIGURATION); - - QualifyTreeNode((Node *) stmt); - const char *alterStmtSql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) alterStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterTextSearchDictionaryStmt verifies if the dictionary being altered is - * distributed in the cluster. If that is the case it will prepare the list of commands to - * send to the worker to apply the same changes remote. - */ -List * -PreprocessAlterTextSearchDictionaryStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSDICTIONARY); - - QualifyTreeNode((Node *) stmt); - const char *alterStmtSql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) alterStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessRenameTextSearchConfigurationStmt verifies if the configuration being altered - * is distributed in the cluster. 
If that is the case it will prepare the list of commands - * to send to the worker to apply the same changes remote. - */ -List * -PreprocessRenameTextSearchConfigurationStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - RenameStmt *stmt = castNode(RenameStmt, node); - Assert(stmt->renameType == OBJECT_TSCONFIGURATION); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSCONFIGURATION); - - QualifyTreeNode((Node *) stmt); - - char *ddlCommand = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) ddlCommand, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessRenameTextSearchDictionaryStmt verifies if the dictionary being altered - * is distributed in the cluster. If that is the case it will prepare the list of commands - * to send to the worker to apply the same changes remote. - */ -List * -PreprocessRenameTextSearchDictionaryStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - RenameStmt *stmt = castNode(RenameStmt, node); - Assert(stmt->renameType == OBJECT_TSDICTIONARY); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSDICTIONARY); - - QualifyTreeNode((Node *) stmt); - - char *ddlCommand = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) ddlCommand, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterTextSearchConfigurationSchemaStmt verifies if the configuration being - * altered is distributed in the cluster. If that is the case it will prepare the list of - * commands to send to the worker to apply the same changes remote. - */ -List * -PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString, - ProcessUtilityContext - processUtilityContext) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_TSCONFIGURATION); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSCONFIGURATION); - - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterTextSearchDictionarySchemaStmt verifies if the dictionary being - * altered is distributed in the cluster. If that is the case it will prepare the list of - * commands to send to the worker to apply the same changes remote. 
- */ -List * -PreprocessAlterTextSearchDictionarySchemaStmt(Node *node, const char *queryString, - ProcessUtilityContext - processUtilityContext) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_TSDICTIONARY); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSDICTIONARY); - - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterTextSearchConfigurationSchemaStmt is invoked after the schema has been - * changed locally. Since changing the schema could result in new dependencies being found - * for this object we re-ensure all the dependencies for the configuration do exist. This - * is solely to propagate the new schema (and all its dependencies) if it was not already - * distributed in the cluster. - */ -List * -PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_TSCONFIGURATION); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - /* dependencies have changed (schema) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&address); - - return NIL; -} - - -/* - * PostprocessAlterTextSearchDictionarySchemaStmt is invoked after the schema has been - * changed locally. Since changing the schema could result in new dependencies being found - * for this object we re-ensure all the dependencies for the dictionary do exist. This - * is solely to propagate the new schema (and all its dependencies) if it was not already - * distributed in the cluster. - */ -List * -PostprocessAlterTextSearchDictionarySchemaStmt(Node *node, const char *queryString) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_TSDICTIONARY); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - /* dependencies have changed (schema) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&address); - - return NIL; -} - - -/* - * PreprocessTextSearchConfigurationCommentStmt propagates any comment on a distributed - * configuration to the workers. Since comments for configurations are promenently shown - * when listing all text search configurations this is purely a cosmetic thing when - * running in MX. 
- */ -List * -PreprocessTextSearchConfigurationCommentStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - CommentStmt *stmt = castNode(CommentStmt, node); - Assert(stmt->objtype == OBJECT_TSCONFIGURATION); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSCONFIGURATION); - - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessTextSearchDictionaryCommentStmt propagates any comment on a distributed - * dictionary to the workers. Since comments for dictionaries are promenently shown - * when listing all text search dictionaries this is purely a cosmetic thing when - * running in MX. - */ -List * -PreprocessTextSearchDictionaryCommentStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - CommentStmt *stmt = castNode(CommentStmt, node); - Assert(stmt->objtype == OBJECT_TSDICTIONARY); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSDICTIONARY); - - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterTextSearchConfigurationOwnerStmt verifies if the configuration being - * altered is distributed in the cluster. If that is the case it will prepare the list of - * commands to send to the worker to apply the same changes remote. - */ -List * -PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext - processUtilityContext) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_TSCONFIGURATION); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSCONFIGURATION); - - QualifyTreeNode((Node *) stmt); - char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterTextSearchDictionaryOwnerStmt verifies if the dictionary being - * altered is distributed in the cluster. If that is the case it will prepare the list of - * commands to send to the worker to apply the same changes remote. 
- */ -List * -PreprocessAlterTextSearchDictionaryOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext - processUtilityContext) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_TSDICTIONARY); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - EnsureCoordinator(); - EnsureSequentialMode(OBJECT_TSDICTIONARY); - - QualifyTreeNode((Node *) stmt); - char *sql = DeparseTreeNode((Node *) stmt); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterTextSearchConfigurationOwnerStmt is invoked after the owner has been - * changed locally. Since changing the owner could result in new dependencies being found - * for this object we re-ensure all the dependencies for the configuration do exist. This - * is solely to propagate the new owner (and all its dependencies) if it was not already - * distributed in the cluster. - */ -List * -PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_TSCONFIGURATION); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - /* dependencies have changed (owner) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&address); - - return NIL; -} - - -/* - * PostprocessAlterTextSearchDictionaryOwnerStmt is invoked after the owner has been - * changed locally. Since changing the owner could result in new dependencies being found - * for this object we re-ensure all the dependencies for the dictionary do exist. This - * is solely to propagate the new owner (and all its dependencies) if it was not already - * distributed in the cluster. - */ -List * -PostprocessAlterTextSearchDictionaryOwnerStmt(Node *node, const char *queryString) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_TSDICTIONARY); - - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&address)) - { - return NIL; - } - - /* dependencies have changed (owner) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&address); - - return NIL; -} - - /* * GetTextSearchConfigDefineStmt returns the DefineStmt for a TEXT SEARCH CONFIGURATION * based on the configuration as defined in the catalog identified by tsconfigOid. 
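A pattern worth calling out between these file sections: throughout this patch, call sites that used to store a bare relation OID on a `DDLJob` (`ddlJob->targetRelationId = relationId;`) now record a full object address via `ObjectAddressSet(ddlJob->targetObjectAddress, ...)`. The sketch below condenses that before/after into one hypothetical helper, `MakeRelationDDLJob`, which does not exist in the patch; the field and function names (`DDLJob`, `targetObjectAddress`, `ObjectAddressSet`, `DDLTaskList`) are taken from the hunks in this diff, and the usual headers of the surrounding command files are assumed.

```c
/*
 * Illustrative sketch only -- not part of this patch. It shows the recurring
 * DDLJob change: instead of assigning a bare relation OID, call sites now set
 * a full ObjectAddress, so the same job structure can later target
 * non-relation objects as well.
 */
static DDLJob *
MakeRelationDDLJob(Oid relationId, const char *ddlCommand)
{
	DDLJob *ddlJob = palloc0(sizeof(DDLJob));

	/* before this patch: ddlJob->targetRelationId = relationId; */
	ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId);

	ddlJob->startNewTransaction = false;
	ddlJob->metadataSyncCommand = ddlCommand;
	ddlJob->taskList = DDLTaskList(relationId, ddlCommand);

	return ddlJob;
}
```

Under this reading, the statistics.c, index.c, rename.c, table.c and trigger.c hunks in this diff are all mechanical applications of the same substitution.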
diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c index a277cb372..94f4f4cef 100644 --- a/src/backend/distributed/commands/trigger.c +++ b/src/backend/distributed/commands/trigger.c @@ -44,8 +44,8 @@ /* local function forward declarations */ static bool IsCreateCitusTruncateTriggerStmt(CreateTrigStmt *createTriggerStmt); -static Value * GetAlterTriggerDependsTriggerNameValue(AlterObjectDependsStmt * - alterTriggerDependsStmt); +static String * GetAlterTriggerDependsTriggerNameValue(AlterObjectDependsStmt * + alterTriggerDependsStmt); static void ErrorIfUnsupportedDropTriggerCommand(DropStmt *dropTriggerStmt); static RangeVar * GetDropTriggerStmtRelation(DropStmt *dropTriggerStmt); static void ExtractDropStmtTriggerAndRelationName(DropStmt *dropTriggerStmt, @@ -416,7 +416,7 @@ PreprocessAlterTriggerDependsStmt(Node *node, const char *queryString, * workers */ - Value *triggerNameValue = + String *triggerNameValue = GetAlterTriggerDependsTriggerNameValue(alterTriggerDependsStmt); ereport(ERROR, (errmsg( "Triggers \"%s\" on distributed tables and local tables added to metadata " @@ -454,7 +454,7 @@ PostprocessAlterTriggerDependsStmt(Node *node, const char *queryString) EnsureCoordinator(); ErrorOutForTriggerIfNotSupported(relationId); - Value *triggerNameValue = + String *triggerNameValue = GetAlterTriggerDependsTriggerNameValue(alterTriggerDependsStmt); return CitusCreateTriggerCommandDDLJob(relationId, strVal(triggerNameValue), queryString); @@ -476,7 +476,7 @@ AlterTriggerDependsEventExtendNames(AlterObjectDependsStmt *alterTriggerDependsS char **relationName = &(relation->relname); AppendShardIdToName(relationName, shardId); - Value *triggerNameValue = + String *triggerNameValue = GetAlterTriggerDependsTriggerNameValue(alterTriggerDependsStmt); AppendShardIdToName(&strVal(triggerNameValue), shardId); @@ -489,7 +489,7 @@ AlterTriggerDependsEventExtendNames(AlterObjectDependsStmt *alterTriggerDependsS * GetAlterTriggerDependsTriggerName returns Value object for the trigger * name that given AlterObjectDependsStmt is executed for. */ -static Value * +static String * GetAlterTriggerDependsTriggerNameValue(AlterObjectDependsStmt *alterTriggerDependsStmt) { List *triggerObjectNameList = (List *) alterTriggerDependsStmt->object; @@ -503,7 +503,7 @@ GetAlterTriggerDependsTriggerNameValue(AlterObjectDependsStmt *alterTriggerDepen * be the name of the trigger in either before or after standard process * utility. 
*/ - Value *triggerNameValue = llast(triggerObjectNameList); + String *triggerNameValue = llast(triggerObjectNameList); return triggerNameValue; } @@ -642,12 +642,12 @@ DropTriggerEventExtendNames(DropStmt *dropTriggerStmt, char *schemaName, uint64 ExtractDropStmtTriggerAndRelationName(dropTriggerStmt, &triggerName, &relationName); AppendShardIdToName(&triggerName, shardId); - Value *triggerNameValue = makeString(triggerName); + String *triggerNameValue = makeString(triggerName); AppendShardIdToName(&relationName, shardId); - Value *relationNameValue = makeString(relationName); + String *relationNameValue = makeString(relationName); - Value *schemaNameValue = makeString(pstrdup(schemaName)); + String *schemaNameValue = makeString(pstrdup(schemaName)); List *shardTriggerNameList = list_make3(schemaNameValue, relationNameValue, triggerNameValue); @@ -712,7 +712,7 @@ CitusCreateTriggerCommandDDLJob(Oid relationId, char *triggerName, const char *queryString) { DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->metadataSyncCommand = queryString; if (!triggerName) diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index e8ea461b4..0993c287f 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -40,15 +40,10 @@ #include "utils/rel.h" -#define LOCK_RELATION_IF_EXISTS "SELECT lock_relation_if_exists(%s, '%s');" - - /* Local functions forward declarations for unsupported command checks */ static void ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement); static void ExecuteTruncateStmtSequentialIfNecessary(TruncateStmt *command); static void EnsurePartitionTableNotReplicatedForTruncate(TruncateStmt *truncateStatement); -static void LockTruncatedRelationMetadataInWorkers(TruncateStmt *truncateStatement); -static void AcquireDistributedLockOnRelations(List *relationIdList, LOCKMODE lockMode); static List * TruncateTaskList(Oid relationId); @@ -248,7 +243,13 @@ PreprocessTruncateStatement(TruncateStmt *truncateStatement) ErrorIfUnsupportedTruncateStmt(truncateStatement); EnsurePartitionTableNotReplicatedForTruncate(truncateStatement); ExecuteTruncateStmtSequentialIfNecessary(truncateStatement); - LockTruncatedRelationMetadataInWorkers(truncateStatement); + + uint32 lockAcquiringMode = truncateStatement->behavior == DROP_CASCADE ? + DIST_LOCK_REFERENCING_TABLES : + DIST_LOCK_DEFAULT; + + AcquireDistributedLockOnRelations(truncateStatement->relations, AccessExclusiveLock, + lockAcquiringMode); } @@ -345,131 +346,3 @@ ExecuteTruncateStmtSequentialIfNecessary(TruncateStmt *command) } } } - - -/* - * LockTruncatedRelationMetadataInWorkers determines if distributed - * lock is necessary for truncated relations, and acquire locks. - * - * LockTruncatedRelationMetadataInWorkers handles distributed locking - * of truncated tables before standard utility takes over. - * - * Actual distributed truncation occurs inside truncate trigger. - * - * This is only for distributed serialization of truncate commands. - * The function assumes that there is no foreign key relation between - * non-distributed and distributed relations. 
- */ -static void -LockTruncatedRelationMetadataInWorkers(TruncateStmt *truncateStatement) -{ - List *distributedRelationList = NIL; - - /* nothing to do if there is no metadata at worker nodes */ - if (!ClusterHasKnownMetadataWorkers()) - { - return; - } - - RangeVar *rangeVar = NULL; - foreach_ptr(rangeVar, truncateStatement->relations) - { - Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false); - Oid referencingRelationId = InvalidOid; - - if (!IsCitusTable(relationId)) - { - continue; - } - - if (list_member_oid(distributedRelationList, relationId)) - { - continue; - } - - distributedRelationList = lappend_oid(distributedRelationList, relationId); - - CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); - Assert(cacheEntry != NULL); - - List *referencingTableList = cacheEntry->referencingRelationsViaForeignKey; - foreach_oid(referencingRelationId, referencingTableList) - { - distributedRelationList = list_append_unique_oid(distributedRelationList, - referencingRelationId); - } - } - - if (distributedRelationList != NIL) - { - AcquireDistributedLockOnRelations(distributedRelationList, AccessExclusiveLock); - } -} - - -/* - * AcquireDistributedLockOnRelations acquire a distributed lock on worker nodes - * for given list of relations ids. Relation id list and worker node list - * sorted so that the lock is acquired in the same order regardless of which - * node it was run on. Notice that no lock is acquired on coordinator node. - * - * Notice that the locking functions is sent to all workers regardless of if - * it has metadata or not. This is because a worker node only knows itself - * and previous workers that has metadata sync turned on. The node does not - * know about other nodes that have metadata sync turned on afterwards. - */ -static void -AcquireDistributedLockOnRelations(List *relationIdList, LOCKMODE lockMode) -{ - Oid relationId = InvalidOid; - List *workerNodeList = ActivePrimaryNodeList(NoLock); - const char *lockModeText = LockModeToLockModeText(lockMode); - - /* - * We want to acquire locks in the same order across the nodes. - * Although relation ids may change, their ordering will not. - */ - relationIdList = SortList(relationIdList, CompareOids); - workerNodeList = SortList(workerNodeList, CompareWorkerNodes); - - UseCoordinatedTransaction(); - - int32 localGroupId = GetLocalGroupId(); - - foreach_oid(relationId, relationIdList) - { - /* - * We only acquire distributed lock on relation if - * the relation is sync'ed between mx nodes. - * - * Even if users disable metadata sync, we cannot - * allow them not to acquire the remote locks. - * Hence, we have !IsCoordinator() check. 
- */ - if (ShouldSyncTableMetadata(relationId) || !IsCoordinator()) - { - char *qualifiedRelationName = generate_qualified_relation_name(relationId); - StringInfo lockRelationCommand = makeStringInfo(); - - appendStringInfo(lockRelationCommand, LOCK_RELATION_IF_EXISTS, - quote_literal_cstr(qualifiedRelationName), - lockModeText); - - WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) - { - const char *nodeName = workerNode->workerName; - int nodePort = workerNode->workerPort; - - /* if local node is one of the targets, acquire the lock locally */ - if (workerNode->groupId == localGroupId) - { - LockRelationOid(relationId, lockMode); - continue; - } - - SendCommandToWorker(nodeName, nodePort, lockRelationCommand->data); - } - } - } -} diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index 4973aafd0..9931f430a 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -90,8 +90,6 @@ bool EnableCreateTypePropagation = true; /* forward declaration for helper functions*/ -static List * FilterNameListForDistributedTypes(List *objects, bool missing_ok); -static List * TypeNameListToObjectAddresses(List *objects); static TypeName * MakeTypeNameFromRangeVar(const RangeVar *relation); static Oid GetTypeOwner(Oid typeOid); static Oid LookupNonAssociatedArrayTypeNameOid(ParseState *pstate, @@ -104,365 +102,6 @@ static List * CompositeTypeColumnDefList(Oid typeOid); static CreateEnumStmt * RecreateEnumStmt(Oid typeOid); static List * EnumValsList(Oid typeOid); -static bool ShouldPropagateTypeCreate(void); - - -/* - * PreprocessCompositeTypeStmt is called during the creation of a composite type. It is executed - * before the statement is applied locally. - * - * We decide if the compisite type needs to be replicated to the worker, and if that is - * the case return a list of DDLJob's that describe how and where the type needs to be - * created. - * - * Since the planning happens before the statement has been applied locally we do not have - * access to the ObjectAddress of the new type. - */ -List * -PreprocessCompositeTypeStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - if (!ShouldPropagateTypeCreate()) - { - return NIL; - } - - /* - * managing types can only be done on the coordinator if ddl propagation is on. when - * it is off we will never get here - */ - EnsureCoordinator(); - - /* fully qualify before lookup and later deparsing */ - QualifyTreeNode(node); - - return NIL; -} - - -/* - * PostprocessCompositeTypeStmt is executed after the type has been created locally and before - * we create it on the remote servers. Here we have access to the ObjectAddress of the new - * type which we use to make sure the type's dependencies are on all nodes. 
- */ -List * -PostprocessCompositeTypeStmt(Node *node, const char *queryString) -{ - /* same check we perform during planning of the statement */ - if (!ShouldPropagateTypeCreate()) - { - return NIL; - } - - /* - * find object address of the just created object, because the type has been created - * locally it can't be missing - */ - ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false); - - /* If the type has any unsupported dependency, create it locally */ - DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&typeAddress); - if (errMsg != NULL) - { - RaiseDeferredError(errMsg, WARNING); - return NIL; - } - - /* - * when we allow propagation within a transaction block we should make sure to only - * allow this in sequential mode - */ - EnsureSequentialMode(OBJECT_TYPE); - - EnsureDependenciesExistOnAllNodes(&typeAddress); - - /* - * reconstruct creation statement in a portable fashion. The create_or_replace helper - * function will be used to create the type in an idempotent manner on the workers. - * - * Types could exist on the worker prior to being created on the coordinator when the - * type previously has been attempted to be created in a transaction which did not - * commit on the coordinator. - */ - const char *compositeTypeStmtSql = DeparseCompositeTypeStmt(node); - compositeTypeStmtSql = WrapCreateOrReplace(compositeTypeStmtSql); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) compositeTypeStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterTypeStmt is invoked for alter type statements for composite types. - * - * Normally we would have a process step as well to re-ensure dependencies exists, however - * this is already implemented by the post processing for adding columns to tables. - */ -List * -PreprocessAlterTypeStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterTableStmt *stmt = castNode(AlterTableStmt, node); - Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_TYPE); - - ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&typeAddress)) - { - return NIL; - } - - EnsureCoordinator(); - - /* reconstruct alter statement in a portable fashion */ - QualifyTreeNode((Node *) stmt); - const char *alterTypeStmtSql = DeparseTreeNode((Node *) stmt); - - /* - * all types that are distributed will need their alter statements propagated - * regardless if in a transaction or not. If we would not propagate the alter - * statement the types would be different on worker and coordinator. - */ - EnsureSequentialMode(OBJECT_TYPE); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) alterTypeStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessCreateEnumStmt is called before the statement gets applied locally. - * - * It decides if the create statement will be applied to the workers and if that is the - * case returns a list of DDLJobs that will be executed _after_ the statement has been - * applied locally. - * - * Since planning is done before we have created the object locally we do not have an - * ObjectAddress for the new type just yet. 
- */ -List * -PreprocessCreateEnumStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - if (!ShouldPropagateTypeCreate()) - { - return NIL; - } - - /* managing types can only be done on the coordinator */ - EnsureCoordinator(); - - /* enforce fully qualified typeName for correct deparsing and lookup */ - QualifyTreeNode(node); - - return NIL; -} - - -/* - * PostprocessCreateEnumStmt is called after the statement has been applied locally, but - * before the plan on how to create the types on the workers has been executed. - * - * We apply the same checks to verify if the type should be distributed, if that is the - * case we resolve the ObjectAddress for the just created object, distribute its - * dependencies to all the nodes, and mark the object as distributed. - */ -List * -PostprocessCreateEnumStmt(Node *node, const char *queryString) -{ - if (!ShouldPropagateTypeCreate()) - { - return NIL; - } - - /* lookup type address of just created type */ - ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false); - - DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&typeAddress); - if (errMsg != NULL) - { - RaiseDeferredError(errMsg, WARNING); - return NIL; - } - - /* - * when we allow propagation within a transaction block we should make sure to only - * allow this in sequential mode - */ - EnsureSequentialMode(OBJECT_TYPE); - - EnsureDependenciesExistOnAllNodes(&typeAddress); - - /* reconstruct creation statement in a portable fashion */ - const char *createEnumStmtSql = DeparseCreateEnumStmt(node); - createEnumStmtSql = WrapCreateOrReplace(createEnumStmtSql); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) createEnumStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessAlterEnumStmt handles ALTER TYPE ... ADD VALUE for enum based types. Planning - * happens before the statement has been applied locally. - * - * Since it is an alter of an existing type we actually have the ObjectAddress. This is - * used to check if the type is distributed, if so the alter will be executed on the - * workers directly to keep the types in sync across the cluster. - */ -List * -PreprocessAlterEnumStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false); - if (!ShouldPropagateObject(&typeAddress)) - { - return NIL; - } - - /* - * alter enum will run for all distributed enums, regardless if in a transaction or - * not since the enum will be different on the coordinator and workers if we didn't. - * (adding values to an enum can not run in a transaction anyway and would error by - * postgres already). - */ - EnsureSequentialMode(OBJECT_TYPE); - - /* - * managing types can only be done on the coordinator if ddl propagation is on. when - * it is off we will never get here - */ - EnsureCoordinator(); - - QualifyTreeNode(node); - const char *alterEnumStmtSql = DeparseTreeNode(node); - - /* - * Before pg12 ALTER ENUM ... ADD VALUE could not be within a xact block. Instead of - * creating a DDLTaksList we won't return anything here. During the processing phase - * we directly connect to workers and execute the commands remotely. 
- */ - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) alterEnumStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessDropTypeStmt is called for all DROP TYPE statements. For all types in the list that - * citus has distributed to the workers it will drop the type on the workers as well. If - * no types in the drop list are distributed no calls will be made to the workers. - */ -List * -PreprocessDropTypeStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - DropStmt *stmt = castNode(DropStmt, node); - - /* - * We swap the list of objects to remove during deparse so we need a reference back to - * the old list to put back - */ - List *oldTypes = stmt->objects; - - if (!ShouldPropagate()) - { - return NIL; - } - - List *distributedTypes = FilterNameListForDistributedTypes(oldTypes, - stmt->missing_ok); - if (list_length(distributedTypes) <= 0) - { - /* no distributed types to drop */ - return NIL; - } - - /* - * managing types can only be done on the coordinator if ddl propagation is on. when - * it is off we will never get here. MX workers don't have a notion of distributed - * types, so we block the call. - */ - EnsureCoordinator(); - - /* - * remove the entries for the distributed objects on dropping - */ - List *distributedTypeAddresses = TypeNameListToObjectAddresses(distributedTypes); - ObjectAddress *address = NULL; - foreach_ptr(address, distributedTypeAddresses) - { - UnmarkObjectDistributed(address); - } - - /* - * temporary swap the lists of objects to delete with the distributed objects and - * deparse to an executable sql statement for the workers - */ - stmt->objects = distributedTypes; - char *dropStmtSql = DeparseTreeNode((Node *) stmt); - stmt->objects = oldTypes; - - EnsureSequentialMode(OBJECT_TYPE); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - dropStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PreprocessRenameTypeStmt is called when the user is renaming the type. The invocation happens - * before the statement is applied locally. - * - * As the type already exists we have access to the ObjectAddress for the type, this is - * used to check if the type is distributed. If the type is distributed the rename is - * executed on all the workers to keep the types in sync across the cluster. - */ -List * -PreprocessRenameTypeStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false); - if (!ShouldPropagateObject(&typeAddress)) - { - return NIL; - } - - EnsureCoordinator(); - - /* fully qualify */ - QualifyTreeNode(node); - - /* deparse sql*/ - const char *renameStmtSql = DeparseTreeNode(node); - - EnsureSequentialMode(OBJECT_TYPE); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) renameStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - /* * PreprocessRenameTypeAttributeStmt is called for changes of attribute names for composite * types. Planning is called before the statement is applied locally. 
@@ -499,98 +138,6 @@ PreprocessRenameTypeAttributeStmt(Node *node, const char *queryString, } -/* - * PreprocessAlterTypeSchemaStmt is executed before the statement is applied to the local - * postgres instance. - * - * In this stage we can prepare the commands that need to be run on all workers. - */ -List * -PreprocessAlterTypeSchemaStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_TYPE); - - ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&typeAddress)) - { - return NIL; - } - - EnsureCoordinator(); - - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - EnsureSequentialMode(OBJECT_TYPE); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - -/* - * PostprocessAlterTypeSchemaStmt is executed after the change has been applied locally, we - * can now use the new dependencies of the type to ensure all its dependencies exist on - * the workers before we apply the commands remotely. - */ -List * -PostprocessAlterTypeSchemaStmt(Node *node, const char *queryString) -{ - AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - Assert(stmt->objectType == OBJECT_TYPE); - - ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&typeAddress)) - { - return NIL; - } - - /* dependencies have changed (schema) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&typeAddress); - - return NIL; -} - - -/* - * PreprocessAlterTypeOwnerStmt is called for change of ownership of types before the - * ownership is changed on the local instance. - * - * If the type for which the owner is changed is distributed we execute the change on all - * the workers to keep the type in sync across the cluster. - */ -List * -PreprocessAlterTypeOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) -{ - AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); - Assert(stmt->objectType == OBJECT_TYPE); - - ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&typeAddress)) - { - return NIL; - } - - EnsureCoordinator(); - - QualifyTreeNode((Node *) stmt); - const char *sql = DeparseTreeNode((Node *) stmt); - - EnsureSequentialMode(OBJECT_TYPE); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); -} - - /* * CreateTypeStmtByObjectAddress returns a parsetree for the CREATE TYPE statement to * recreate the type by its object address. @@ -878,7 +425,7 @@ AlterTypeSchemaStmtObjectAddress(Node *node, bool missing_ok) */ /* typename is the last in the list of names */ - Value *typeNameStr = lfirst(list_tail(names)); + String *typeNameStr = lfirst(list_tail(names)); /* * we don't error here either, as the error would be not a good user facing @@ -1051,60 +598,6 @@ GenerateBackupNameForTypeCollision(const ObjectAddress *address) } -/* - * FilterNameListForDistributedTypes takes a list of objects to delete, for Types this - * will be a list of TypeName. This list is filtered against the types that are - * distributed. 
- * - * The original list will not be touched, a new list will be created with only the objects - * in there. - */ -static List * -FilterNameListForDistributedTypes(List *objects, bool missing_ok) -{ - List *result = NIL; - TypeName *typeName = NULL; - foreach_ptr(typeName, objects) - { - Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - ObjectAddress typeAddress = { 0 }; - - if (!OidIsValid(typeOid)) - { - continue; - } - - ObjectAddressSet(typeAddress, TypeRelationId, typeOid); - if (IsObjectDistributed(&typeAddress)) - { - result = lappend(result, typeName); - } - } - return result; -} - - -/* - * TypeNameListToObjectAddresses transforms a List * of TypeName *'s into a List * of - * ObjectAddress *'s. For this to succeed all Types identified by the TypeName *'s should - * exist on this postgres, an error will be thrown otherwise. - */ -static List * -TypeNameListToObjectAddresses(List *objects) -{ - List *result = NIL; - TypeName *typeName = NULL; - foreach_ptr(typeName, objects) - { - Oid typeOid = LookupTypeNameOid(NULL, typeName, false); - ObjectAddress *typeAddress = palloc0(sizeof(ObjectAddress)); - ObjectAddressSet(*typeAddress, TypeRelationId, typeOid); - result = lappend(result, typeAddress); - } - return result; -} - - /* * GetTypeOwner * @@ -1145,47 +638,6 @@ MakeTypeNameFromRangeVar(const RangeVar *relation) } -/* - * ShouldPropagateTypeCreate returns if we should propagate the creation of a type. - * - * There are two moments we decide to not directly propagate the creation of a type. - * - During the creation of an Extension; we assume the type will be created by creating - * the extension on the worker - * - During a transaction block; if types are used in a distributed table in the same - * block we can only provide parallelism on the table if we do not change to sequential - * mode. Types will be propagated outside of this transaction to the workers so that - * the transaction can use 1 connection per shard and fully utilize citus' parallelism - */ -static bool -ShouldPropagateTypeCreate() -{ - if (!ShouldPropagate()) - { - return false; - } - - if (!EnableCreateTypePropagation) - { - /* - * Administrator has turned of type creation propagation - */ - return false; - } - - /* - * by not propagating in a transaction block we allow for parallelism to be used when - * this type will be used as a column in a table that will be created and distributed - * in this same transaction. - */ - if (!ShouldPropagateCreateInCoordinatedTransction()) - { - return false; - } - - return true; -} - - /* * LookupNonAssociatedArrayTypeNameOid returns the oid of the type with the given type name * that is not an array type that is associated to another user defined type. 
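The truncate.c hunk above replaces LockTruncatedRelationMetadataInWorkers with a direct call to AcquireDistributedLockOnRelations, taking AccessExclusiveLock and switching to DIST_LOCK_REFERENCING_TABLES only when the statement uses CASCADE. A minimal sketch of the statements this is meant to serialize across nodes; the cluster, the table names, and the foreign key are illustrative assumptions, not part of this patch:

```sql
-- illustrative setup: two co-located distributed tables linked by a foreign key
CREATE TABLE orders (order_id bigint PRIMARY KEY);
CREATE TABLE order_items (order_id bigint, item text);
SELECT create_distributed_table('orders', 'order_id');
SELECT create_distributed_table('order_items', 'order_id');
ALTER TABLE order_items
    ADD CONSTRAINT order_items_order_id_fkey
    FOREIGN KEY (order_id) REFERENCES orders (order_id);

-- DIST_LOCK_DEFAULT: only the truncated relation is locked on the nodes
TRUNCATE order_items;

-- DIST_LOCK_REFERENCING_TABLES: with CASCADE, tables referencing 'orders'
-- through foreign keys (here order_items) are locked as well
TRUNCATE orders CASCADE;
```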
diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index ad069df3d..f46b559d5 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -44,6 +44,7 @@ #include "commands/extension.h" #include "commands/tablecmds.h" #include "distributed/adaptive_executor.h" +#include "distributed/backend_data.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/multi_copy.h" @@ -76,6 +77,7 @@ #include "nodes/makefuncs.h" #include "tcop/utility.h" #include "utils/builtins.h" +#include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -88,6 +90,7 @@ static int activeDropSchemaOrDBs = 0; static bool ConstraintDropped = false; +ProcessUtility_hook_type PrevProcessUtility = NULL; int UtilityHookLevel = 0; @@ -166,7 +169,6 @@ multi_ProcessUtility(PlannedStmt *pstmt, parsetree = pstmt->utilityStmt; if (IsA(parsetree, TransactionStmt) || - IsA(parsetree, LockStmt) || IsA(parsetree, ListenStmt) || IsA(parsetree, NotifyStmt) || IsA(parsetree, ExecuteStmt) || @@ -183,8 +185,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, * that state. Since we never need to intercept transaction statements, * skip our checks and immediately fall into standard_ProcessUtility. */ - standard_ProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility_compat(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); return; } @@ -203,9 +205,22 @@ multi_ProcessUtility(PlannedStmt *pstmt, parsetree); if (strcmp(createExtensionStmt->extname, "citus") == 0) { - if (get_extension_oid("citus_columnar", true) == InvalidOid) + double versionNumber = strtod(CITUS_MAJORVERSION, NULL); + DefElem *newVersionValue = GetExtensionOption(createExtensionStmt->options, + "new_version"); + + if (newVersionValue) { - CreateExtensionWithVersion("citus_columnar", NULL); + char *newVersion = strdup(defGetString(newVersionValue)); + versionNumber = GetExtensionVersionNumber(newVersion); + } + + if (versionNumber * 100 >= 1110.0) + { + if (get_extension_oid("citus_columnar", true) == InvalidOid) + { + CreateExtensionWithVersion("citus_columnar", NULL); + } } } } @@ -216,8 +231,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, * Ensure that utility commands do not behave any differently until CREATE * EXTENSION is invoked. 
*/ - standard_ProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility_compat(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); return; } @@ -248,8 +263,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, PG_TRY(); { - standard_ProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility_compat(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); StoredProcedureLevel -= 1; @@ -282,8 +297,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, PG_TRY(); { - standard_ProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility_compat(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); DoBlockLevel -= 1; } @@ -485,6 +500,18 @@ ProcessUtilityInternal(PlannedStmt *pstmt, PreprocessTruncateStatement((TruncateStmt *) parsetree); } + if (IsA(parsetree, LockStmt)) + { + /* + * PreprocessLockStatement might lock the relations locally if the + * node executing the command is in pg_dist_node. Even though the process + * utility will re-acquire the locks across the same relations if the node + * is in the metadata (in the pg_dist_node table) that should not be a problem, + * plus it ensures consistent locking order between the nodes. + */ + PreprocessLockStatement((LockStmt *) parsetree, context); + } + /* * We only process ALTER TABLE ... ATTACH PARTITION commands in the function below * and distribute the partition if necessary. @@ -505,6 +532,20 @@ ProcessUtilityInternal(PlannedStmt *pstmt, parsetree = pstmt->utilityStmt; ops = GetDistributeObjectOps(parsetree); + /* + * For some statements Citus defines a Qualify function. The goal of this function + * is to take any ambiguity from the statement that is contextual on either the + * search_path or current settings. + * Instead of relying on the search_path and settings we replace any deduced bits + * and fill them out how postgres would resolve them. This makes subsequent + * deserialize calls for the statement portable to other postgres servers, the + * workers in our case. 
+ */ + if (ops && ops->qualify) + { + ops->qualify(parsetree); + } + if (ops && ops->preprocess) { ddlJobs = ops->preprocess(parsetree, queryString, context); @@ -610,30 +651,81 @@ ProcessUtilityInternal(PlannedStmt *pstmt, /*upgrade citus */ DefElem *newVersionValue = GetExtensionOption( ((AlterExtensionStmt *) parsetree)->options, "new_version"); - Oid citusExtensionOid = get_extension_oid("citus", true); - char *curExtensionVersion = get_extension_version(citusExtensionOid); Oid citusColumnarOid = get_extension_oid("citus_columnar", true); if (newVersionValue) { - const char *newVersion = defGetString(newVersionValue); + char *newVersion = defGetString(newVersionValue); + double newVersionNumber = GetExtensionVersionNumber(strdup(newVersion)); - /*alter extension citus update to 11.1-1, and no citus_columnar installed */ - if (strcmp(newVersion, "11.1-1") == 0 && citusColumnarOid == InvalidOid) + /*alter extension citus update to version >= 11.1-1, and no citus_columnar installed */ + if (newVersionNumber * 100 >= 1110 && citusColumnarOid == InvalidOid) { - /*it's upgrade citus to 11.1-1 */ + /*it's upgrade citus to version later or equal to 11.1-1 */ CreateExtensionWithVersion("citus_columnar", "11.1-0"); } - else if (strcmp(curExtensionVersion, "11.1-1") == 0 && citusColumnarOid != - InvalidOid) + else if (newVersionNumber * 100 < 1110 && citusColumnarOid != InvalidOid) { /*downgrade citus_columnar to Y */ AlterExtensionUpdateStmt("citus_columnar", "11.1-0"); } } + else + { + double versionNumber = strtod(CITUS_MAJORVERSION, NULL); + if (versionNumber * 100 >= 1110) + { + if (citusColumnarOid == InvalidOid) + { + CreateExtensionWithVersion("citus_columnar", "11.1-0"); + } + } + } } - standard_ProcessUtility_compat(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + PrevProcessUtility_compat(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); + + if (isAlterExtensionUpdateCitusStmt) + { + DefElem *newVersionValue = GetExtensionOption( + ((AlterExtensionStmt *) parsetree)->options, "new_version"); + Oid citusColumnarOid = get_extension_oid("citus_columnar", true); + if (newVersionValue) + { + char *newVersion = defGetString(newVersionValue); + double newVersionNumber = GetExtensionVersionNumber(strdup(newVersion)); + if (newVersionNumber * 100 >= 1110 && citusColumnarOid != InvalidOid) + { + /*after "ALTER EXTENSION citus" updates citus_columnar Y to version Z. 
*/ + char *curColumnarVersion = get_extension_version(citusColumnarOid); + if (strcmp(curColumnarVersion, "11.1-0") == 0) + { + AlterExtensionUpdateStmt("citus_columnar", "11.1-1"); + } + } + else if (newVersionNumber * 100 < 1110 && citusColumnarOid != InvalidOid) + { + /*after "ALTER EXTENSION citus" drops citus_columnar extension */ + char *curColumnarVersion = get_extension_version(citusColumnarOid); + if (strcmp(curColumnarVersion, "11.1-0") == 0) + { + RemoveExtensionById(citusColumnarOid); + } + } + } + else + { + double versionNumber = strtod(CITUS_MAJORVERSION, NULL); + if (versionNumber * 100 >= 1110) + { + char *curColumnarVersion = get_extension_version(citusColumnarOid); + if (strcmp(curColumnarVersion, "11.1-0") == 0) + { + AlterExtensionUpdateStmt("citus_columnar", "11.1-1"); + } + } + } + } if (isAlterExtensionUpdateCitusStmt) { @@ -1115,16 +1207,20 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) EnsureCoordinator(); - Oid targetRelationId = ddlJob->targetRelationId; + ObjectAddress targetObjectAddress = ddlJob->targetObjectAddress; - if (OidIsValid(targetRelationId)) + if (OidIsValid(targetObjectAddress.classId)) { /* - * Only for ddlJobs that are targetting a relation (table) we want to sync - * its metadata and verify some properties around the table. + * Only for ddlJobs that are targetting an object we want to sync + * its metadata. */ - shouldSyncMetadata = ShouldSyncTableMetadata(targetRelationId); - EnsurePartitionTableNotReplicated(targetRelationId); + shouldSyncMetadata = ShouldSyncUserCommandForObject(targetObjectAddress); + + if (targetObjectAddress.classId == RelationRelationId) + { + EnsurePartitionTableNotReplicated(targetObjectAddress.objectId); + } } bool localExecutionSupported = true; @@ -1375,7 +1471,7 @@ CreateCustomDDLTaskList(Oid relationId, TableDDLCommand *command) } DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = relationId; + ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->metadataSyncCommand = GetTableDDLCommand(command); ddlJob->taskList = taskList; @@ -1626,7 +1722,7 @@ NodeDDLTaskList(TargetWorkerSet targets, List *commands) } DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetRelationId = InvalidOid; + ddlJob->targetObjectAddress = InvalidObjectAddress; ddlJob->metadataSyncCommand = NULL; ddlJob->taskList = list_make1(task); @@ -1654,26 +1750,3 @@ DropSchemaOrDBInProgress(void) { return activeDropSchemaOrDBs > 0; } - - -/* - * ColumnarTableSetOptionsHook propagates columnar table options to shards, if - * necessary. 
- */ -void -ColumnarTableSetOptionsHook(Oid relationId, ColumnarOptions options) -{ - if (EnableDDLPropagation && IsCitusTable(relationId)) - { - /* when a columnar table is distributed update all settings on the shards */ - Oid namespaceId = get_rel_namespace(relationId); - char *schemaName = get_namespace_name(namespaceId); - char *relationName = get_rel_name(relationId); - TableDDLCommand *command = ColumnarGetCustomTableOptionsDDL(schemaName, - relationName, - options); - DDLJob *ddljob = CreateCustomDDLTaskList(relationId, command); - - ExecuteDistributedDDLJob(ddljob); - } -} diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index 7f1e04f76..9b1e0bfb3 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -432,7 +432,7 @@ DeparseVacuumColumnNames(List *columnNameList) appendStringInfoString(columnNames, " ("); - Value *columnName = NULL; + String *columnName = NULL; foreach_ptr(columnName, columnNameList) { appendStringInfo(columnNames, "%s,", strVal(columnName)); diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c new file mode 100644 index 000000000..554741310 --- /dev/null +++ b/src/backend/distributed/commands/view.c @@ -0,0 +1,705 @@ +/*------------------------------------------------------------------------- + * + * view.c + * Commands for distributing CREATE OR REPLACE VIEW statements. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "fmgr.h" + +#include "access/genam.h" +#include "catalog/objectaddress.h" +#include "commands/extension.h" +#include "distributed/commands.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/deparser.h" +#include "distributed/errormessage.h" +#include "distributed/listutils.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata/dependency.h" +#include "distributed/metadata/distobject.h" +#include "distributed/multi_executor.h" +#include "distributed/namespace_utils.h" +#include "distributed/worker_transaction.h" +#include "executor/spi.h" +#include "nodes/nodes.h" +#include "nodes/pg_list.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +static List * FilterNameListForDistributedViews(List *viewNamesList, bool missing_ok); +static void AppendQualifiedViewNameToCreateViewCommand(StringInfo buf, Oid viewOid); +static void AppendViewDefinitionToCreateViewCommand(StringInfo buf, Oid viewOid); +static void AppendAliasesToCreateViewCommand(StringInfo createViewCommand, Oid viewOid); +static void AppendOptionsToCreateViewCommand(StringInfo createViewCommand, Oid viewOid); + +/* + * PreprocessViewStmt is called during the planning phase for CREATE OR REPLACE VIEW + * before it is created on the local node internally. + */ +List * +PreprocessViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!ShouldPropagate()) + { + return NIL; + } + + /* check creation against multi-statement transaction policy */ + if (!ShouldPropagateCreateInCoordinatedTransction()) + { + return NIL; + } + + EnsureCoordinator(); + + return NIL; +} + + +/* + * PostprocessViewStmt actually creates the commmands we need to run on workers to + * propagate views. 
+ *
+ * If the view depends on any undistributable object, Citus cannot distribute it. In order
+ * not to prevent users from creating local views on the coordinator, a WARNING message is
+ * sent to the user instead of erroring out. If no worker nodes exist at all, the view is
+ * created locally without any WARNING message.
+ *
+ * Besides creating the plan we also make sure all (new) dependencies of the view are
+ * created on all nodes.
+ */
+List *
+PostprocessViewStmt(Node *node, const char *queryString)
+{
+    ViewStmt *stmt = castNode(ViewStmt, node);
+
+    if (!ShouldPropagate())
+    {
+        return NIL;
+    }
+
+    /* check creation against multi-statement transaction policy */
+    if (!ShouldPropagateCreateInCoordinatedTransction())
+    {
+        return NIL;
+    }
+
+    ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
+
+    if (IsObjectAddressOwnedByExtension(&viewAddress, NULL))
+    {
+        return NIL;
+    }
+
+    /* If the view has any unsupported dependency, create it locally */
+    if (ErrorOrWarnIfObjectHasUnsupportedDependency(&viewAddress))
+    {
+        return NIL;
+    }
+
+    EnsureDependenciesExistOnAllNodes(&viewAddress);
+
+    char *command = CreateViewDDLCommand(viewAddress.objectId);
+
+    /*
+     * We'd typically use NodeDDLTaskList() for generating node-level DDL commands,
+     * such as when creating a type. However, views are different in the sense that
+     * citus tables do not depend on views; instead, views depend on citus tables.
+     *
+     * When NodeDDLTaskList() is used, it should be accompanied by sequential execution.
+     * Here, we do something equivalent to NodeDDLTaskList(), but using the
+     * metadataSyncCommand field. This hack allows us to use the metadata connection
+     * (see `REQUIRE_METADATA_CONNECTION` flag). Meaning that view creation is treated as
+     * a metadata operation.
+     *
+     * We do this mostly for performance reasons, because we cannot afford to switch to
+     * sequential execution, for instance when we are altering or creating distributed
+     * tables -- which may require significant resources.
+     *
+     * The downside of using this hack is that if a view is re-used in the same transaction
+     * that creates the view on the workers, we might get errors such as the below, which
+     * we consider a decent trade-off currently:
+     *
+     * BEGIN;
+     * CREATE VIEW dist_view ..
+     * CREATE TABLE t2(id int, val dist_view);
+     *
+     * -- shard creation fails on one of the connections
+     * SELECT create_distributed_table('t2', 'id');
+     * ERROR: type "public.dist_view" does not exist
+     *
+     */
+    DDLJob *ddlJob = palloc0(sizeof(DDLJob));
+    ddlJob->targetObjectAddress = viewAddress;
+    ddlJob->metadataSyncCommand = command;
+    ddlJob->taskList = NIL;
+
+    return list_make1(ddlJob);
+}
+
+
+/*
+ * ViewStmtObjectAddress returns the ObjectAddress for the subject of the
+ * CREATE [OR REPLACE] VIEW statement.
+ */
+ObjectAddress
+ViewStmtObjectAddress(Node *node, bool missing_ok)
+{
+    ViewStmt *stmt = castNode(ViewStmt, node);
+
+    Oid viewOid = RangeVarGetRelid(stmt->view, NoLock, missing_ok);
+
+    ObjectAddress viewAddress = { 0 };
+    ObjectAddressSet(viewAddress, RelationRelationId, viewOid);
+
+    return viewAddress;
+}
+
+
+/*
+ * PreprocessDropViewStmt gets called during the planning phase of a DROP VIEW statement
+ * and returns a list of DDLJobs that will drop any distributed view from the
+ * workers.
+ *
+ * The DropStmt could have multiple objects to drop; the list of objects is filtered
+ * to only keep the distributed views for deletion on the workers.
Non-distributed + * views will still be dropped locally but not on the workers. + */ +List * +PreprocessDropViewStmt(Node *node, const char *queryString, ProcessUtilityContext + processUtilityContext) +{ + DropStmt *stmt = castNode(DropStmt, node); + + if (!ShouldPropagate()) + { + return NIL; + } + + List *distributedViewNames = FilterNameListForDistributedViews(stmt->objects, + stmt->missing_ok); + + if (list_length(distributedViewNames) < 1) + { + /* no distributed view to drop */ + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_VIEW); + + /* + * Swap the list of objects before deparsing and restore the old list after. This + * ensures we only have distributed views in the deparsed drop statement. + */ + DropStmt *stmtCopy = copyObject(stmt); + stmtCopy->objects = distributedViewNames; + + QualifyTreeNode((Node *) stmtCopy); + const char *dropStmtSql = DeparseTreeNode((Node *) stmtCopy); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * FilterNameListForDistributedViews takes a list of view names and filters against the + * views that are distributed. + * + * The original list will not be touched, a new list will be created with only the objects + * in there. + */ +static List * +FilterNameListForDistributedViews(List *viewNamesList, bool missing_ok) +{ + List *distributedViewNames = NIL; + + List *possiblyQualifiedViewName = NULL; + foreach_ptr(possiblyQualifiedViewName, viewNamesList) + { + char *viewName = NULL; + char *schemaName = NULL; + DeconstructQualifiedName(possiblyQualifiedViewName, &schemaName, &viewName); + + if (schemaName == NULL) + { + char *objName = NULL; + Oid schemaOid = QualifiedNameGetCreationNamespace(possiblyQualifiedViewName, + &objName); + schemaName = get_namespace_name(schemaOid); + } + + Oid schemaId = get_namespace_oid(schemaName, missing_ok); + Oid viewOid = get_relname_relid(viewName, schemaId); + + if (!OidIsValid(viewOid)) + { + continue; + } + + if (IsViewDistributed(viewOid)) + { + distributedViewNames = lappend(distributedViewNames, + possiblyQualifiedViewName); + } + } + + return distributedViewNames; +} + + +/* + * CreateViewDDLCommand returns the DDL command to create the view addressed by + * the viewAddress. + */ +char * +CreateViewDDLCommand(Oid viewOid) +{ + StringInfo createViewCommand = makeStringInfo(); + + appendStringInfoString(createViewCommand, "CREATE OR REPLACE VIEW "); + + AppendQualifiedViewNameToCreateViewCommand(createViewCommand, viewOid); + AppendAliasesToCreateViewCommand(createViewCommand, viewOid); + AppendOptionsToCreateViewCommand(createViewCommand, viewOid); + AppendViewDefinitionToCreateViewCommand(createViewCommand, viewOid); + + return createViewCommand->data; +} + + +/* + * AppendQualifiedViewNameToCreateViewCommand adds the qualified view of the given view + * oid to the given create view command. + */ +static void +AppendQualifiedViewNameToCreateViewCommand(StringInfo buf, Oid viewOid) +{ + char *viewName = get_rel_name(viewOid); + char *schemaName = get_namespace_name(get_rel_namespace(viewOid)); + char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + + appendStringInfo(buf, "%s ", qualifiedViewName); +} + + +/* + * AppendAliasesToCreateViewCommand appends aliases to the create view + * command for the existing view. 
+ */
+static void
+AppendAliasesToCreateViewCommand(StringInfo createViewCommand, Oid viewOid)
+{
+    /* Get column name aliases from pg_attribute */
+    ScanKeyData key[1];
+    ScanKeyInit(&key[0],
+                Anum_pg_attribute_attrelid,
+                BTEqualStrategyNumber, F_OIDEQ,
+                ObjectIdGetDatum(viewOid));
+
+    Relation maprel = table_open(AttributeRelationId, AccessShareLock);
+    Relation mapidx = index_open(AttributeRelidNumIndexId, AccessShareLock);
+    SysScanDesc pgAttributeScan = systable_beginscan_ordered(maprel, mapidx, NULL, 1,
+                                                             key);
+
+    bool isInitialAlias = true;
+    bool hasAlias = false;
+    HeapTuple attributeTuple;
+    while (HeapTupleIsValid(attributeTuple = systable_getnext_ordered(pgAttributeScan,
+                                                                      ForwardScanDirection)))
+    {
+        Form_pg_attribute att = (Form_pg_attribute) GETSTRUCT(attributeTuple);
+        const char *aliasName = quote_identifier(NameStr(att->attname));
+
+        if (isInitialAlias)
+        {
+            appendStringInfoString(createViewCommand, "(");
+        }
+        else
+        {
+            appendStringInfoString(createViewCommand, ",");
+        }
+
+        appendStringInfoString(createViewCommand, aliasName);
+
+        hasAlias = true;
+        isInitialAlias = false;
+    }
+
+    if (hasAlias)
+    {
+        appendStringInfoString(createViewCommand, ") ");
+    }
+
+    systable_endscan_ordered(pgAttributeScan);
+    index_close(mapidx, AccessShareLock);
+    table_close(maprel, AccessShareLock);
+}
+
+
+/*
+ * AppendOptionsToCreateViewCommand adds relation options to the create view command
+ * for an existing view.
+ */
+static void
+AppendOptionsToCreateViewCommand(StringInfo createViewCommand, Oid viewOid)
+{
+    /* Add rel options to create view command */
+    char *relOptions = flatten_reloptions(viewOid);
+    if (relOptions != NULL)
+    {
+        appendStringInfo(createViewCommand, "WITH (%s) ", relOptions);
+    }
+}
+
+
+/*
+ * AppendViewDefinitionToCreateViewCommand adds the definition of the given view to the
+ * given create view command.
+ */
+static void
+AppendViewDefinitionToCreateViewCommand(StringInfo buf, Oid viewOid)
+{
+    /*
+     * Set search_path to NIL so that all objects outside of pg_catalog will be
+     * schema-prefixed.
+     */
+    OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext);
+    overridePath->schemas = NIL;
+    overridePath->addCatalog = true;
+    PushOverrideSearchPath(overridePath);
+
+    /*
+     * Push the transaction snapshot to be able to get the view definition with pg_get_viewdef
+     */
+    PushActiveSnapshot(GetTransactionSnapshot());
+
+    Datum viewDefinitionDatum = DirectFunctionCall1(pg_get_viewdef,
+                                                    ObjectIdGetDatum(viewOid));
+    char *viewDefinition = TextDatumGetCString(viewDefinitionDatum);
+
+    PopActiveSnapshot();
+    PopOverrideSearchPath();
+
+    appendStringInfo(buf, "AS %s ", viewDefinition);
+}
+
+
+/*
+ * AlterViewOwnerCommand returns the ALTER VIEW / ALTER MATERIALIZED VIEW ... OWNER TO
+ * command for the given view or materialized view oid.
+ */ +char * +AlterViewOwnerCommand(Oid viewOid) +{ + /* Add alter owner commmand */ + StringInfo alterOwnerCommand = makeStringInfo(); + + char *viewName = get_rel_name(viewOid); + Oid schemaOid = get_rel_namespace(viewOid); + char *schemaName = get_namespace_name(schemaOid); + + char *viewOwnerName = TableOwner(viewOid); + char *qualifiedViewName = NameListToQuotedString(list_make2(makeString(schemaName), + makeString(viewName))); + + if (get_rel_relkind(viewOid) == RELKIND_MATVIEW) + { + appendStringInfo(alterOwnerCommand, "ALTER MATERIALIZED VIEW %s ", + qualifiedViewName); + } + else + { + appendStringInfo(alterOwnerCommand, "ALTER VIEW %s ", qualifiedViewName); + } + + appendStringInfo(alterOwnerCommand, "OWNER TO %s", quote_identifier(viewOwnerName)); + + return alterOwnerCommand->data; +} + + +/* + * IsViewDistributed checks if a view is distributed + */ +bool +IsViewDistributed(Oid viewOid) +{ + Assert(get_rel_relkind(viewOid) == RELKIND_VIEW); + + ObjectAddress viewAddress = { 0 }; + ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + + return IsObjectDistributed(&viewAddress); +} + + +/* + * PreprocessAlterViewStmt is invoked for alter view statements. + */ +List * +PreprocessAlterViewStmt(Node *node, const char *queryString, ProcessUtilityContext + processUtilityContext) +{ + AlterTableStmt *stmt = castNode(AlterTableStmt, node); + + ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true); + if (!ShouldPropagateObject(&viewAddress)) + { + return NIL; + } + + QualifyTreeNode((Node *) stmt); + + EnsureCoordinator(); + + /* reconstruct alter statement in a portable fashion */ + const char *alterViewStmtSql = DeparseTreeNode((Node *) stmt); + + /* + * To avoid sequential mode, we are using metadata connection. For the + * detailed explanation, please check the comment on PostprocessViewStmt. + */ + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); + ddlJob->targetObjectAddress = viewAddress; + ddlJob->metadataSyncCommand = alterViewStmtSql; + ddlJob->taskList = NIL; + + return list_make1(ddlJob); +} + + +/* + * PostprocessAlterViewStmt is invoked for alter view statements. + */ +List * +PostprocessAlterViewStmt(Node *node, const char *queryString) +{ + AlterTableStmt *stmt = castNode(AlterTableStmt, node); + Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_VIEW); + + ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true); + if (!ShouldPropagateObject(&viewAddress)) + { + return NIL; + } + + if (IsObjectAddressOwnedByExtension(&viewAddress, NULL)) + { + return NIL; + } + + /* If the view has any unsupported dependency, create it locally */ + if (ErrorOrWarnIfObjectHasUnsupportedDependency(&viewAddress)) + { + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&viewAddress); + + return NIL; +} + + +/* + * AlterViewStmtObjectAddress returns the ObjectAddress for the subject of the + * ALTER VIEW statement. + */ +ObjectAddress +AlterViewStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterTableStmt *stmt = castNode(AlterTableStmt, node); + Oid viewOid = RangeVarGetRelid(stmt->relation, NoLock, missing_ok); + + ObjectAddress viewAddress = { 0 }; + ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + + return viewAddress; +} + + +/* + * PreprocessRenameViewStmt is called when the user is renaming the view or the column of + * the view. 
+ */ +List * +PreprocessRenameViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + ObjectAddress viewAddress = GetObjectAddressFromParseTree(node, true); + if (!ShouldPropagateObject(&viewAddress)) + { + return NIL; + } + + EnsureCoordinator(); + + /* fully qualify */ + QualifyTreeNode(node); + + /* deparse sql*/ + const char *renameStmtSql = DeparseTreeNode(node); + + /* + * To avoid sequential mode, we are using metadata connection. For the + * detailed explanation, please check the comment on PostprocessViewStmt. + */ + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); + ddlJob->targetObjectAddress = viewAddress; + ddlJob->metadataSyncCommand = renameStmtSql; + ddlJob->taskList = NIL; + + return list_make1(ddlJob); +} + + +/* + * RenameViewStmtObjectAddress returns the ObjectAddress of the view that is the object + * of the RenameStmt. Errors if missing_ok is false. + */ +ObjectAddress +RenameViewStmtObjectAddress(Node *node, bool missing_ok) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + + Oid viewOid = RangeVarGetRelid(stmt->relation, NoLock, missing_ok); + + ObjectAddress viewAddress = { 0 }; + ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + + return viewAddress; +} + + +/* + * PreprocessAlterViewSchemaStmt is executed before the statement is applied to the local + * postgres instance. + */ +List * +PreprocessAlterViewSchemaStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + + ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true); + if (!ShouldPropagateObject(&viewAddress)) + { + return NIL; + } + + EnsureCoordinator(); + + QualifyTreeNode((Node *) stmt); + + const char *sql = DeparseTreeNode((Node *) stmt); + + /* + * To avoid sequential mode, we are using metadata connection. For the + * detailed explanation, please check the comment on PostprocessViewStmt. + */ + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); + ddlJob->targetObjectAddress = viewAddress; + ddlJob->metadataSyncCommand = sql; + ddlJob->taskList = NIL; + + return list_make1(ddlJob); +} + + +/* + * PostprocessAlterViewSchemaStmt is executed after the change has been applied locally, we + * can now use the new dependencies of the view to ensure all its dependencies exist on + * the workers before we apply the commands remotely. + */ +List * +PostprocessAlterViewSchemaStmt(Node *node, const char *queryString) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + + ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true); + if (!ShouldPropagateObject(&viewAddress)) + { + return NIL; + } + + /* dependencies have changed (schema) let's ensure they exist */ + EnsureDependenciesExistOnAllNodes(&viewAddress); + + return NIL; +} + + +/* + * AlterViewSchemaStmtObjectAddress returns the ObjectAddress of the view that is the object + * of the alter schema statement. 
+ */ +ObjectAddress +AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + + Oid viewOid = RangeVarGetRelid(stmt->relation, NoLock, true); + + /* + * Since it can be called both before and after executing the standardProcess utility, + * we need to check both old and new schemas + */ + if (viewOid == InvalidOid) + { + Oid schemaId = get_namespace_oid(stmt->newschema, missing_ok); + viewOid = get_relname_relid(stmt->relation->relname, schemaId); + + /* + * if the view is still invalid we couldn't find the view, error with the same + * message postgres would error with it missing_ok is false (not ok to miss) + */ + if (!missing_ok && viewOid == InvalidOid) + { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("view \"%s\" does not exist", + stmt->relation->relname))); + } + } + + ObjectAddress viewAddress = { 0 }; + ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + + return viewAddress; +} + + +/* + * IsViewRenameStmt returns whether the passed-in RenameStmt is the following + * form: + * + * - ALTER VIEW RENAME + * - ALTER VIEW RENAME COLUMN + */ +bool +IsViewRenameStmt(RenameStmt *renameStmt) +{ + bool isViewRenameStmt = false; + + if (renameStmt->renameType == OBJECT_VIEW || + (renameStmt->renameType == OBJECT_COLUMN && + renameStmt->relationType == OBJECT_VIEW)) + { + isViewRenameStmt = true; + } + + return isViewRenameStmt; +} diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index 7b89b3e96..df6096321 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -1466,28 +1466,6 @@ ShouldShutdownConnection(MultiConnection *connection, const int cachedConnection } -/* - * IsRebalancerInitiatedBackend returns true if we are in a backend that citus - * rebalancer initiated. - */ -bool -IsRebalancerInternalBackend(void) -{ - return application_name && strcmp(application_name, CITUS_REBALANCER_NAME) == 0; -} - - -/* - * IsCitusInitiatedRemoteBackend returns true if we are in a backend that citus - * initiated via remote connection. - */ -bool -IsCitusInternalBackend(void) -{ - return ExtractGlobalPID(application_name) != INVALID_CITUS_INTERNAL_BACKEND_GPID; -} - - /* * ResetConnection preserves the given connection for later usage by * resetting its states. diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index 4c1aae6bf..2445a69f3 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -1115,3 +1115,92 @@ SendCancelationRequest(MultiConnection *connection) return cancelSent; } + + +/* + * EvaluateSingleQueryResult gets the query result from connection and returns + * true if the query is executed successfully, false otherwise. A query result + * or an error message is returned in queryResultString. The function requires + * that the query returns a single column/single row result. It returns an + * error otherwise. 
+ */ +bool +EvaluateSingleQueryResult(MultiConnection *connection, PGresult *queryResult, + StringInfo queryResultString) +{ + bool success = false; + + ExecStatusType resultStatus = PQresultStatus(queryResult); + if (resultStatus == PGRES_COMMAND_OK) + { + char *commandStatus = PQcmdStatus(queryResult); + appendStringInfo(queryResultString, "%s", commandStatus); + success = true; + } + else if (resultStatus == PGRES_TUPLES_OK) + { + int ntuples = PQntuples(queryResult); + int nfields = PQnfields(queryResult); + + /* error if query returns more than 1 rows, or more than 1 fields */ + if (nfields != 1) + { + appendStringInfo(queryResultString, + "expected a single column in query target"); + } + else if (ntuples > 1) + { + appendStringInfo(queryResultString, + "expected a single row in query result"); + } + else + { + int row = 0; + int column = 0; + if (!PQgetisnull(queryResult, row, column)) + { + char *queryResultValue = PQgetvalue(queryResult, row, column); + appendStringInfo(queryResultString, "%s", queryResultValue); + } + success = true; + } + } + else + { + StoreErrorMessage(connection, queryResultString); + } + + return success; +} + + +/* + * StoreErrorMessage gets the error message from connection and stores it + * in queryResultString. It should be called only when error is present + * otherwise it would return a default error message. + */ +void +StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString) +{ + char *errorMessage = PQerrorMessage(connection->pgConn); + if (errorMessage != NULL) + { + /* copy the error message to a writable memory */ + errorMessage = pnstrdup(errorMessage, strlen(errorMessage)); + + char *firstNewlineIndex = strchr(errorMessage, '\n'); + + /* trim the error message at the line break */ + if (firstNewlineIndex != NULL) + { + *firstNewlineIndex = '\0'; + } + } + else + { + /* put a default error message if no error message is reported */ + errorMessage = "An error occurred while running the query"; + } + + appendStringInfo(queryResultString, "%s", errorMessage); +} diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 3da845d3b..ff8da5991 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -80,7 +80,6 @@ static void deparse_index_columns(StringInfo buffer, List *indexParameterList, static void AppendStorageParametersToString(StringInfo stringBuffer, List *optionList); static void simple_quote_literal(StringInfo buf, const char *val); -static char * flatten_reloptions(Oid relid); static void AddVacuumParams(ReindexStmt *reindexStmt, StringInfo buffer); @@ -1231,7 +1230,7 @@ pg_get_replica_identity_command(Oid tableRelationId) * This function comes from PostgreSQL source code in * src/backend/utils/adt/ruleutils.c */ -static char * +char * flatten_reloptions(Oid relid) { char *result = NULL; diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c index 0ebc69238..b72787993 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -11,6 +11,8 @@ #include "postgres.h" +#include "pg_version_compat.h" + #include "catalog/namespace.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" @@ -44,6 +46,6 @@ AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt) appendStringInfo(buf, "ALTER DATABASE %s OWNER TO %s;", - 
quote_identifier(strVal((Value *) stmt->object)), + quote_identifier(strVal((String *) stmt->object)), RoleSpecString(stmt->newowner, true)); } diff --git a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c index 62c5f98c8..805f24f90 100644 --- a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c +++ b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c @@ -223,7 +223,7 @@ AppendDropForeignServerStmt(StringInfo buf, DropStmt *stmt) static void AppendServerNames(StringInfo buf, DropStmt *stmt) { - Value *serverValue = NULL; + String *serverValue = NULL; foreach_ptr(serverValue, stmt->objects) { const char *serverString = quote_identifier(strVal(serverValue)); diff --git a/src/backend/distributed/deparser/deparse_function_stmts.c b/src/backend/distributed/deparser/deparse_function_stmts.c index d58faabfb..93bb65b4d 100644 --- a/src/backend/distributed/deparser/deparse_function_stmts.c +++ b/src/backend/distributed/deparser/deparse_function_stmts.c @@ -396,18 +396,18 @@ AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt) appendStringInfo(buf, " SET %s =", quote_identifier(setStmt->name)); } - Value value = varArgConst->val; - switch (value.type) + Node *value = (Node *) &varArgConst->val; + switch (value->type) { case T_Integer: { - appendStringInfo(buf, " %d", intVal(&value)); + appendStringInfo(buf, " %d", intVal(value)); break; } case T_Float: { - appendStringInfo(buf, " %s", strVal(&value)); + appendStringInfo(buf, " %s", strVal(value)); break; } @@ -428,7 +428,7 @@ AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt) Datum interval = DirectFunctionCall3(interval_in, - CStringGetDatum(strVal(&value)), + CStringGetDatum(strVal(value)), ObjectIdGetDatum(InvalidOid), Int32GetDatum(typmod)); @@ -440,7 +440,7 @@ AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt) else { appendStringInfo(buf, " %s", quote_literal_cstr(strVal( - &value))); + value))); } break; } diff --git a/src/backend/distributed/deparser/deparse_schema_stmts.c b/src/backend/distributed/deparser/deparse_schema_stmts.c index ebc76d5e8..21ea16fbe 100644 --- a/src/backend/distributed/deparser/deparse_schema_stmts.c +++ b/src/backend/distributed/deparser/deparse_schema_stmts.c @@ -126,7 +126,7 @@ AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt) appendStringInfoString(buf, "IF EXISTS "); } - Value *schemaValue = NULL; + String *schemaValue = NULL; foreach_ptr(schemaValue, stmt->objects) { const char *schemaString = quote_identifier(strVal(schemaValue)); diff --git a/src/backend/distributed/deparser/deparse_sequence_stmts.c b/src/backend/distributed/deparser/deparse_sequence_stmts.c index e6cb36146..0680e7a20 100644 --- a/src/backend/distributed/deparser/deparse_sequence_stmts.c +++ b/src/backend/distributed/deparser/deparse_sequence_stmts.c @@ -86,12 +86,6 @@ AppendSequenceNameList(StringInfo buf, List *objects, ObjectType objtype) RangeVar *seq = makeRangeVarFromNameList((List *) lfirst(objectCell)); - if (seq->schemaname == NULL) - { - Oid schemaOid = RangeVarGetCreationNamespace(seq); - seq->schemaname = get_namespace_name(schemaOid); - } - char *qualifiedSequenceName = quote_qualified_identifier(seq->schemaname, seq->relname); appendStringInfoString(buf, qualifiedSequenceName); diff --git a/src/backend/distributed/deparser/deparse_statistics_stmts.c b/src/backend/distributed/deparser/deparse_statistics_stmts.c index fb1e67977..90828cc67 100644 --- 
a/src/backend/distributed/deparser/deparse_statistics_stmts.c +++ b/src/backend/distributed/deparser/deparse_statistics_stmts.c @@ -200,10 +200,10 @@ AppendAlterStatisticsOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt) static void AppendStatisticsName(StringInfo buf, CreateStatsStmt *stmt) { - Value *schemaNameVal = (Value *) linitial(stmt->defnames); + String *schemaNameVal = (String *) linitial(stmt->defnames); const char *schemaName = quote_identifier(strVal(schemaNameVal)); - Value *statNameVal = (Value *) lsecond(stmt->defnames); + String *statNameVal = (String *) lsecond(stmt->defnames); const char *statName = quote_identifier(strVal(statNameVal)); appendStringInfo(buf, "%s.%s", schemaName, statName); @@ -220,7 +220,7 @@ AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt) appendStringInfoString(buf, " ("); - Value *statType = NULL; + String *statType = NULL; foreach_ptr(statType, stmt->stat_types) { appendStringInfoString(buf, strVal(statType)); diff --git a/src/backend/distributed/deparser/deparse_text_search.c b/src/backend/distributed/deparser/deparse_text_search.c index 43d162678..e0c750d0d 100644 --- a/src/backend/distributed/deparser/deparse_text_search.c +++ b/src/backend/distributed/deparser/deparse_text_search.c @@ -464,7 +464,7 @@ DeparseTextSearchDictionaryCommentStmt(Node *node) static void AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes) { - Value *tokentype = NULL; + String *tokentype = NULL; bool first = true; foreach_ptr(tokentype, tokentypes) { diff --git a/src/backend/distributed/deparser/deparse_view_stmts.c b/src/backend/distributed/deparser/deparse_view_stmts.c new file mode 100644 index 000000000..39c4ccb63 --- /dev/null +++ b/src/backend/distributed/deparser/deparse_view_stmts.c @@ -0,0 +1,310 @@ +/*------------------------------------------------------------------------- + * + * deparse_view_stmts.c + * + * All routines to deparse view statements. + * + * Copyright (c), Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/namespace.h" +#include "commands/defrem.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" + +static void AppendDropViewStmt(StringInfo buf, DropStmt *stmt); +static void AppendViewNameList(StringInfo buf, List *objects); +static void AppendAlterViewStmt(StringInfo buf, AlterTableStmt *stmt); +static void AppendAlterViewCmd(StringInfo buf, AlterTableCmd *alterTableCmd); +static void AppendAlterViewOwnerStmt(StringInfo buf, AlterTableCmd *alterTableCmd); +static void AppendAlterViewSetOptionsStmt(StringInfo buf, AlterTableCmd *alterTableCmd); +static void AppendAlterViewResetOptionsStmt(StringInfo buf, AlterTableCmd *alterTableCmd); +static void AppendRenameViewStmt(StringInfo buf, RenameStmt *stmt); +static void AppendAlterViewSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt); + +/* + * DeparseDropViewStmt deparses the given DROP VIEW statement. 
+ */ +char * +DeparseDropViewStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + Assert(stmt->removeType == OBJECT_VIEW); + + AppendDropViewStmt(&str, stmt); + + return str.data; +} + + +/* + * AppendDropViewStmt appends the deparsed representation of given drop stmt + * to the given string info buffer. + */ +static void +AppendDropViewStmt(StringInfo buf, DropStmt *stmt) +{ + /* + * already tested at call site, but for future it might be collapsed in a + * DeparseDropStmt so be safe and check again + */ + Assert(stmt->removeType == OBJECT_VIEW); + + appendStringInfo(buf, "DROP VIEW "); + if (stmt->missing_ok) + { + appendStringInfoString(buf, "IF EXISTS "); + } + AppendViewNameList(buf, stmt->objects); + if (stmt->behavior == DROP_CASCADE) + { + appendStringInfoString(buf, " CASCADE"); + } + appendStringInfoString(buf, ";"); +} + + +/* + * AppendViewNameList appends the qualified view names by constructing them from the given + * objects list to the given string info buffer. Note that, objects must hold schema + * qualified view names as its' members. + */ +static void +AppendViewNameList(StringInfo buf, List *viewNamesList) +{ + bool isFirstView = true; + List *qualifiedViewName = NULL; + foreach_ptr(qualifiedViewName, viewNamesList) + { + char *quotedQualifiedVieName = NameListToQuotedString(qualifiedViewName); + if (!isFirstView) + { + appendStringInfo(buf, ", "); + } + + appendStringInfoString(buf, quotedQualifiedVieName); + isFirstView = false; + } +} + + +/* + * DeparseAlterViewStmt deparses the given ALTER VIEW statement. + */ +char * +DeparseAlterViewStmt(Node *node) +{ + AlterTableStmt *stmt = castNode(AlterTableStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendAlterViewStmt(&str, stmt); + + return str.data; +} + + +static void +AppendAlterViewStmt(StringInfo buf, AlterTableStmt *stmt) +{ + const char *identifier = quote_qualified_identifier(stmt->relation->schemaname, + stmt->relation->relname); + + appendStringInfo(buf, "ALTER VIEW %s ", identifier); + + AlterTableCmd *alterTableCmd = castNode(AlterTableCmd, lfirst(list_head(stmt->cmds))); + AppendAlterViewCmd(buf, alterTableCmd); + + appendStringInfoString(buf, ";"); +} + + +static void +AppendAlterViewCmd(StringInfo buf, AlterTableCmd *alterTableCmd) +{ + switch (alterTableCmd->subtype) + { + case AT_ChangeOwner: + { + AppendAlterViewOwnerStmt(buf, alterTableCmd); + break; + } + + case AT_SetRelOptions: + { + AppendAlterViewSetOptionsStmt(buf, alterTableCmd); + break; + } + + case AT_ResetRelOptions: + { + AppendAlterViewResetOptionsStmt(buf, alterTableCmd); + break; + } + + case AT_ColumnDefault: + { + elog(ERROR, "Citus doesn't support setting or resetting default values for a " + "column of view"); + break; + } + + default: + { + /* + * ALTER VIEW command only supports for the cases checked above but an + * ALTER TABLE commands targeting views may have different cases. 
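As an illustrative sketch (view, column, role, and schema names are hypothetical; only the statement shapes come from the new deparse routines in this file), the view deparsers emit SQL of the following forms:

```sql
-- hypothetical objects; statement forms follow deparse_view_stmts.c
DROP VIEW IF EXISTS public.active_users, public.recent_orders CASCADE;
ALTER VIEW public.active_users OWNER TO reporting_owner;
ALTER VIEW public.active_users SET (security_barrier = true);
ALTER VIEW public.active_users RESET (security_barrier);
ALTER VIEW public.active_users RENAME COLUMN user_id TO id;
ALTER VIEW public.active_users RENAME TO all_users;
ALTER VIEW public.all_users SET SCHEMA reporting;
```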
To let + * PG throw the right error locally, we don't throw any error here + */ + break; + } + } +} + + +static void +AppendAlterViewOwnerStmt(StringInfo buf, AlterTableCmd *alterTableCmd) +{ + appendStringInfo(buf, "OWNER TO %s", RoleSpecString(alterTableCmd->newowner, true)); +} + + +static void +AppendAlterViewSetOptionsStmt(StringInfo buf, AlterTableCmd *alterTableCmd) +{ + ListCell *lc = NULL; + bool initialOption = true; + foreach(lc, (List *) alterTableCmd->def) + { + DefElem *def = (DefElem *) lfirst(lc); + + if (initialOption) + { + appendStringInfo(buf, "SET ("); + initialOption = false; + } + else + { + appendStringInfo(buf, ","); + } + + appendStringInfo(buf, "%s", def->defname); + if (def->arg != NULL) + { + appendStringInfo(buf, "="); + appendStringInfo(buf, "%s", defGetString(def)); + } + } + + appendStringInfo(buf, ")"); +} + + +static void +AppendAlterViewResetOptionsStmt(StringInfo buf, AlterTableCmd *alterTableCmd) +{ + ListCell *lc = NULL; + bool initialOption = true; + foreach(lc, (List *) alterTableCmd->def) + { + DefElem *def = (DefElem *) lfirst(lc); + + if (initialOption) + { + appendStringInfo(buf, "RESET ("); + initialOption = false; + } + else + { + appendStringInfo(buf, ","); + } + + appendStringInfo(buf, "%s", def->defname); + } + + appendStringInfo(buf, ")"); +} + + +char * +DeparseRenameViewStmt(Node *node) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendRenameViewStmt(&str, stmt); + + return str.data; +} + + +static void +AppendRenameViewStmt(StringInfo buf, RenameStmt *stmt) +{ + switch (stmt->renameType) + { + case OBJECT_COLUMN: + { + const char *identifier = + quote_qualified_identifier(stmt->relation->schemaname, + stmt->relation->relname); + appendStringInfo(buf, "ALTER VIEW %s RENAME COLUMN %s TO %s;", identifier, + quote_identifier(stmt->subname), quote_identifier( + stmt->newname)); + break; + } + + case OBJECT_VIEW: + { + const char *identifier = + quote_qualified_identifier(stmt->relation->schemaname, + stmt->relation->relname); + appendStringInfo(buf, "ALTER VIEW %s RENAME TO %s;", identifier, + quote_identifier(stmt->newname)); + break; + } + + default: + { + ereport(ERROR, (errmsg("unsupported subtype for alter view rename command"), + errdetail("sub command type: %d", stmt->renameType))); + } + } +} + + +char * +DeparseAlterViewSchemaStmt(Node *node) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendAlterViewSchemaStmt(&str, stmt); + + return str.data; +} + + +static void +AppendAlterViewSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt) +{ + const char *identifier = quote_qualified_identifier(stmt->relation->schemaname, + stmt->relation->relname); + appendStringInfo(buf, "ALTER VIEW %s SET SCHEMA %s;", identifier, quote_identifier( + stmt->newschema)); +} diff --git a/src/backend/distributed/deparser/qualify_sequence_stmt.c b/src/backend/distributed/deparser/qualify_sequence_stmt.c index efff68c72..60e169e39 100644 --- a/src/backend/distributed/deparser/qualify_sequence_stmt.c +++ b/src/backend/distributed/deparser/qualify_sequence_stmt.c @@ -17,7 +17,9 @@ #include "postgres.h" +#include "distributed/commands.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" #include "distributed/version_compat.h" #include "parser/parse_func.h" #include "utils/lsyscache.h" @@ -38,8 +40,13 @@ QualifyAlterSequenceOwnerStmt(Node *node) if (seq->schemaname == NULL) { - Oid 
schemaOid = RangeVarGetCreationNamespace(seq); - seq->schemaname = get_namespace_name(schemaOid); + Oid seqOid = RangeVarGetRelid(seq, NoLock, stmt->missing_ok); + + if (OidIsValid(seqOid)) + { + Oid schemaOid = get_rel_namespace(seqOid); + seq->schemaname = get_namespace_name(schemaOid); + } } } @@ -59,8 +66,13 @@ QualifyAlterSequenceSchemaStmt(Node *node) if (seq->schemaname == NULL) { - Oid schemaOid = RangeVarGetCreationNamespace(seq); - seq->schemaname = get_namespace_name(schemaOid); + Oid seqOid = RangeVarGetRelid(seq, NoLock, stmt->missing_ok); + + if (OidIsValid(seqOid)) + { + Oid schemaOid = get_rel_namespace(seqOid); + seq->schemaname = get_namespace_name(schemaOid); + } } } @@ -80,7 +92,48 @@ QualifyRenameSequenceStmt(Node *node) if (seq->schemaname == NULL) { - Oid schemaOid = RangeVarGetCreationNamespace(seq); - seq->schemaname = get_namespace_name(schemaOid); + Oid seqOid = RangeVarGetRelid(seq, NoLock, stmt->missing_ok); + + if (OidIsValid(seqOid)) + { + Oid schemaOid = get_rel_namespace(seqOid); + seq->schemaname = get_namespace_name(schemaOid); + } } } + + +/* + * QualifyDropSequenceStmt transforms a DROP SEQUENCE + * statement in place and makes the sequence name fully qualified. + */ +void +QualifyDropSequenceStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + + Assert(stmt->removeType == OBJECT_SEQUENCE); + + List *objectNameListWithSchema = NIL; + List *objectNameList = NULL; + foreach_ptr(objectNameList, stmt->objects) + { + RangeVar *seq = makeRangeVarFromNameList(objectNameList); + + if (seq->schemaname == NULL) + { + Oid seqOid = RangeVarGetRelid(seq, NoLock, stmt->missing_ok); + + if (OidIsValid(seqOid)) + { + Oid schemaOid = get_rel_namespace(seqOid); + seq->schemaname = get_namespace_name(schemaOid); + } + } + + objectNameListWithSchema = lappend(objectNameListWithSchema, + MakeNameListFromRangeVar(seq)); + } + + stmt->objects = objectNameListWithSchema; +} diff --git a/src/backend/distributed/deparser/qualify_statistics_stmt.c b/src/backend/distributed/deparser/qualify_statistics_stmt.c index b8a7ce30e..b176b66c2 100644 --- a/src/backend/distributed/deparser/qualify_statistics_stmt.c +++ b/src/backend/distributed/deparser/qualify_statistics_stmt.c @@ -15,15 +15,19 @@ #include "postgres.h" #include "catalog/namespace.h" +#include "catalog/pg_statistic_ext.h" #include "distributed/commands.h" #include "distributed/deparser.h" #include "distributed/listutils.h" #include "nodes/parsenodes.h" #include "nodes/value.h" +#include "utils/syscache.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relcache.h" +static Oid GetStatsNamespaceOid(Oid statsOid); + void QualifyCreateStatisticsStmt(Node *node) { @@ -68,8 +72,14 @@ QualifyDropStatisticsStmt(Node *node) if (stat->schemaname == NULL) { - Oid schemaOid = RangeVarGetCreationNamespace(stat); - stat->schemaname = get_namespace_name(schemaOid); + Oid statsOid = get_statistics_object_oid(objectNameList, + dropStatisticsStmt->missing_ok); + + if (OidIsValid(statsOid)) + { + Oid schemaOid = GetStatsNamespaceOid(statsOid); + stat->schemaname = get_namespace_name(schemaOid); + } } objectNameListWithSchema = lappend(objectNameListWithSchema, @@ -94,7 +104,14 @@ QualifyAlterStatisticsRenameStmt(Node *node) if (list_length(nameList) == 1) { RangeVar *stat = makeRangeVarFromNameList(nameList); - Oid schemaOid = RangeVarGetCreationNamespace(stat); + Oid statsOid = get_statistics_object_oid(nameList, renameStmt->missing_ok); + + if (!OidIsValid(statsOid)) + { + return; + } + + Oid schemaOid = 
GetStatsNamespaceOid(statsOid); stat->schemaname = get_namespace_name(schemaOid); renameStmt->object = (Node *) MakeNameListFromRangeVar(stat); } @@ -115,7 +132,14 @@ QualifyAlterStatisticsSchemaStmt(Node *node) if (list_length(nameList) == 1) { RangeVar *stat = makeRangeVarFromNameList(nameList); - Oid schemaOid = RangeVarGetCreationNamespace(stat); + Oid statsOid = get_statistics_object_oid(nameList, stmt->missing_ok); + + if (!OidIsValid(statsOid)) + { + return; + } + + Oid schemaOid = GetStatsNamespaceOid(statsOid); stat->schemaname = get_namespace_name(schemaOid); stmt->object = (Node *) MakeNameListFromRangeVar(stat); } @@ -136,7 +160,14 @@ QualifyAlterStatisticsStmt(Node *node) if (list_length(stmt->defnames) == 1) { RangeVar *stat = makeRangeVarFromNameList(stmt->defnames); - Oid schemaOid = RangeVarGetCreationNamespace(stat); + Oid statsOid = get_statistics_object_oid(stmt->defnames, stmt->missing_ok); + + if (!OidIsValid(statsOid)) + { + return; + } + + Oid schemaOid = GetStatsNamespaceOid(statsOid); stat->schemaname = get_namespace_name(schemaOid); stmt->defnames = MakeNameListFromRangeVar(stat); } @@ -159,8 +190,40 @@ QualifyAlterStatisticsOwnerStmt(Node *node) if (list_length(nameList) == 1) { RangeVar *stat = makeRangeVarFromNameList(nameList); - Oid schemaOid = RangeVarGetCreationNamespace(stat); + Oid statsOid = get_statistics_object_oid(nameList, /* missing_ok */ true); + + if (!OidIsValid(statsOid)) + { + return; + } + + Oid schemaOid = GetStatsNamespaceOid(statsOid); stat->schemaname = get_namespace_name(schemaOid); stmt->object = (Node *) MakeNameListFromRangeVar(stat); } } + + +/* + * GetStatsNamespaceOid takes the id of a Statistics object and returns + * the id of the schema that the statistics object belongs to. + * Errors out if the stats object is not found. + */ +static Oid +GetStatsNamespaceOid(Oid statsOid) +{ + HeapTuple heapTuple = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statsOid)); + if (!HeapTupleIsValid(heapTuple)) + { + ereport(ERROR, (errmsg("cache lookup failed for statistics " + "object with oid %u", statsOid))); + } + FormData_pg_statistic_ext *statisticsForm = + (FormData_pg_statistic_ext *) GETSTRUCT(heapTuple); + + Oid result = statisticsForm->stxnamespace; + + ReleaseSysCache(heapTuple); + + return result; +} diff --git a/src/backend/distributed/deparser/qualify_view_stmt.c b/src/backend/distributed/deparser/qualify_view_stmt.c new file mode 100644 index 000000000..b787bf9b7 --- /dev/null +++ b/src/backend/distributed/deparser/qualify_view_stmt.c @@ -0,0 +1,107 @@ +/*------------------------------------------------------------------------- + * + * qualify_view_stmt.c + * Functions specialized in fully qualifying all view statements. These + * functions are dispatched from qualify.c + * + * Copyright (c), Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/namespace.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "nodes/nodes.h" +#include "utils/guc.h" +#include "utils/lsyscache.h" + +static void QualifyViewRangeVar(RangeVar *view); + +/* + * QualifyDropViewStmt quailifies the view names of the DROP VIEW statement. 
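A minimal sketch of what this qualification means in practice, assuming search_path resolves to public and a hypothetical view name:

```sql
SET search_path TO public;
CREATE VIEW active_users AS SELECT 1 AS id;
-- the unqualified statement below is qualified before propagation,
-- effectively becoming "DROP VIEW public.active_users;"
DROP VIEW active_users;
```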
+ */ +void +QualifyDropViewStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + List *qualifiedViewNames = NIL; + + List *possiblyQualifiedViewName = NULL; + foreach_ptr(possiblyQualifiedViewName, stmt->objects) + { + char *viewName = NULL; + char *schemaName = NULL; + DeconstructQualifiedName(possiblyQualifiedViewName, &schemaName, &viewName); + + if (schemaName == NULL) + { + char *objname = NULL; + Oid schemaOid = QualifiedNameGetCreationNamespace(possiblyQualifiedViewName, + &objname); + schemaName = get_namespace_name(schemaOid); + List *qualifiedViewName = list_make2(makeString(schemaName), + makeString(viewName)); + qualifiedViewNames = lappend(qualifiedViewNames, qualifiedViewName); + } + else + { + qualifiedViewNames = lappend(qualifiedViewNames, possiblyQualifiedViewName); + } + } + + stmt->objects = qualifiedViewNames; +} + + +/* + * QualifyAlterViewStmt quailifies the view name of the ALTER VIEW statement. + */ +void +QualifyAlterViewStmt(Node *node) +{ + AlterTableStmt *stmt = castNode(AlterTableStmt, node); + RangeVar *view = stmt->relation; + QualifyViewRangeVar(view); +} + + +/* + * QualifyRenameViewStmt quailifies the view name of the ALTER VIEW ... RENAME statement. + */ +void +QualifyRenameViewStmt(Node *node) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + RangeVar *view = stmt->relation; + QualifyViewRangeVar(view); +} + + +/* + * QualifyAlterViewSchemaStmt quailifies the view name of the ALTER VIEW ... SET SCHEMA statement. + */ +void +QualifyAlterViewSchemaStmt(Node *node) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + RangeVar *view = stmt->relation; + QualifyViewRangeVar(view); +} + + +/* + * QualifyViewRangeVar qualifies the given view RangeVar if it is not qualified. + */ +static void +QualifyViewRangeVar(RangeVar *view) +{ + if (view->schemaname == NULL) + { + Oid viewOid = RelnameGetRelid(view->relname); + Oid schemaOid = get_rel_namespace(viewOid); + view->schemaname = get_namespace_name(schemaOid); + } +} diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index 2b32916ee..6ab27d078 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -152,6 +152,7 @@ #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_server_executor.h" +#include "distributed/param_utils.h" #include "distributed/placement_access.h" #include "distributed/placement_connection.h" #include "distributed/relation_access_tracking.h" @@ -171,7 +172,6 @@ #include "storage/fd.h" #include "storage/latch.h" #include "utils/builtins.h" -#include "utils/int8.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/syscache.h" @@ -831,6 +831,19 @@ AdaptiveExecutor(CitusScanState *scanState) distributedPlan->modLevel, taskList, excludeFromXact); bool localExecutionSupported = true; + + /* + * In some rare cases, we have prepared statements that pass a parameter + * and never used in the query, mark such parameters' type as Invalid(0), + * which will be used later in ExtractParametersFromParamList() to map them + * to a generic datatype. Skip for dynamic parameters. 
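A hedged example of the situation described above, using a hypothetical distributed table: the second parameter is declared and passed but never referenced in the query, so its type is marked invalid and later mapped to a generic datatype.

```sql
-- dist_table and its key column are placeholders for any distributed table
PREPARE count_rows(int, int) AS
    SELECT count(*) FROM dist_table WHERE key = $1;
-- $2 is supplied but never used in the query text
EXECUTE count_rows(1, 2);
```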
+ */ + if (paramListInfo && !paramListInfo->paramFetch) + { + paramListInfo = copyParamList(paramListInfo); + MarkUnreferencedExternParams((Node *) job->jobQuery, paramListInfo); + } + DistributedExecution *execution = CreateDistributedExecution( distributedPlan->modLevel, taskList, @@ -1321,7 +1334,8 @@ StartDistributedExecution(DistributedExecution *execution) /* make sure we are not doing remote execution from within a task */ if (execution->remoteTaskList != NIL) { - EnsureRemoteTaskExecutionAllowed(); + bool isRemote = true; + EnsureTaskExecutionAllowed(isRemote); } } @@ -4513,7 +4527,7 @@ ReceiveResults(WorkerSession *session, bool storeRows) /* if there are multiple replicas, make sure to consider only one */ if (storeRows && *currentAffectedTupleString != '\0') { - scanint8(currentAffectedTupleString, false, ¤tAffectedTupleCount); + currentAffectedTupleCount = pg_strtoint64(currentAffectedTupleString); Assert(currentAffectedTupleCount >= 0); execution->rowsProcessed += currentAffectedTupleCount; } diff --git a/src/backend/distributed/executor/local_executor.c b/src/backend/distributed/executor/local_executor.c index 7c0426c0a..5c1f0981e 100644 --- a/src/backend/distributed/executor/local_executor.c +++ b/src/backend/distributed/executor/local_executor.c @@ -108,26 +108,26 @@ bool EnableLocalExecution = true; bool LogLocalCommands = false; -int LocalExecutorLevel = 0; +/* global variable that tracks whether the local execution is on a shard */ +uint64 LocalExecutorShardId = INVALID_SHARD_ID; static LocalExecutionStatus CurrentLocalExecutionStatus = LOCAL_EXECUTION_OPTIONAL; -static uint64 ExecuteLocalTaskListInternal(List *taskList, - ParamListInfo paramListInfo, - DistributedPlan *distributedPlan, - TupleDestination *defaultTupleDest, - bool isUtilityCommand); static void SplitLocalAndRemotePlacements(List *taskPlacementList, List **localTaskPlacementList, List **remoteTaskPlacementList); -static uint64 ExecuteLocalTaskPlan(PlannedStmt *taskPlan, char *queryString, - TupleDestination *tupleDest, Task *task, - ParamListInfo paramListInfo); +static uint64 LocallyExecuteTaskPlan(PlannedStmt *taskPlan, char *queryString, + TupleDestination *tupleDest, Task *task, + ParamListInfo paramListInfo); +static uint64 ExecuteTaskPlan(PlannedStmt *taskPlan, char *queryString, + TupleDestination *tupleDest, Task *task, + ParamListInfo paramListInfo); static void RecordNonDistTableAccessesForTask(Task *task); static void LogLocalCommand(Task *task); static uint64 LocallyPlanAndExecuteMultipleQueries(List *queryStrings, TupleDestination *tupleDest, Task *task); +static void LocallyExecuteUtilityTask(Task *task); static void ExecuteUdfTaskQuery(Query *localUdfCommandQuery); static void EnsureTransitionPossible(LocalExecutionStatus from, LocalExecutionStatus to); @@ -204,50 +204,7 @@ ExecuteLocalTaskListExtended(List *taskList, TupleDestination *defaultTupleDest, bool isUtilityCommand) { - uint64 totalRowsProcessed = 0; ParamListInfo paramListInfo = copyParamList(orig_paramListInfo); - - /* - * Even if we are executing local tasks, we still enable - * coordinated transaction. This is because - * (a) we might be in a transaction, and the next commands may - * require coordinated transaction - * (b) we might be executing some tasks locally and the others - * via remote execution - * - * Also, there is no harm enabling coordinated transaction even if - * we only deal with local tasks in the transaction. 
- */ - UseCoordinatedTransaction(); - - LocalExecutorLevel++; - PG_TRY(); - { - totalRowsProcessed = ExecuteLocalTaskListInternal(taskList, paramListInfo, - distributedPlan, - defaultTupleDest, - isUtilityCommand); - } - PG_CATCH(); - { - LocalExecutorLevel--; - - PG_RE_THROW(); - } - PG_END_TRY(); - LocalExecutorLevel--; - - return totalRowsProcessed; -} - - -static uint64 -ExecuteLocalTaskListInternal(List *taskList, - ParamListInfo paramListInfo, - DistributedPlan *distributedPlan, - TupleDestination *defaultTupleDest, - bool isUtilityCommand) -{ uint64 totalRowsProcessed = 0; int numParams = 0; Oid *parameterTypes = NULL; @@ -263,6 +220,12 @@ ExecuteLocalTaskListInternal(List *taskList, numParams = paramListInfo->numParams; } + if (taskList != NIL) + { + bool isRemote = false; + EnsureTaskExecutionAllowed(isRemote); + } + /* * Use a new memory context that gets reset after every task to free * the deparsed query string and query plan. @@ -304,7 +267,7 @@ ExecuteLocalTaskListInternal(List *taskList, if (isUtilityCommand) { - ExecuteUtilityCommand(TaskQueryString(task)); + LocallyExecuteUtilityTask(task); MemoryContextSwitchTo(oldContext); MemoryContextReset(loopContext); @@ -391,8 +354,8 @@ ExecuteLocalTaskListInternal(List *taskList, } totalRowsProcessed += - ExecuteLocalTaskPlan(localPlan, shardQueryString, - tupleDest, task, paramListInfo); + LocallyExecuteTaskPlan(localPlan, shardQueryString, + tupleDest, task, paramListInfo); MemoryContextSwitchTo(oldContext); MemoryContextReset(loopContext); @@ -421,9 +384,9 @@ LocallyPlanAndExecuteMultipleQueries(List *queryStrings, TupleDestination *tuple ParamListInfo paramListInfo = NULL; PlannedStmt *localPlan = planner_compat(shardQuery, cursorOptions, paramListInfo); - totalProcessedRows += ExecuteLocalTaskPlan(localPlan, queryString, - tupleDest, task, - paramListInfo); + totalProcessedRows += LocallyExecuteTaskPlan(localPlan, queryString, + tupleDest, task, + paramListInfo); } return totalProcessedRows; } @@ -444,6 +407,39 @@ ExtractParametersForLocalExecution(ParamListInfo paramListInfo, Oid **parameterT } +/* + * LocallyExecuteUtilityTask runs a utility command via local execution. + */ +static void +LocallyExecuteUtilityTask(Task *task) +{ + /* + * If we roll back to a savepoint, we may no longer be in a query on + * a shard. Reset the value as we go back up the stack. + */ + uint64 prevLocalExecutorShardId = LocalExecutorShardId; + + if (task->anchorShardId != INVALID_SHARD_ID) + { + LocalExecutorShardId = task->anchorShardId; + } + + PG_TRY(); + { + ExecuteUtilityCommand(TaskQueryString(task)); + } + PG_CATCH(); + { + LocalExecutorShardId = prevLocalExecutorShardId; + + PG_RE_THROW(); + } + PG_END_TRY(); + + LocalExecutorShardId = prevLocalExecutorShardId; +} + + /* * ExecuteUtilityCommand executes the given task query in the current * session. @@ -569,9 +565,8 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList, * At this point, we're dealing with a task that has placements on both * local and remote nodes. 
*/ - task->partiallyLocalOrRemote = true; - Task *localTask = copyObject(task); + localTask->partiallyLocalOrRemote = true; localTask->taskPlacementList = localTaskPlacementList; *localTaskList = lappend(*localTaskList, localTask); @@ -585,6 +580,7 @@ ExtractLocalAndRemoteTasks(bool readOnly, List *taskList, List **localTaskList, /* since shard replication factor > 1, we should have at least 1 remote task */ Assert(remoteTaskPlacementList != NIL); Task *remoteTask = copyObject(task); + remoteTask->partiallyLocalOrRemote = true; remoteTask->taskPlacementList = remoteTaskPlacementList; *remoteTaskList = lappend(*remoteTaskList, remoteTask); @@ -630,9 +626,50 @@ SplitLocalAndRemotePlacements(List *taskPlacementList, List **localTaskPlacement * case of DML. */ static uint64 -ExecuteLocalTaskPlan(PlannedStmt *taskPlan, char *queryString, - TupleDestination *tupleDest, Task *task, - ParamListInfo paramListInfo) +LocallyExecuteTaskPlan(PlannedStmt *taskPlan, char *queryString, + TupleDestination *tupleDest, Task *task, + ParamListInfo paramListInfo) +{ + volatile uint64 processedRows = 0; + + /* + * If we roll back to a savepoint, we may no longer be in a query on + * a shard. Reset the value as we go back up the stack. + */ + uint64 prevLocalExecutorShardId = LocalExecutorShardId; + + if (task->anchorShardId != INVALID_SHARD_ID) + { + LocalExecutorShardId = task->anchorShardId; + } + + PG_TRY(); + { + processedRows = ExecuteTaskPlan(taskPlan, queryString, tupleDest, task, + paramListInfo); + } + PG_CATCH(); + { + LocalExecutorShardId = prevLocalExecutorShardId; + + PG_RE_THROW(); + } + PG_END_TRY(); + + LocalExecutorShardId = prevLocalExecutorShardId; + + return processedRows; +} + + +/* + * ExecuteTaskPlan executes the given planned statement and writes the results + * to tupleDest. + */ +static uint64 +ExecuteTaskPlan(PlannedStmt *taskPlan, char *queryString, + TupleDestination *tupleDest, Task *task, + ParamListInfo paramListInfo) { ScanDirection scanDirection = ForwardScanDirection; QueryEnvironment *queryEnv = create_queryEnv(); @@ -642,7 +679,7 @@ ExecuteLocalTaskPlan(PlannedStmt *taskPlan, char *queryString, RecordNonDistTableAccessesForTask(task); MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, - "ExecuteLocalTaskPlan", + "ExecuteTaskPlan", ALLOCSET_DEFAULT_SIZES); MemoryContext oldContext = MemoryContextSwitchTo(localContext); diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index f03e96b7c..fb7f687a3 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -18,6 +18,7 @@ #include "catalog/dependency.h" #include "catalog/pg_class.h" #include "catalog/namespace.h" +#include "distributed/backend_data.h" #include "distributed/citus_custom_scan.h" #include "distributed/commands/multi_copy.h" #include "distributed/commands/utility_hook.h" @@ -50,6 +51,7 @@ #include "tcop/dest.h" #include "tcop/pquery.h" #include "tcop/utility.h" +#include "utils/fmgrprotos.h" #include "utils/snapmgr.h" #include "utils/memutils.h" @@ -62,6 +64,12 @@ int MultiShardConnectionType = PARALLEL_CONNECTION; bool WritableStandbyCoordinator = false; bool AllowModificationsFromWorkersToReplicatedTables = true; +/* + * Setting that controls whether distributed queries should be + * allowed within a task execution. + */ +bool AllowNestedDistributedExecution = false; + /* * Pointer to bound parameters of the current ongoing call to ExecutorRun. 
* If executor is not running, then this value is meaningless. @@ -87,6 +95,11 @@ static bool AlterTableConstraintCheck(QueryDesc *queryDesc); static List * FindCitusCustomScanStates(PlanState *planState); static bool CitusCustomScanStateWalker(PlanState *planState, List **citusCustomScanStates); +static bool IsTaskExecutionAllowed(bool isRemote); +static bool InLocalTaskExecutionOnShard(void); +static bool MaybeInRemoteTaskExecution(void); +static bool InTrigger(void); + /* * CitusExecutorStart is the ExecutorStart_hook that gets called when @@ -617,7 +630,8 @@ RewriteRawQueryStmt(RawStmt *rawStmt, const char *queryString, Oid *paramOids, i numParams) { List *queryTreeList = - pg_analyze_and_rewrite(rawStmt, queryString, paramOids, numParams, NULL); + pg_analyze_and_rewrite_fixedparams(rawStmt, queryString, paramOids, numParams, + NULL); if (list_length(queryTreeList) != 1) { @@ -803,6 +817,11 @@ GetObjectTypeString(ObjectType objType) return "type"; } + case OBJECT_VIEW: + { + return "view"; + } + default: { ereport(DEBUG1, (errmsg("unsupported object type"), @@ -865,43 +884,146 @@ ExecutorBoundParams(void) /* - * EnsureRemoteTaskExecutionAllowed ensures that we do not perform remote + * EnsureTaskExecutionAllowed ensures that we do not perform remote * execution from within a task. That could happen when the user calls * a function in a query that gets pushed down to the worker, and the * function performs a query on a distributed table. */ void -EnsureRemoteTaskExecutionAllowed(void) +EnsureTaskExecutionAllowed(bool isRemote) { - if (!InTaskExecution()) + if (IsTaskExecutionAllowed(isRemote)) { - /* we are not within a task, distributed execution is allowed */ return; } ereport(ERROR, (errmsg("cannot execute a distributed query from a query on a " - "shard"))); + "shard"), + errdetail("Executing a distributed query in a function call that " + "may be pushed to a remote node can lead to incorrect " + "results."), + errhint("Avoid nesting of distributed queries or use alter user " + "current_user set citus.allow_nested_distributed_execution " + "to on to allow it with possible incorrectness."))); } /* - * InTaskExecution determines whether we are currently in a task execution. + * IsTaskExecutionAllowed determines whether task execution is currently allowed. + * In general, nested distributed execution is not allowed, except in a few cases + * (forced function call delegation, triggers). + * + * We distinguish between local and remote tasks because triggers only disallow + * remote task execution. */ -bool -InTaskExecution(void) +static bool +IsTaskExecutionAllowed(bool isRemote) { - if (LocalExecutorLevel > 0) + if (AllowNestedDistributedExecution) { - /* in a local task */ + /* user explicitly allows nested execution */ return true; } - /* - * Normally, any query execution within a citus-initiated backend - * is considered a task execution, but an exception is when we - * are in a delegated function/procedure call. - */ - return IsCitusInternalBackend() && - !InTopLevelDelegatedFunctionCall && - !InDelegatedProcedureCall; + if (!isRemote) + { + if (AllowedDistributionColumnValue.isActive) + { + /* + * When we are in a forced delegated function call, we explicitly check + * whether local tasks use the same distribution column value in + * EnsureForceDelegationDistributionKey. + */ + return true; + } + + if (InTrigger()) + { + /* + * In triggers on shards we only disallow remote tasks. 
This has a few + * reasons: + * + * - We want to enable access to co-located shards, but do not have additional + * checks yet. + * - Users need to explicitly set enable_unsafe_triggers in order to create + * triggers on distributed tables. + * - Triggers on Citus local tables should be able to access other Citus local + * tables. + */ + return true; + } + } + + return !InLocalTaskExecutionOnShard() && !MaybeInRemoteTaskExecution(); +} + + +/* + * InLocalTaskExecutionOnShard returns whether we are currently in the local executor + * and it is working on a shard of a distributed table. + * + * In general, we can allow distributed queries inside of local executor, because + * we can correctly assign tasks to connections. However, we preemptively protect + * against distributed queries inside of queries on shards of a distributed table, + * because those might start failing after a shard move. + */ +static bool +InLocalTaskExecutionOnShard(void) +{ + if (LocalExecutorShardId == INVALID_SHARD_ID) + { + /* local executor is not active or is processing a task without shards */ + return false; + } + + if (!DistributedTableShardId(LocalExecutorShardId)) + { + /* + * Local executor is processing a query on a shard, but the shard belongs + * to a reference table or Citus local table. We do not expect those to + * move. + */ + return false; + } + + return true; +} + + +/* + * MaybeInRemoteTaskExecution returns whether we could in a remote task execution. + * + * We consider anything that happens in a Citus-internal backend, except deleged + * function or procedure calls as a potential task execution. + * + * This function will also return true in other scenarios, such as during metadata + * syncing. However, since this function is mainly used for restricting (dangerous) + * nested executions, it is good to be pessimistic. + */ +static bool +MaybeInRemoteTaskExecution(void) +{ + if (!IsCitusInternalBackend()) + { + /* in a regular, client-initiated backend doing a regular task */ + return false; + } + + if (InTopLevelDelegatedFunctionCall || InDelegatedProcedureCall) + { + /* in a citus-initiated backend, but also in a delegated a procedure call */ + return false; + } + + return true; +} + + +/* + * InTrigger returns whether the execution is currently in a trigger. + */ +static bool +InTrigger(void) +{ + return DatumGetInt32(pg_trigger_depth(NULL)) > 0; } diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 7545bb6cd..e4a863fdc 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -165,6 +165,7 @@ static bool FollowAllDependencies(ObjectAddressCollector *collector, DependencyDefinition *definition); static void ApplyAddToDependencyList(ObjectAddressCollector *collector, DependencyDefinition *definition); +static List * GetViewRuleReferenceDependencyList(Oid relationId); static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress target); static ViewDependencyNode * BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap); @@ -425,7 +426,7 @@ DependencyDefinitionFromPgDepend(ObjectAddress target) /* - * DependencyDefinitionFromPgDepend loads all pg_shdepend records describing the + * DependencyDefinitionFromPgShDepend loads all pg_shdepend records describing the * dependencies of target. 
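For reference, the opt-in mentioned in the error hint of EnsureTaskExecutionAllowed above can be expressed as follows; use it with care, since nested distributed execution may return incorrect results:

```sql
-- session-level opt-in
SET citus.allow_nested_distributed_execution TO on;
-- or, as suggested in the hint, per user
ALTER USER current_user SET citus.allow_nested_distributed_execution TO on;
```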
*/ static List * @@ -747,7 +748,8 @@ SupportedDependencyByCitus(const ObjectAddress *address) relKind == RELKIND_FOREIGN_TABLE || relKind == RELKIND_SEQUENCE || relKind == RELKIND_INDEX || - relKind == RELKIND_PARTITIONED_INDEX) + relKind == RELKIND_PARTITIONED_INDEX || + relKind == RELKIND_VIEW) { return true; } @@ -764,6 +766,58 @@ SupportedDependencyByCitus(const ObjectAddress *address) } +/* + * ErrorOrWarnIfObjectHasUnsupportedDependency returns false without throwing any message if + * object doesn't have any unsupported dependency, else throws a message with proper level + * (except the cluster doesn't have any node) and return true. + */ +bool +ErrorOrWarnIfObjectHasUnsupportedDependency(ObjectAddress *objectAddress) +{ + DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(objectAddress); + if (errMsg != NULL) + { + /* + * Don't need to give any messages if there is no worker nodes in + * the cluster as user's experience won't be affected on the single node even + * if the object won't be distributed. + */ + if (!HasAnyNodes()) + { + return true; + } + + /* + * Since Citus drops and recreates some object while converting a table type + * giving a DEBUG1 message is enough if the process in table type conversion + * function call + */ + if (InTableTypeConversionFunctionCall) + { + RaiseDeferredError(errMsg, DEBUG1); + } + /* + * If the view is object distributed, we should provide an error to not have + * different definition of object on coordinator and worker nodes. If the object + * is not distributed yet, we can create it locally to not affect user's local + * usage experience. + */ + else if (IsObjectDistributed(objectAddress)) + { + RaiseDeferredError(errMsg, ERROR); + } + else + { + RaiseDeferredError(errMsg, WARNING); + } + + return true; + } + + return false; +} + + /* * DeferErrorIfHasUnsupportedDependency returns deferred error message if the given * object has any undistributable dependency. @@ -801,8 +855,11 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) * Otherwise, callers are expected to throw the error returned from this * function as a hard one by ignoring the detail part. 
*/ - appendStringInfo(detailInfo, "\"%s\" will be created only locally", - objectDescription); + if (!IsObjectDistributed(objectAddress)) + { + appendStringInfo(detailInfo, "\"%s\" will be created only locally", + objectDescription); + } if (SupportedDependencyByCitus(undistributableDependency)) { @@ -813,9 +870,19 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) objectDescription, dependencyDescription); - appendStringInfo(hintInfo, "Distribute \"%s\" first to distribute \"%s\"", - dependencyDescription, - objectDescription); + if (IsObjectDistributed(objectAddress)) + { + appendStringInfo(hintInfo, + "Distribute \"%s\" first to modify \"%s\" on worker nodes", + dependencyDescription, + objectDescription); + } + else + { + appendStringInfo(hintInfo, "Distribute \"%s\" first to distribute \"%s\"", + dependencyDescription, + objectDescription); + } return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, errorInfo->data, detailInfo->data, hintInfo->data); @@ -893,7 +960,9 @@ GetUndistributableDependency(const ObjectAddress *objectAddress) { char relKind = get_rel_relkind(dependency->objectId); - if (relKind == RELKIND_SEQUENCE || relKind == RELKIND_COMPOSITE_TYPE) + if (relKind == RELKIND_SEQUENCE || + relKind == RELKIND_COMPOSITE_TYPE || + relKind == RELKIND_VIEW) { /* citus knows how to auto-distribute these dependencies */ continue; @@ -1307,9 +1376,26 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe * create all objects required by the indices before we create the table * including indices. */ - List *indexDependencyList = GetRelationIndicesDependencyList(relationId); result = list_concat(result, indexDependencyList); + + /* + * Get the dependencies of the rule for the given view. PG keeps internal + * dependency between view and rule. As it is stated on the PG doc, if + * there is an internal dependency, dependencies of the dependent object + * behave much like they were dependencies of the referenced object. + * + * We need to expand dependencies by including dependencies of the rule + * internally dependent to the view. PG doesn't keep any dependencies + * from view to any object, but it keeps an internal dependency to the + * rule and that rule has dependencies to other objects. + */ + char relKind = get_rel_relkind(relationId); + if (relKind == RELKIND_VIEW) + { + List *ruleRefDepList = GetViewRuleReferenceDependencyList(relationId); + result = list_concat(result, ruleRefDepList); + } } default: @@ -1322,6 +1408,64 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe } +/* + * GetViewRuleReferenceDependencyList returns the dependencies of the view's + * internal rule dependencies. + */ +static List * +GetViewRuleReferenceDependencyList(Oid viewId) +{ + List *dependencyTupleList = GetPgDependTuplesForDependingObjects(RelationRelationId, + viewId); + List *nonInternalDependenciesOfDependingRules = NIL; + + HeapTuple depTup = NULL; + foreach_ptr(depTup, dependencyTupleList) + { + Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup); + + /* + * Dependencies of the internal rule dependency should be handled as the dependency + * of referenced view object. + * + * PG doesn't keep dependency relation between views and dependent objects directly + * but it keeps an internal dependency relation between the view and the rule, then + * keeps the dependent objects of the view as non-internal dependencies of the + * internally dependent rule object. 
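The internal view-to-rule dependency described above can be inspected directly in pg_depend. A hypothetical query (my_view is a placeholder) showing the DEPENDENCY_INTERNAL ('i') entry of the view's rewrite rule, whose own entries carry the view's real dependencies:

```sql
SELECT classid::regclass  AS dependent_catalog,
       objid              AS rule_oid,
       refobjid::regclass AS view_name,
       deptype
FROM pg_depend
WHERE refclassid = 'pg_class'::regclass
  AND refobjid = 'my_view'::regclass
  AND deptype = 'i';
```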
+ */ + if (pg_depend->deptype == DEPENDENCY_INTERNAL && pg_depend->classid == + RewriteRelationId) + { + ObjectAddress ruleAddress = { 0 }; + ObjectAddressSet(ruleAddress, RewriteRelationId, pg_depend->objid); + + /* Expand results with the noninternal dependencies of it */ + List *ruleDependencies = DependencyDefinitionFromPgDepend(ruleAddress); + + DependencyDefinition *dependencyDef = NULL; + foreach_ptr(dependencyDef, ruleDependencies) + { + /* + * Follow all dependencies of the internally dependent rule dependencies + * except it is an internal dependency of view itself. + */ + if (dependencyDef->data.pg_depend.deptype == DEPENDENCY_INTERNAL || + (dependencyDef->data.pg_depend.refclassid == RelationRelationId && + dependencyDef->data.pg_depend.refobjid == viewId)) + { + continue; + } + + nonInternalDependenciesOfDependingRules = + lappend(nonInternalDependenciesOfDependingRules, dependencyDef); + } + } + } + + return nonInternalDependenciesOfDependingRules; +} + + /* * GetRelationSequenceDependencyList returns the sequence dependency definition * list for the given relation. diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index b345210af..172691b7d 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -46,7 +46,6 @@ #include "utils/rel.h" -static void MarkObjectDistributedLocally(const ObjectAddress *distAddress); static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress); static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, Datum *paramValues); @@ -195,7 +194,7 @@ MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress) * This function should never be called alone, MarkObjectDistributed() or * MarkObjectDistributedViaSuperUser() should be called. */ -static void +void MarkObjectDistributedLocally(const ObjectAddress *distAddress) { int paramCount = 3; diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 63c2f8695..26371b45a 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -7,7 +7,9 @@ *------------------------------------------------------------------------- */ +#include "postgres.h" #include "distributed/pg_version_constants.h" +#include "pg_version_compat.h" #include "stdint.h" #include "postgres.h" @@ -150,6 +152,7 @@ typedef struct MetadataCacheData Oid distShardShardidIndexId; Oid distPlacementShardidIndexId; Oid distPlacementPlacementidIndexId; + Oid distColocationidIndexId; Oid distPlacementGroupidIndexId; Oid distTransactionRelationId; Oid distTransactionGroupIndexId; @@ -183,6 +186,9 @@ bool EnableVersionChecks = true; /* version checks are enabled */ static bool citusVersionKnownCompatible = false; +/* Variable to determine if we are in the process of creating citus */ +static int CreateCitusTransactionLevel = 0; + /* Hash table for informations about each partition */ static HTAB *DistTableCacheHash = NULL; static List *DistTableCacheExpired = NIL; @@ -720,6 +726,24 @@ ReferenceTableShardId(uint64 shardId) } +/* + * DistributedTableShardId returns true if the given shardId belongs to + * a distributed table. 
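A small illustration of the shard metadata that DistributedTableShardId() consults; dist_table is a placeholder for any distributed table:

```sql
-- list the shard ids belonging to a distributed table
SELECT shardid
FROM pg_dist_shard
WHERE logicalrelid = 'dist_table'::regclass;
```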
+ */ +bool +DistributedTableShardId(uint64 shardId) +{ + if (shardId == INVALID_SHARD_ID) + { + return false; + } + + ShardIdCacheEntry *shardIdEntry = LookupShardIdCacheEntry(shardId); + CitusTableCacheEntry *tableEntry = shardIdEntry->tableEntry; + return IsCitusTableTypeCacheEntry(tableEntry, DISTRIBUTED_TABLE); +} + + /* * LoadGroupShardPlacement returns the cached shard placement metadata * @@ -1965,6 +1989,27 @@ CitusHasBeenLoadedInternal(void) } +/* + * GetCitusCreationLevel returns the level of the transaction creating citus + */ +int +GetCitusCreationLevel(void) +{ + return CreateCitusTransactionLevel; +} + + +/* + * Sets the value of CreateCitusTransactionLevel based on int received which represents the + * nesting level of the transaction that created the Citus extension + */ +void +SetCreateCitusTransactionLevel(int val) +{ + CreateCitusTransactionLevel = val; +} + + /* * CheckCitusVersion checks whether there is a version mismatch between the * available version and the loaded version or between the installed version @@ -2504,6 +2549,17 @@ DistPlacementPlacementidIndexId(void) } +/* return oid of pg_dist_colocation_pkey */ +Oid +DistColocationIndexId(void) +{ + CachedRelationLookup("pg_dist_colocation_pkey", + &MetadataCache.distColocationidIndexId); + + return MetadataCache.distColocationidIndexId; +} + + /* return oid of pg_dist_transaction relation */ Oid DistTransactionRelationId(void) @@ -2864,8 +2920,8 @@ CurrentUserName(void) Oid LookupTypeOid(char *schemaNameSting, char *typeNameString) { - Value *schemaName = makeString(schemaNameSting); - Value *typeName = makeString(typeNameString); + String *schemaName = makeString(schemaNameSting); + String *typeName = makeString(typeNameString); List *qualifiedName = list_make2(schemaName, typeName); TypeName *enumTypeName = makeTypeNameFromNameList(qualifiedName); diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 4b62afc3b..4e50acab6 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -97,6 +97,7 @@ static char * SchemaOwnerName(Oid objectId); static bool HasMetadataWorkers(void); static void CreateShellTableOnWorkers(Oid relationId); static void CreateTableMetadataOnWorkers(Oid relationId); +static NodeMetadataSyncResult SyncNodeMetadataToNodesOptional(void); static bool ShouldSyncTableMetadataInternal(bool hashDistributed, bool citusTableWithNoDistKey); static bool SyncNodeMetadataSnapshotToNode(WorkerNode *workerNode, bool raiseOnError); @@ -138,6 +139,7 @@ static char * RemoteTypeIdExpression(Oid typeId); static char * RemoteCollationIdExpression(Oid colocationId); +PG_FUNCTION_INFO_V1(start_metadata_sync_to_all_nodes); PG_FUNCTION_INFO_V1(start_metadata_sync_to_node); PG_FUNCTION_INFO_V1(stop_metadata_sync_to_node); PG_FUNCTION_INFO_V1(worker_record_sequence_dependency); @@ -194,6 +196,33 @@ start_metadata_sync_to_node(PG_FUNCTION_ARGS) } +/* + * start_metadata_sync_to_all_nodes function sets hasmetadata column of + * all the primary worker nodes to true, and then activate nodes without + * replicating reference tables. 
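Usage sketch for the UDF introduced above; it takes no arguments and returns true on success:

```sql
SELECT start_metadata_sync_to_all_nodes();
```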
+ */ +Datum +start_metadata_sync_to_all_nodes(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + EnsureSuperUser(); + EnsureCoordinator(); + + List *workerNodes = ActivePrimaryNonCoordinatorNodeList(RowShareLock); + + bool prevReplicateRefTablesOnActivate = ReplicateReferenceTablesOnActivate; + SetLocalReplicateReferenceTablesOnActivate(false); + + ActivateNodeList(workerNodes); + TransactionModifiedNodeMetadata = true; + + SetLocalReplicateReferenceTablesOnActivate(prevReplicateRefTablesOnActivate); + + PG_RETURN_BOOL(true); +} + + /* * SyncNodeMetadataToNode is the internal API for * start_metadata_sync_to_node(). @@ -425,6 +454,24 @@ ClusterHasKnownMetadataWorkers() } +/* + * ShouldSyncUserCommandForObject checks if the user command should be synced to the + * worker nodes for the given object. + */ +bool +ShouldSyncUserCommandForObject(ObjectAddress objectAddress) +{ + if (objectAddress.classId == RelationRelationId) + { + Oid relOid = objectAddress.objectId; + return ShouldSyncTableMetadata(relOid) || + get_rel_relkind(relOid) == RELKIND_VIEW; + } + + return false; +} + + /* * ShouldSyncTableMetadata checks if the metadata of a distributed table should be * propagated to metadata workers, i.e. the table is a hash distributed table or @@ -524,10 +571,10 @@ SyncNodeMetadataSnapshotToNode(WorkerNode *workerNode, bool raiseOnError) */ if (raiseOnError) { - SendMetadataCommandListToWorkerInCoordinatedTransaction(workerNode->workerName, - workerNode->workerPort, - currentUser, - recreateMetadataSnapshotCommandList); + SendMetadataCommandListToWorkerListInCoordinatedTransaction(list_make1( + workerNode), + currentUser, + recreateMetadataSnapshotCommandList); return true; } else @@ -2219,16 +2266,16 @@ DetachPartitionCommandList(void) /* - * SyncNodeMetadataToNodes tries recreating the metadata snapshot in the - * metadata workers that are out of sync. Returns the result of - * synchronization. + * SyncNodeMetadataToNodesOptional tries recreating the metadata + * snapshot in the metadata workers that are out of sync. + * Returns the result of synchronization. * * This function must be called within coordinated transaction * since updates on the pg_dist_node metadata must be rollbacked if anything * goes wrong. */ static NodeMetadataSyncResult -SyncNodeMetadataToNodes(void) +SyncNodeMetadataToNodesOptional(void) { NodeMetadataSyncResult result = NODE_METADATA_SYNC_SUCCESS; if (!IsCoordinator()) @@ -2288,6 +2335,46 @@ SyncNodeMetadataToNodes(void) } +/* + * SyncNodeMetadataToNodes recreates the node metadata snapshot in all the + * metadata workers. + * + * This function runs within a coordinated transaction since updates on + * the pg_dist_node metadata must be rollbacked if anything + * goes wrong. + */ +void +SyncNodeMetadataToNodes(void) +{ + EnsureCoordinator(); + + /* + * Request a RowExclusiveLock so we don't run concurrently with other + * functions updating pg_dist_node, but allow concurrency with functions + * which are just reading from pg_dist_node. 
+ */ + if (!ConditionalLockRelationOid(DistNodeRelationId(), RowExclusiveLock)) + { + ereport(ERROR, (errmsg("cannot sync metadata because a concurrent " + "metadata syncing operation is in progress"))); + } + + List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock); + WorkerNode *workerNode = NULL; + foreach_ptr(workerNode, workerList) + { + if (workerNode->hasMetadata) + { + SetWorkerColumnLocalOnly(workerNode, Anum_pg_dist_node_metadatasynced, + BoolGetDatum(true)); + + bool raiseOnError = true; + SyncNodeMetadataSnapshotToNode(workerNode, raiseOnError); + } + } +} + + /* * SyncNodeMetadataToNodesMain is the main function for syncing node metadata to * MX nodes. It retries until success and then exits. @@ -2334,7 +2421,7 @@ SyncNodeMetadataToNodesMain(Datum main_arg) { UseCoordinatedTransaction(); - NodeMetadataSyncResult result = SyncNodeMetadataToNodes(); + NodeMetadataSyncResult result = SyncNodeMetadataToNodesOptional(); syncedAllNodes = (result == NODE_METADATA_SYNC_SUCCESS); /* we use LISTEN/NOTIFY to wait for metadata syncing in tests */ @@ -3393,12 +3480,19 @@ ColocationGroupCreateCommandList(void) "distributioncolumncollationschema) AS (VALUES "); Relation pgDistColocation = table_open(DistColocationRelationId(), AccessShareLock); + Relation colocationIdIndexRel = index_open(DistColocationIndexId(), AccessShareLock); - bool indexOK = false; - SysScanDesc scanDescriptor = systable_beginscan(pgDistColocation, InvalidOid, indexOK, - NULL, 0, NULL); + /* + * It is not strictly necessary to read the tuples in order. + * However, it is useful to get consistent behavior, both for regression + * tests and also in production systems. + */ + SysScanDesc scanDescriptor = + systable_beginscan_ordered(pgDistColocation, colocationIdIndexRel, + NULL, 0, NULL); - HeapTuple colocationTuple = systable_getnext(scanDescriptor); + HeapTuple colocationTuple = systable_getnext_ordered(scanDescriptor, + ForwardScanDirection); while (HeapTupleIsValid(colocationTuple)) { @@ -3456,10 +3550,11 @@ ColocationGroupCreateCommandList(void) "NULL, NULL)"); } - colocationTuple = systable_getnext(scanDescriptor); + colocationTuple = systable_getnext_ordered(scanDescriptor, ForwardScanDirection); } - systable_endscan(scanDescriptor); + systable_endscan_ordered(scanDescriptor); + index_close(colocationIdIndexRel, AccessShareLock); table_close(pgDistColocation, AccessShareLock); if (!hasColocations) diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index c66b0d3e2..c19148317 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -106,17 +106,18 @@ static void InsertPlaceholderCoordinatorRecord(void); static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport, NodeMetadata *nodeMetadata); static void DeleteNodeRow(char *nodename, int32 nodeport); -static void SyncDistributedObjectsToNode(WorkerNode *workerNode); +static void SyncDistributedObjectsToNodeList(List *workerNodeList); static void UpdateLocalGroupIdOnNode(WorkerNode *workerNode); -static void SyncPgDistTableMetadataToNode(WorkerNode *workerNode); +static void SyncPgDistTableMetadataToNodeList(List *nodeList); static List * InterTableRelationshipCommandList(); +static void BlockDistributedQueriesOnMetadataNodes(void); static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple); static List * PropagateNodeWideObjectsCommandList(); static WorkerNode * ModifiableWorkerNode(const char 
*nodeName, int32 nodePort); static bool NodeIsLocal(WorkerNode *worker); static void SetLockTimeoutLocally(int32 lock_cooldown); static void UpdateNodeLocation(int32 nodeId, char *newNodeName, int32 newNodePort); -static bool UnsetMetadataSyncedForAll(void); +static bool UnsetMetadataSyncedForAllWorkers(void); static char * GetMetadataSyncCommandToSetNodeColumn(WorkerNode *workerNode, int columnIndex, Datum value); @@ -150,6 +151,7 @@ PG_FUNCTION_INFO_V1(get_shard_id_for_distribution_column); PG_FUNCTION_INFO_V1(citus_nodename_for_nodeid); PG_FUNCTION_INFO_V1(citus_nodeport_for_nodeid); PG_FUNCTION_INFO_V1(citus_coordinator_nodeid); +PG_FUNCTION_INFO_V1(citus_is_coordinator); /* @@ -451,7 +453,7 @@ citus_disable_node(PG_FUNCTION_ARGS) { text *nodeNameText = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); - bool forceDisableNode = PG_GETARG_BOOL(2); + bool synchronousDisableNode = PG_GETARG_BOOL(2); char *nodeName = text_to_cstring(nodeNameText); WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort); @@ -462,8 +464,10 @@ citus_disable_node(PG_FUNCTION_ARGS) "isactive"); WorkerNode *firstWorkerNode = GetFirstPrimaryWorkerNode(); - if (!forceDisableNode && firstWorkerNode && - firstWorkerNode->nodeId == workerNode->nodeId) + bool disablingFirstNode = + (firstWorkerNode && firstWorkerNode->nodeId == workerNode->nodeId); + + if (disablingFirstNode && !synchronousDisableNode) { /* * We sync metadata async and optionally in the background worker, @@ -477,16 +481,21 @@ citus_disable_node(PG_FUNCTION_ARGS) * possibility of diverged shard placements for the same shard. * * To prevent that, we currently do not allow disabling the first - * worker node. + * worker node unless it is explicitly opted synchronous. */ ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("disabling the first worker node in the " "metadata is not allowed"), - errhint("You can force disabling node, but this operation " - "might cause replicated shards to diverge: SELECT " - "citus_disable_node('%s', %d, force:=true);", - workerNode->workerName, - nodePort))); + errhint("You can force disabling node, SELECT " + "citus_disable_node('%s', %d, " + "synchronous:=true);", workerNode->workerName, + nodePort), + errdetail("Citus uses the first worker node in the " + "metadata for certain internal operations when " + "replicated tables are modified. Synchronous mode " + "ensures that all nodes have the same view of the " + "first worker node, which is used for certain " + "locking operations."))); } /* @@ -505,38 +514,55 @@ citus_disable_node(PG_FUNCTION_ARGS) * for any given shard. */ ErrorIfNodeContainsNonRemovablePlacements(workerNode); - - bool onlyConsiderActivePlacements = false; - if (NodeGroupHasShardPlacements(workerNode->groupId, - onlyConsiderActivePlacements)) - { - ereport(NOTICE, (errmsg( - "Node %s:%d has active shard placements. Some queries " - "may fail after this operation. Use " - "SELECT citus_activate_node('%s', %d) to activate this " - "node back.", - workerNode->workerName, nodePort, - workerNode->workerName, - nodePort))); - } } TransactionModifiedNodeMetadata = true; - /* - * We have not propagated the node metadata changes yet, make sure that all the - * active nodes get the metadata updates. We defer this operation to the - * background worker to make it possible disabling nodes when multiple nodes - * are down. - * - * Note that the active placements reside on the active nodes. 
Hence, when - * Citus finds active placements, it filters out the placements that are on - * the disabled nodes. That's why, we don't have to change/sync placement - * metadata at this point. Instead, we defer that to citus_activate_node() - * where we expect all nodes up and running. - */ - if (UnsetMetadataSyncedForAll()) + if (synchronousDisableNode) { + /* + * The user might pick between sync vs async options. + * - Pros for the sync option: + * (a) the changes become visible on the cluster immediately + * (b) even if the first worker node is disabled, there is no + * risk of divergence of the placements of replicated shards + * - Cons for the sync options: + * (a) Does not work within 2PC transaction (e.g., BEGIN; + * citus_disable_node(); PREPARE TRANSACTION ...); + * (b) If there are multiple node failures (e.g., one another node + * than the current node being disabled), the sync option would + * fail because it'd try to sync the metadata changes to a node + * that is not up and running. + */ + if (firstWorkerNode && firstWorkerNode->nodeId == workerNode->nodeId) + { + /* + * We cannot let any modification query on a replicated table to run + * concurrently with citus_disable_node() on the first worker node. If + * we let that, some worker nodes might calculate FirstWorkerNode() + * different than others. See LockShardListResourcesOnFirstWorker() + * for the details. + */ + BlockDistributedQueriesOnMetadataNodes(); + } + + SyncNodeMetadataToNodes(); + } + else if (UnsetMetadataSyncedForAllWorkers()) + { + /* + * We have not propagated the node metadata changes yet, make sure that all the + * active nodes get the metadata updates. We defer this operation to the + * background worker to make it possible disabling nodes when multiple nodes + * are down. + * + * Note that the active placements reside on the active nodes. Hence, when + * Citus finds active placements, it filters out the placements that are on + * the disabled nodes. That's why, we don't have to change/sync placement + * metadata at this point. Instead, we defer that to citus_activate_node() + * where we expect all nodes up and running. + */ + TriggerNodeMetadataSyncOnCommit(); } @@ -544,6 +570,33 @@ citus_disable_node(PG_FUNCTION_ARGS) } +/* + * BlockDistributedQueriesOnMetadataNodes blocks all the modification queries on + * all nodes. Hence, should be used with caution. + */ +static void +BlockDistributedQueriesOnMetadataNodes(void) +{ + /* first, block on the coordinator */ + LockRelationOid(DistNodeRelationId(), ExclusiveLock); + + /* + * Note that we might re-design this lock to be more granular than + * pg_dist_node, scoping only for modifications on the replicated + * tables. However, we currently do not have any such mechanism and + * given that citus_disable_node() runs instantly, it seems acceptable + * to block reads (or modifications on non-replicated tables) for + * a while. + */ + + /* only superuser can disable node */ + Assert(superuser()); + + SendCommandToWorkersWithMetadata( + "LOCK TABLE pg_catalog.pg_dist_node IN EXCLUSIVE MODE;"); +} + + /* * master_disable_node is a wrapper function for old UDF name. */ @@ -790,7 +843,7 @@ SyncDistributedObjectsCommandList(WorkerNode *workerNode) /* - * SyncDistributedObjectsToNode sync the distributed objects to the node. It includes + * SyncDistributedObjectsToNodeList sync the distributed objects to the node. 
It includes * - All dependencies (e.g., types, schemas, sequences) * - All shell distributed table * - Inter relation between those shell tables @@ -799,17 +852,29 @@ SyncDistributedObjectsCommandList(WorkerNode *workerNode) * since all the dependencies should be present in the coordinator already. */ static void -SyncDistributedObjectsToNode(WorkerNode *workerNode) +SyncDistributedObjectsToNodeList(List *workerNodeList) { - if (NodeIsCoordinator(workerNode)) + List *workerNodesToSync = NIL; + WorkerNode *workerNode = NULL; + foreach_ptr(workerNode, workerNodeList) { - /* coordinator has all the objects */ - return; + if (NodeIsCoordinator(workerNode)) + { + /* coordinator has all the objects */ + continue; + } + + if (!NodeIsPrimary(workerNode)) + { + /* secondary nodes gets the objects from their primaries via replication */ + continue; + } + + workerNodesToSync = lappend(workerNodesToSync, workerNode); } - if (!NodeIsPrimary(workerNode)) + if (workerNodesToSync == NIL) { - /* secondary nodes gets the objects from their primaries via replication */ return; } @@ -821,9 +886,8 @@ SyncDistributedObjectsToNode(WorkerNode *workerNode) /* send commands to new workers, the current user should be a superuser */ Assert(superuser()); - SendMetadataCommandListToWorkerInCoordinatedTransaction( - workerNode->workerName, - workerNode->workerPort, + SendMetadataCommandListToWorkerListInCoordinatedTransaction( + workerNodesToSync, CurrentUserName(), commandList); } @@ -841,9 +905,8 @@ UpdateLocalGroupIdOnNode(WorkerNode *workerNode) /* send commands to new workers, the current user should be a superuser */ Assert(superuser()); - SendMetadataCommandListToWorkerInCoordinatedTransaction( - workerNode->workerName, - workerNode->workerPort, + SendMetadataCommandListToWorkerListInCoordinatedTransaction( + list_make1(workerNode), CurrentUserName(), commandList); } @@ -851,25 +914,33 @@ UpdateLocalGroupIdOnNode(WorkerNode *workerNode) /* - * SyncPgDistTableMetadataToNode syncs the pg_dist_partition, pg_dist_shard + * SyncPgDistTableMetadataToNodeList syncs the pg_dist_partition, pg_dist_shard * pg_dist_placement and pg_dist_object metadata entries. * */ static void -SyncPgDistTableMetadataToNode(WorkerNode *workerNode) +SyncPgDistTableMetadataToNodeList(List *nodeList) { - if (NodeIsPrimary(workerNode) && !NodeIsCoordinator(workerNode)) - { - List *syncPgDistMetadataCommandList = PgDistTableMetadataSyncCommandList(); + /* send commands to new workers, the current user should be a superuser */ + Assert(superuser()); - /* send commands to new workers, the current user should be a superuser */ - Assert(superuser()); - SendMetadataCommandListToWorkerInCoordinatedTransaction( - workerNode->workerName, - workerNode->workerPort, - CurrentUserName(), - syncPgDistMetadataCommandList); + List *syncPgDistMetadataCommandList = PgDistTableMetadataSyncCommandList(); + + List *nodesWithMetadata = NIL; + WorkerNode *workerNode = NULL; + foreach_ptr(workerNode, nodeList) + { + if (NodeIsPrimary(workerNode) && !NodeIsCoordinator(workerNode)) + { + nodesWithMetadata = lappend(nodesWithMetadata, workerNode); + } } + + + SendMetadataCommandListToWorkerListInCoordinatedTransaction( + nodesWithMetadata, + CurrentUserName(), + syncPgDistMetadataCommandList); } @@ -1065,15 +1136,14 @@ PrimaryNodeForGroup(int32 groupId, bool *groupContainsNodes) /* - * ActivateNode activates the node with nodeName and nodePort. Currently, activation - * includes only replicating the reference tables and setting isactive column of the - * given node. 
+ * ActivateNodeList iterates over the nodeList and activates the nodes. + * Some part of the node activation is done parallel across the nodes, + * such as syncing the metadata. However, reference table replication is + * done one by one across nodes. */ -int -ActivateNode(char *nodeName, int nodePort) +void +ActivateNodeList(List *nodeList) { - bool isActive = true; - /* * We currently require the object propagation to happen via superuser, * see #5139. While activating a node, we sync both metadata and object @@ -1090,86 +1160,130 @@ ActivateNode(char *nodeName, int nodePort) /* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); - /* - * First, locally mark the node is active, if everything goes well, - * we are going to sync this information to all the metadata nodes. - */ - WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); - if (workerNode == NULL) - { - ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", nodeName, nodePort))); - } - /* - * Delete existing reference and replicated table placements on the - * given groupId if the group has been disabled earlier (e.g., isActive - * set to false). - * - * Sync the metadata changes to all existing metadata nodes irrespective - * of the current nodes' metadata sync state. We expect all nodes up - * and running when another node is activated. - */ - if (!workerNode->isActive && NodeIsPrimary(workerNode)) - { - bool localOnly = false; - DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId, - localOnly); - } - - workerNode = - SetWorkerColumnLocalOnly(workerNode, Anum_pg_dist_node_isactive, - BoolGetDatum(isActive)); - - /* TODO: Once all tests will be enabled for MX, we can remove sync by default check */ - bool syncMetadata = EnableMetadataSync && NodeIsPrimary(workerNode); - - if (syncMetadata) + List *nodeToSyncMetadata = NIL; + WorkerNode *node = NULL; + foreach_ptr(node, nodeList) { /* - * We are going to sync the metadata anyway in this transaction, so do - * not fail just because the current metadata is not synced. + * First, locally mark the node is active, if everything goes well, + * we are going to sync this information to all the metadata nodes. */ - SetWorkerColumn(workerNode, Anum_pg_dist_node_metadatasynced, - BoolGetDatum(true)); - - /* - * Update local group id first, as object dependency logic requires to have - * updated local group id. - */ - UpdateLocalGroupIdOnNode(workerNode); - - /* - * Sync distributed objects first. We must sync distributed objects before - * replicating reference tables to the remote node, as reference tables may - * need such objects. - */ - SyncDistributedObjectsToNode(workerNode); - - /* - * We need to replicate reference tables before syncing node metadata, otherwise - * reference table replication logic would try to get lock on the new node before - * having the shard placement on it - */ - if (ReplicateReferenceTablesOnActivate) + WorkerNode *workerNode = + FindWorkerNodeAnyCluster(node->workerName, node->workerPort); + if (workerNode == NULL) { - ReplicateAllReferenceTablesToNode(workerNode); + ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", node->workerName, + node->workerPort))); } - /* - * Sync node metadata. We must sync node metadata before syncing table - * related pg_dist_xxx metadata. Since table related metadata requires - * to have right pg_dist_node entries. 
- */ - SyncNodeMetadataToNode(nodeName, nodePort); + /* both nodes should be the same */ + Assert(workerNode->nodeId == node->nodeId); /* - * As the last step, sync the table related metadata to the remote node. - * We must handle it as the last step because of limitations shared with - * above comments. + * Delete existing reference and replicated table placements on the + * given groupId if the group has been disabled earlier (e.g., isActive + * set to false). + * + * Sync the metadata changes to all existing metadata nodes irrespective + * of the current nodes' metadata sync state. We expect all nodes up + * and running when another node is activated. */ - SyncPgDistTableMetadataToNode(workerNode); + if (!workerNode->isActive && NodeIsPrimary(workerNode)) + { + bool localOnly = false; + DeleteAllReplicatedTablePlacementsFromNodeGroup(workerNode->groupId, + localOnly); + } + + workerNode = + SetWorkerColumnLocalOnly(workerNode, Anum_pg_dist_node_isactive, + BoolGetDatum(true)); + + /* TODO: Once all tests will be enabled for MX, we can remove sync by default check */ + bool syncMetadata = EnableMetadataSync && NodeIsPrimary(workerNode); + if (syncMetadata) + { + /* + * We are going to sync the metadata anyway in this transaction, so do + * not fail just because the current metadata is not synced. + */ + SetWorkerColumn(workerNode, Anum_pg_dist_node_metadatasynced, + BoolGetDatum(true)); + + /* + * Update local group id first, as object dependency logic requires to have + * updated local group id. + */ + UpdateLocalGroupIdOnNode(workerNode); + + nodeToSyncMetadata = lappend(nodeToSyncMetadata, workerNode); + } } + /* + * Sync distributed objects first. We must sync distributed objects before + * replicating reference tables to the remote node, as reference tables may + * need such objects. + */ + SyncDistributedObjectsToNodeList(nodeToSyncMetadata); + + if (ReplicateReferenceTablesOnActivate) + { + foreach_ptr(node, nodeList) + { + /* + * We need to replicate reference tables before syncing node metadata, otherwise + * reference table replication logic would try to get lock on the new node before + * having the shard placement on it + */ + if (NodeIsPrimary(node)) + { + ReplicateAllReferenceTablesToNode(node); + } + } + } + + /* + * Sync node metadata. We must sync node metadata before syncing table + * related pg_dist_xxx metadata. Since table related metadata requires + * to have right pg_dist_node entries. + */ + foreach_ptr(node, nodeToSyncMetadata) + { + SyncNodeMetadataToNode(node->workerName, node->workerPort); + } + + /* + * As the last step, sync the table related metadata to the remote node. + * We must handle it as the last step because of limitations shared with + * above comments. + */ + SyncPgDistTableMetadataToNodeList(nodeToSyncMetadata); + + foreach_ptr(node, nodeList) + { + bool isActive = true; + + /* finally, let all other active metadata nodes to learn about this change */ + SetNodeState(node->workerName, node->workerPort, isActive); + } +} + + +/* + * ActivateNode activates the node with nodeName and nodePort. Currently, activation + * includes only replicating the reference tables and setting isactive column of the + * given node. 
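
The `ActivateNodeList()` refactor above batches metadata and object sync across nodes; from SQL it is still driven through the existing UDFs. A hedged sketch with hypothetical hosts:

```sql
-- citus_add_node() activates the new node as part of adding it;
-- citus_activate_node() re-activates a node that was disabled earlier.
SELECT citus_add_node('worker-2', 5432);
SELECT citus_activate_node('worker-1', 5432);
```
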
+ */ +int +ActivateNode(char *nodeName, int nodePort) +{ + bool isActive = true; + + WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort); + ActivateNodeList(list_make1(workerNode)); + /* finally, let all other active metadata nodes to learn about this change */ WorkerNode *newWorkerNode = SetNodeState(nodeName, nodePort, isActive); Assert(newWorkerNode->nodeId == workerNode->nodeId); @@ -1319,7 +1433,7 @@ citus_update_node(PG_FUNCTION_ARGS) * early, but that's fine, since this will start a retry loop with * 5 second intervals until sync is complete. */ - if (UnsetMetadataSyncedForAll()) + if (UnsetMetadataSyncedForAllWorkers()) { TriggerNodeMetadataSyncOnCommit(); } @@ -1558,6 +1672,29 @@ citus_coordinator_nodeid(PG_FUNCTION_ARGS) } +/* + * citus_is_coordinator returns whether the current node is a coordinator. + * We consider the node a coordinator if its group ID is 0 and it has + * pg_dist_node entries (only group ID 0 could indicate a worker without + * metadata). + */ +Datum +citus_is_coordinator(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + bool isCoordinator = false; + + if (GetLocalGroupId() == COORDINATOR_GROUP_ID && + ActivePrimaryNodeCount() > 0) + { + isCoordinator = true; + } + + PG_RETURN_BOOL(isCoordinator); +} + + /* * FindWorkerNode searches over the worker nodes and returns the workerNode * if it already exists. Else, the function returns NULL. @@ -2646,15 +2783,15 @@ DatumToString(Datum datum, Oid dataType) /* - * UnsetMetadataSyncedForAll sets the metadatasynced column of all metadata - * nodes to false. It returns true if it updated at least a node. + * UnsetMetadataSyncedForAllWorkers sets the metadatasynced column of all metadata + * worker nodes to false. It returns true if it updated at least a node. */ static bool -UnsetMetadataSyncedForAll(void) +UnsetMetadataSyncedForAllWorkers(void) { bool updatedAtLeastOne = false; - ScanKeyData scanKey[2]; - int scanKeyCount = 2; + ScanKeyData scanKey[3]; + int scanKeyCount = 3; bool indexOK = false; /* @@ -2669,6 +2806,11 @@ UnsetMetadataSyncedForAll(void) ScanKeyInit(&scanKey[1], Anum_pg_dist_node_metadatasynced, BTEqualStrategyNumber, F_BOOLEQ, BoolGetDatum(true)); + /* coordinator always has the up to date metadata */ + ScanKeyInit(&scanKey[2], Anum_pg_dist_node_groupid, + BTGreaterStrategyNumber, F_INT4GT, + Int32GetDatum(COORDINATOR_GROUP_ID)); + CatalogIndexState indstate = CatalogOpenIndexes(relation); SysScanDesc scanDescriptor = systable_beginscan(relation, diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c similarity index 98% rename from src/backend/distributed/metadata/pg_get_object_address_12_13_14.c rename to src/backend/distributed/metadata/pg_get_object_address_13_14_15.c index 26248f025..3a7c1fc49 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * pg_get_object_address_12_13_14.c + * pg_get_object_address_13_14_15.c * * Copied functions from Postgres pg_get_object_address with acl/owner check. 
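
The `citus_is_coordinator()` UDF registered in the `node_metadata.c` hunk above takes no arguments and returns a boolean:

```sql
-- True only on a node whose group ID is 0 and that has pg_dist_node entries,
-- i.e. a coordinator that is part of the metadata.
SELECT citus_is_coordinator();
```
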
* Since we need to use intermediate data types Relation and Node from @@ -40,11 +40,6 @@ static void ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, Relation *relation); static List * textarray_to_strvaluelist(ArrayType *arr); -/* It is defined on PG >= 13 versions by default */ -#if PG_VERSION_NUM < PG_VERSION_13 - #define TYPALIGN_INT 'i' -#endif - /* * PgGetObjectAddress gets the object address. This function is mostly copied from * pg_get_object_address of the PG code. We need to copy that function to use @@ -283,6 +278,9 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr) case OBJECT_FDW: case OBJECT_FOREIGN_SERVER: case OBJECT_LANGUAGE: +#if PG_VERSION_NUM >= PG_VERSION_15 + case OBJECT_PARAMETER_ACL: +#endif case OBJECT_PUBLICATION: case OBJECT_ROLE: case OBJECT_SCHEMA: @@ -320,6 +318,9 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr) break; } +#if PG_VERSION_NUM >= PG_VERSION_15 + case OBJECT_PUBLICATION_NAMESPACE: +#endif case OBJECT_USER_MAPPING: { objnode = (Node *) list_make2(linitial(name), linitial(args)); @@ -419,6 +420,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, case OBJECT_TABLE: case OBJECT_EXTENSION: case OBJECT_COLLATION: + case OBJECT_VIEW: { check_object_ownership(userId, type, *addr, node, *relation); break; diff --git a/src/backend/distributed/operations/citus_tools.c b/src/backend/distributed/operations/citus_tools.c index 8dfc862b5..9d3910a32 100644 --- a/src/backend/distributed/operations/citus_tools.c +++ b/src/backend/distributed/operations/citus_tools.c @@ -31,6 +31,9 @@ #include "utils/builtins.h" +#define SET_APPLICATION_NAME_QUERY \ + "SET application_name TO '" CITUS_RUN_COMMAND_APPLICATION_NAME "'" + PG_FUNCTION_INFO_V1(master_run_on_worker); static int ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray, @@ -44,15 +47,15 @@ static void ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int commandCount); static bool GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus, StringInfo queryResultString); -static bool EvaluateQueryResult(MultiConnection *connection, PGresult *queryResult, - StringInfo queryResultString); -static void StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString); static void ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, StringInfo *commandStringArray, bool *statusArray, StringInfo *resultStringArray, int commandCount); +static bool ExecuteOptionalSingleResultCommand(MultiConnection *connection, + char *queryString, StringInfo + queryResultString); static Tuplestorestate * CreateTupleStore(TupleDesc tupleDescriptor, StringInfo *nodeNameArray, int *nodePortArray, bool *statusArray, @@ -241,18 +244,66 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor FinishConnectionEstablishment(connection); + /* check whether connection attempt was successful */ if (PQstatus(connection->pgConn) != CONNECTION_OK) { appendStringInfo(queryResultString, "failed to connect to %s:%d", nodeName, - (int) nodePort); + nodePort); statusArray[commandIndex] = false; + CloseConnection(connection); connectionArray[commandIndex] = NULL; finishedCount++; + continue; } - else + + /* set the application_name to avoid nested execution checks */ + int querySent = SendRemoteCommand(connection, SET_APPLICATION_NAME_QUERY); + if (querySent == 0) { - statusArray[commandIndex] = true; + StoreErrorMessage(connection, 
queryResultString); + statusArray[commandIndex] = false; + CloseConnection(connection); + connectionArray[commandIndex] = NULL; + finishedCount++; + continue; } + + statusArray[commandIndex] = true; + } + + /* send queries at once */ + for (int commandIndex = 0; commandIndex < commandCount; commandIndex++) + { + MultiConnection *connection = connectionArray[commandIndex]; + if (connection == NULL) + { + continue; + } + + bool raiseInterrupts = true; + PGresult *queryResult = GetRemoteCommandResult(connection, raiseInterrupts); + + /* write the result value or error message to queryResultString */ + StringInfo queryResultString = resultStringArray[commandIndex]; + bool success = EvaluateSingleQueryResult(connection, queryResult, + queryResultString); + if (!success) + { + statusArray[commandIndex] = false; + CloseConnection(connection); + connectionArray[commandIndex] = NULL; + finishedCount++; + continue; + } + + /* clear results for the next command */ + PQclear(queryResult); + + bool raiseErrors = false; + ClearResults(connection, raiseErrors); + + /* we only care about the SET application_name result on failure */ + resetStringInfo(queryResultString); } /* send queries at once */ @@ -359,7 +410,7 @@ GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus, /* query result is available at this point */ PGresult *queryResult = PQgetResult(connection->pgConn); - bool success = EvaluateQueryResult(connection, queryResult, queryResultString); + bool success = EvaluateSingleQueryResult(connection, queryResult, queryResultString); PQclear(queryResult); *resultStatus = success; @@ -368,95 +419,6 @@ GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus, } -/* - * EvaluateQueryResult gets the query result from connection and returns - * true if the query is executed successfully, false otherwise. A query result - * or an error message is returned in queryResultString. The function requires - * that the query returns a single column/single row result. It returns an - * error otherwise. - */ -static bool -EvaluateQueryResult(MultiConnection *connection, PGresult *queryResult, - StringInfo queryResultString) -{ - bool success = false; - - ExecStatusType resultStatus = PQresultStatus(queryResult); - if (resultStatus == PGRES_COMMAND_OK) - { - char *commandStatus = PQcmdStatus(queryResult); - appendStringInfo(queryResultString, "%s", commandStatus); - success = true; - } - else if (resultStatus == PGRES_TUPLES_OK) - { - int ntuples = PQntuples(queryResult); - int nfields = PQnfields(queryResult); - - /* error if query returns more than 1 rows, or more than 1 fields */ - if (nfields != 1) - { - appendStringInfo(queryResultString, - "expected a single column in query target"); - } - else if (ntuples > 1) - { - appendStringInfo(queryResultString, - "expected a single row in query result"); - } - else - { - int row = 0; - int column = 0; - if (!PQgetisnull(queryResult, row, column)) - { - char *queryResultValue = PQgetvalue(queryResult, row, column); - appendStringInfo(queryResultString, "%s", queryResultValue); - } - success = true; - } - } - else - { - StoreErrorMessage(connection, queryResultString); - } - - return success; -} - - -/* - * StoreErrorMessage gets the error message from connection and stores it - * in queryResultString. It should be called only when error is present - * otherwise it would return a default error message. 
- */ -static void -StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString) -{ - char *errorMessage = PQerrorMessage(connection->pgConn); - if (errorMessage != NULL) - { - /* copy the error message to a writable memory */ - errorMessage = pnstrdup(errorMessage, strlen(errorMessage)); - - char *firstNewlineIndex = strchr(errorMessage, '\n'); - - /* trim the error message at the line break */ - if (firstNewlineIndex != NULL) - { - *firstNewlineIndex = '\0'; - } - } - else - { - /* put a default error message if no error message is reported */ - errorMessage = "An error occurred while running the query"; - } - - appendStringInfo(queryResultString, "%s", errorMessage); -} - - /* * ExecuteCommandsAndStoreResults connects to each node specified in * nodeNameArray and nodePortArray, and executes command in commandStringArray @@ -471,63 +433,76 @@ ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, { for (int commandIndex = 0; commandIndex < commandCount; commandIndex++) { + CHECK_FOR_INTERRUPTS(); + char *nodeName = nodeNameArray[commandIndex]->data; int32 nodePort = nodePortArray[commandIndex]; char *queryString = commandStringArray[commandIndex]->data; StringInfo queryResultString = resultStringArray[commandIndex]; - bool reportResultError = false; - bool success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString, - queryResultString, reportResultError); + int connectionFlags = FORCE_NEW_CONNECTION; + MultiConnection *connection = + GetNodeConnection(connectionFlags, nodeName, nodePort); + + /* set the application_name to avoid nested execution checks */ + bool success = ExecuteOptionalSingleResultCommand(connection, + SET_APPLICATION_NAME_QUERY, + queryResultString); + if (!success) + { + statusArray[commandIndex] = false; + CloseConnection(connection); + continue; + } + + /* we only care about the SET application_name result on failure */ + resetStringInfo(queryResultString); + + /* send the actual query string */ + success = ExecuteOptionalSingleResultCommand(connection, queryString, + queryResultString); statusArray[commandIndex] = success; - - CHECK_FOR_INTERRUPTS(); + CloseConnection(connection); } } /* - * ExecuteRemoteQueryOrCommand executes a query at specified remote node using + * ExecuteOptionalSingleResultCommand executes a query at specified remote node using * the calling user's credentials. The function returns the query status * (success/failure), and query result. The query is expected to return a single * target containing zero or one rows. 
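
With the `citus_tools.c` changes above, `master_run_on_worker` now issues `SET application_name TO` `CITUS_RUN_COMMAND_APPLICATION_NAME` on every connection before running the user's command, so those backends are recognizable to the nested-execution checks. A hedged usage sketch, assuming the long-standing `master_run_on_worker(text[], int[], text[], bool)` signature and hypothetical worker names:

```sql
SELECT *
FROM master_run_on_worker(
         ARRAY['worker-1', 'worker-2'],        -- node names
         ARRAY[5432, 5432],                    -- node ports
         ARRAY['SELECT count(*) FROM pg_dist_partition',
               'SELECT count(*) FROM pg_dist_partition'],
         true);                                -- run the commands in parallel
```
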
*/ -bool -ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString, - StringInfo queryResultString, bool reportResultError) +static bool +ExecuteOptionalSingleResultCommand(MultiConnection *connection, char *queryString, + StringInfo queryResultString) { - int connectionFlags = FORCE_NEW_CONNECTION; - MultiConnection *connection = - GetNodeConnection(connectionFlags, nodeName, nodePort); - bool raiseInterrupts = true; - if (PQstatus(connection->pgConn) != CONNECTION_OK) { - appendStringInfo(queryResultString, "failed to connect to %s:%d", nodeName, - (int) nodePort); + appendStringInfo(queryResultString, "failed to connect to %s:%d", + connection->hostname, connection->port); return false; } if (!SendRemoteCommand(connection, queryString)) { - appendStringInfo(queryResultString, "failed to send query to %s:%d", nodeName, - (int) nodePort); + appendStringInfo(queryResultString, "failed to send query to %s:%d", + connection->hostname, connection->port); return false; } + bool raiseInterrupts = true; PGresult *queryResult = GetRemoteCommandResult(connection, raiseInterrupts); - bool success = EvaluateQueryResult(connection, queryResult, queryResultString); - if (!success && reportResultError) - { - ReportResultError(connection, queryResult, ERROR); - } + /* write the result value or error message to queryResultString */ + bool success = EvaluateSingleQueryResult(connection, queryResult, queryResultString); + /* clear result and close the connection */ PQclear(queryResult); - /* close the connection */ - CloseConnection(connection); + bool raiseErrors = false; + ClearResults(connection, raiseErrors); return success; } diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index bd1a53ad4..ab99ac93a 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -1039,12 +1039,12 @@ CitusCreateAlterColumnarTableSet(char *qualifiedRelationName, initStringInfo(&buf); appendStringInfo(&buf, - "SELECT alter_columnar_table_set(%s, " - "chunk_group_row_limit => %d, " - "stripe_row_limit => %lu, " - "compression_level => %d, " - "compression => %s);", - quote_literal_cstr(qualifiedRelationName), + "ALTER TABLE %s SET (" + "columnar.chunk_group_row_limit = %d, " + "columnar.stripe_row_limit = %lu, " + "columnar.compression_level = %d, " + "columnar.compression = %s);", + qualifiedRelationName, options->chunkRowCount, options->stripeRowCount, options->compressionLevel, diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index 16ee50c52..ecb0d6673 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -54,7 +54,6 @@ #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/fmgroids.h" -#include "utils/int8.h" #include "utils/json.h" #include "utils/lsyscache.h" #include "utils/memutils.h" @@ -1396,9 +1395,9 @@ GetShardStatistics(MultiConnection *connection, HTAB *shardIds) for (int rowIndex = 0; rowIndex < rowCount; rowIndex++) { char *shardIdString = PQgetvalue(result, rowIndex, 0); - uint64 shardId = pg_strtouint64(shardIdString, NULL, 10); + uint64 shardId = strtou64(shardIdString, NULL, 10); char *sizeString = PQgetvalue(result, rowIndex, 1); - uint64 totalSize = pg_strtouint64(sizeString, NULL, 10); + uint64 totalSize = strtou64(sizeString, NULL, 10); ShardStatistics *statistics = 
hash_search(shardStatistics, &shardId, HASH_ENTER, NULL); diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c index d6e9c0f2a..8f77205cb 100644 --- a/src/backend/distributed/operations/stage_protocol.c +++ b/src/backend/distributed/operations/stage_protocol.c @@ -923,7 +923,7 @@ WorkerShardStats(ShardPlacement *placement, Oid relationId, const char *shardNam } errno = 0; - uint64 tableSize = pg_strtouint64(tableSizeString, &tableSizeStringEnd, 0); + uint64 tableSize = strtou64(tableSizeString, &tableSizeStringEnd, 0); if (errno != 0 || (*tableSizeStringEnd) != '\0') { PQclear(queryResult); diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index 1054049e4..16c0afb54 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -393,7 +393,7 @@ NodeNamePortCompare(const char *workerLhsName, const char *workerRhsName, WorkerNode * GetFirstPrimaryWorkerNode(void) { - List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(NoLock); + List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock); WorkerNode *firstWorkerNode = NULL; WorkerNode *workerNode = NULL; foreach_ptr(workerNode, workerNodeList) diff --git a/src/backend/distributed/planner/cte_inline.c b/src/backend/distributed/planner/cte_inline.c index 4a3ba156f..2356ebf48 100644 --- a/src/backend/distributed/planner/cte_inline.c +++ b/src/backend/distributed/planner/cte_inline.c @@ -12,6 +12,7 @@ *------------------------------------------------------------------------- */ #include "postgres.h" +#include "pg_version_compat.h" #include "distributed/pg_version_constants.h" #include "distributed/cte_inline.h" @@ -309,7 +310,7 @@ inline_cte_walker(Node *node, inline_cte_walker_context *context) */ if (columnAliasCount >= columnIndex) { - Value *columnAlias = (Value *) list_nth(columnAliasList, columnIndex - 1); + String *columnAlias = (String *) list_nth(columnAliasList, columnIndex - 1); Assert(IsA(columnAlias, String)); TargetEntry *targetEntry = list_nth(rte->subquery->targetList, columnIndex - 1); diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 6e053cecd..39f6c0b63 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -1065,7 +1065,7 @@ CreateDistributedPlan(uint64 planId, Query *originalQuery, Query *query, ParamLi /* - * EnsurePartitionTableNotReplicated errors out if the infput relation is + * EnsurePartitionTableNotReplicated errors out if the input relation is * a partition table and the table has a replication factor greater than * one. 
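
As a side note to the `node_protocol.c` hunk earlier in this chunk: shard creation now propagates columnar settings with the reloption syntax instead of `alter_columnar_table_set()`. A hedged sketch of the kind of statement Citus emits for a shard (hypothetical shard name and values; the compression value may also be written as a quoted string):

```sql
ALTER TABLE columnar_events_102008 SET (
    columnar.chunk_group_row_limit = 10000,
    columnar.stripe_row_limit = 150000,
    columnar.compression_level = 3,
    columnar.compression = zstd);
```
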
* @@ -1353,7 +1353,7 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan) TargetEntry *targetEntry = NULL; foreach_ptr(targetEntry, customScan->scan.plan.targetlist) { - Value *columnName = makeString(targetEntry->resname); + String *columnName = makeString(targetEntry->resname); columnNameList = lappend(columnNameList, columnName); } diff --git a/src/backend/distributed/planner/fast_path_router_planner.c b/src/backend/distributed/planner/fast_path_router_planner.c index 8a2d87fe7..5d02be07c 100644 --- a/src/backend/distributed/planner/fast_path_router_planner.c +++ b/src/backend/distributed/planner/fast_path_router_planner.c @@ -102,15 +102,15 @@ PlannedStmt * GeneratePlaceHolderPlannedStmt(Query *parse) { PlannedStmt *result = makeNode(PlannedStmt); - SeqScan *seqScanNode = makeNode(SeqScan); - Plan *plan = &seqScanNode->plan; + Scan *scanNode = makeNode(Scan); + Plan *plan = &scanNode->plan; Node *distKey PG_USED_FOR_ASSERTS_ONLY = NULL; AssertArg(FastPathRouterQuery(parse, &distKey)); /* there is only a single relation rte */ - seqScanNode->scanrelid = 1; + scanNode->scanrelid = 1; plan->targetlist = copyObject(FetchStatementTargetList((Node *) parse)); diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 3ca22f3b1..ffba4d988 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -17,6 +17,7 @@ #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/defrem.h" +#include "distributed/backend_data.h" #include "distributed/metadata_utility.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index 4c6370e5a..a807085af 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -1062,8 +1062,8 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS) } /* resolve OIDs of unknown (user-defined) types */ - Query *analyzedQuery = parse_analyze_varparams(parseTree, queryString, - ¶mTypes, &numParams); + Query *analyzedQuery = parse_analyze_varparams_compat(parseTree, queryString, + ¶mTypes, &numParams, NULL); #if PG_VERSION_NUM >= PG_VERSION_14 diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index a26bf158d..b8d87c4b7 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -798,7 +798,7 @@ DerivedColumnNameList(uint32 columnCount, uint64 generatingJobId) appendStringInfo(columnName, UINT64_FORMAT "_", generatingJobId); appendStringInfo(columnName, "%u", columnIndex); - Value *columnValue = makeString(columnName->data); + String *columnValue = makeString(columnName->data); columnNameList = lappend(columnNameList, columnValue); } diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 017b46149..7c57a77f2 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -151,7 +151,7 @@ static Job * RouterJob(Query *originalQuery, static bool RelationPrunesToMultipleShards(List *relationShardList); static void NormalizeMultiRowInsertTargetList(Query *query); static void 
AppendNextDummyColReference(Alias *expendedReferenceNames); -static Value * MakeDummyColumnString(int dummyColumnId); +static String * MakeDummyColumnString(int dummyColumnId); static List * BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError); static List * GroupInsertValuesByShardId(List *insertValuesList); static List * ExtractInsertValuesList(Query *query, Var *partitionColumn); @@ -3249,7 +3249,7 @@ AppendNextDummyColReference(Alias *expendedReferenceNames) { int existingColReferences = list_length(expendedReferenceNames->colnames); int nextColReferenceId = existingColReferences + 1; - Value *missingColumnString = MakeDummyColumnString(nextColReferenceId); + String *missingColumnString = MakeDummyColumnString(nextColReferenceId); expendedReferenceNames->colnames = lappend(expendedReferenceNames->colnames, missingColumnString); } @@ -3259,12 +3259,12 @@ AppendNextDummyColReference(Alias *expendedReferenceNames) * MakeDummyColumnString returns a String (Value) object by appending given * integer to end of the "column" string. */ -static Value * +static String * MakeDummyColumnString(int dummyColumnId) { StringInfo dummyColumnStringInfo = makeStringInfo(); appendStringInfo(dummyColumnStringInfo, "column%d", dummyColumnId); - Value *dummyColumnString = makeString(dummyColumnStringInfo->data); + String *dummyColumnString = makeString(dummyColumnStringInfo->data); return dummyColumnString; } diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index 9138b1b80..e84c821fa 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -1952,7 +1952,7 @@ BuildReadIntermediateResultsQuery(List *targetEntryList, List *columnAliasList, */ if (columnAliasCount >= columnNumber) { - Value *columnAlias = (Value *) list_nth(columnAliasList, columnNumber - 1); + String *columnAlias = (String *) list_nth(columnAliasList, columnNumber - 1); Assert(IsA(columnAlias, String)); newTargetEntry->resname = strVal(columnAlias); } diff --git a/src/backend/distributed/relay/relay_event_utility.c b/src/backend/distributed/relay/relay_event_utility.c index 7388ff383..8f4821bc1 100644 --- a/src/backend/distributed/relay/relay_event_utility.c +++ b/src/backend/distributed/relay/relay_event_utility.c @@ -326,8 +326,8 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) if (objectType == OBJECT_TABLE || objectType == OBJECT_INDEX || objectType == OBJECT_FOREIGN_TABLE || objectType == OBJECT_FOREIGN_SERVER) { - Value *relationSchemaNameValue = NULL; - Value *relationNameValue = NULL; + String *relationSchemaNameValue = NULL; + String *relationNameValue = NULL; uint32 dropCount = list_length(dropStmt->objects); if (dropCount > 1) @@ -381,11 +381,11 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) /* prefix with schema name if it is not added already */ if (relationSchemaNameValue == NULL) { - Value *schemaNameValue = makeString(pstrdup(schemaName)); + String *schemaNameValue = makeString(pstrdup(schemaName)); relationNameList = lcons(schemaNameValue, relationNameList); } - char **relationName = &(relationNameValue->val.str); + char **relationName = &(strVal(relationNameValue)); AppendShardIdToName(relationName, shardId); } else if (objectType == OBJECT_POLICY) @@ -750,10 +750,10 @@ UpdateWholeRowColumnReferencesWalker(Node *node, uint64 *shardId) * extend the penultimate element with the shardId. 
*/ int colrefFieldCount = list_length(columnRef->fields); - Value *relnameValue = list_nth(columnRef->fields, colrefFieldCount - 2); + String *relnameValue = list_nth(columnRef->fields, colrefFieldCount - 2); Assert(IsA(relnameValue, String)); - AppendShardIdToName(&relnameValue->val.str, *shardId); + AppendShardIdToName(&strVal(relnameValue), *shardId); } /* might be more than one ColumnRef to visit */ diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 00b541968..65c12e92d 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -24,8 +24,11 @@ #include "safe_lib.h" #include "catalog/pg_authid.h" +#include "catalog/objectaccess.h" +#include "catalog/pg_extension.h" #include "citus_version.h" #include "commands/explain.h" +#include "commands/extension.h" #include "common/string.h" #include "executor/executor.h" #include "distributed/backend_data.h" @@ -74,7 +77,7 @@ #include "distributed/shared_library_init.h" #include "distributed/statistics_collection.h" #include "distributed/subplan_execution.h" - +#include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "distributed/transaction_recovery.h" #include "distributed/utils/directory.h" @@ -141,9 +144,12 @@ static int ReplicationModel = REPLICATION_MODEL_STREAMING; /* we override the application_name assign_hook and keep a pointer to the old one */ static GucStringAssignHook OldApplicationNameAssignHook = NULL; +static object_access_hook_type PrevObjectAccessHook = NULL; void _PG_init(void); +static void CitusObjectAccessHook(ObjectAccessType access, Oid classId, Oid objectId, int + subId, void *arg); static void DoInitialCleanup(void); static void ResizeStackToMaximumDepth(void); static void multi_log_hook(ErrorData *edata); @@ -159,9 +165,9 @@ static bool ErrorIfNotASuitableDeadlockFactor(double *newval, void **extra, static bool WarnIfDeprecatedExecutorUsed(int *newval, void **extra, GucSource source); static bool WarnIfReplicationModelIsSet(int *newval, void **extra, GucSource source); static bool NoticeIfSubqueryPushdownEnabled(bool *newval, void **extra, GucSource source); -static bool HideShardsFromAppNamePrefixesCheckHook(char **newval, void **extra, - GucSource source); -static void HideShardsFromAppNamePrefixesAssignHook(const char *newval, void *extra); +static bool ShowShardsForAppNamePrefixesCheckHook(char **newval, void **extra, + GucSource source); +static void ShowShardsForAppNamePrefixesAssignHook(const char *newval, void *extra); static void ApplicationNameAssignHook(const char *newval, void *extra); static bool NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source); static void NodeConninfoGucAssignHook(const char *newval, void *extra); @@ -334,9 +340,6 @@ _PG_init(void) /* intercept planner */ planner_hook = distributed_planner; - /* register utility hook */ - ProcessUtility_hook = multi_ProcessUtility; - /* register for planner hook */ set_rel_pathlist_hook = multi_relation_restriction_hook; set_join_pathlist_hook = multi_join_restriction_hook; @@ -384,23 +387,22 @@ _PG_init(void) DoInitialCleanup(); } + PrevObjectAccessHook = object_access_hook; + object_access_hook = CitusObjectAccessHook; + /* ensure columnar module is loaded at the right time */ load_file(COLUMNAR_MODULE_NAME, false); /* - * Now, acquire symbols from columnar module. First, acquire - * the address of the set options hook, and set it so that we - * can propagate options changes. 
+ * Register utility hook. This must be done after loading columnar, so + * that the citus hook is called first, followed by the columnar hook, + * followed by standard_ProcessUtility. That allows citus to distribute + * ALTER TABLE commands before columnar strips out the columnar-specific + * options. */ - ColumnarTableSetOptions_hook_type **ColumnarTableSetOptions_hook_ptr = - (ColumnarTableSetOptions_hook_type **) find_rendezvous_variable( - COLUMNAR_SETOPTIONS_HOOK_SYM); - - /* rendezvous variable registered during columnar initialization */ - Assert(ColumnarTableSetOptions_hook_ptr != NULL); - Assert(*ColumnarTableSetOptions_hook_ptr != NULL); - - **ColumnarTableSetOptions_hook_ptr = ColumnarTableSetOptionsHook; + PrevProcessUtility = (ProcessUtility_hook != NULL) ? + ProcessUtility_hook : standard_ProcessUtility; + ProcessUtility_hook = multi_ProcessUtility; /* * Acquire symbols for columnar functions that citus calls. @@ -670,6 +672,43 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL, NULL, NULL, NULL); + DefineCustomBoolVariable( + "citus.allow_nested_distributed_execution", + gettext_noop("Enables distributed execution within a task " + "of another distributed execution."), + gettext_noop("Nested distributed execution can happen when Citus " + "pushes down a call to a user-defined function within " + "a distributed query, and the function contains another " + "distributed query. In this scenario, Citus makes no " + "guarantess with regards to correctness and it is therefore " + "disallowed by default. This setting can be used to allow " + "nested distributed execution."), + &AllowNestedDistributedExecution, + false, + PGC_USERSET, + GUC_NO_SHOW_ALL, + NULL, NULL, NULL); + + DefineCustomBoolVariable( + "citus.allow_unsafe_locks_from_workers", + gettext_noop("Enables acquiring a distributed lock from a worker " + "when the coordinator is not in the metadata"), + gettext_noop("Set to false by default. If set to true, enables " + "acquiring a distributed lock from a worker " + "when the coordinator is not in the metadata. " + "This type of lock is unsafe because the worker will not be " + "able to lock the coordinator; the coordinator will be able to " + "intialize distributed operations on the resources locked " + "by the worker. This can lead to concurrent operations from the " + "coordinator and distributed deadlocks since the coordinator " + "and the workers would not acquire locks across the same nodes " + "in the same order."), + &EnableAcquiringUnsafeLockFromWorkers, + false, + PGC_USERSET, + GUC_NO_SHOW_ALL, + NULL, NULL, NULL); + DefineCustomBoolVariable( "citus.check_available_space_before_move", gettext_noop("When enabled will check free disk space before a shard move"), @@ -1174,24 +1213,6 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL, NULL, NULL, NULL); - DefineCustomStringVariable( - "citus.hide_shards_from_app_name_prefixes", - gettext_noop("If application_name starts with one of these values, hide shards"), - gettext_noop("Citus places distributed tables and shards in the same schema. " - "That can cause confusion when inspecting the list of tables on " - "a node with shards. This GUC can be used to hide the shards from " - "pg_class for certain applications based on the application_name " - "of the connection. The default is *, which hides shards from all " - "applications. 
This behaviour can be overridden using the " - "citus.override_table_visibility setting"), - &HideShardsFromAppNamePrefixes, - "*", - PGC_USERSET, - GUC_STANDARD, - HideShardsFromAppNamePrefixesCheckHook, - HideShardsFromAppNamePrefixesAssignHook, - NULL); - DefineCustomIntVariable( "citus.isolation_test_session_process_id", NULL, @@ -1716,6 +1737,25 @@ RegisterCitusConfigVariables(void) GUC_STANDARD, NULL, NULL, NULL); + DefineCustomStringVariable( + "citus.show_shards_for_app_name_prefixes", + gettext_noop("If application_name starts with one of these values, show shards"), + gettext_noop("Citus places distributed tables and shards in the same schema. " + "That can cause confusion when inspecting the list of tables on " + "a node with shards. By default the shards are hidden from " + "pg_class. This GUC can be used to show the shards to certain " + "applications based on the application_name of the connection. " + "The default is empty string, which hides shards from all " + "applications. This behaviour can be overridden using the " + "citus.override_table_visibility setting"), + &ShowShardsForAppNamePrefixes, + "", + PGC_USERSET, + GUC_STANDARD, + ShowShardsForAppNamePrefixesCheckHook, + ShowShardsForAppNamePrefixesAssignHook, + NULL); + DefineCustomBoolVariable( "citus.sort_returning", gettext_noop("Sorts the RETURNING clause to get consistent test output"), @@ -1985,12 +2025,12 @@ WarnIfReplicationModelIsSet(int *newval, void **extra, GucSource source) /* - * HideShardsFromAppNamePrefixesCheckHook ensures that the - * citus.hide_shards_from_app_name_prefixes holds a valid list of application_name + * ShowShardsForAppNamePrefixesCheckHook ensures that the + * citus.show_shards_for_app_name_prefixes holds a valid list of application_name * values. */ static bool -HideShardsFromAppNamePrefixesCheckHook(char **newval, void **extra, GucSource source) +ShowShardsForAppNamePrefixesCheckHook(char **newval, void **extra, GucSource source) { List *prefixList = NIL; @@ -2020,7 +2060,7 @@ HideShardsFromAppNamePrefixesCheckHook(char **newval, void **extra, GucSource so if (strcmp(prefixAscii, appNamePrefix) != 0) { - GUC_check_errdetail("prefix %s in citus.hide_shards_from_app_name_prefixes " + GUC_check_errdetail("prefix %s in citus.show_shards_for_app_name_prefixes " "contains non-ascii characters", appNamePrefix); return false; } @@ -2031,12 +2071,12 @@ HideShardsFromAppNamePrefixesCheckHook(char **newval, void **extra, GucSource so /* - * HideShardsFromAppNamePrefixesAssignHook ensures changes to - * citus.hide_shards_from_app_name_prefixes are reflected in the decision + * ShowShardsForAppNamePrefixesAssignHook ensures changes to + * citus.show_shards_for_app_name_prefixes are reflected in the decision * whether or not to show shards. */ static void -HideShardsFromAppNamePrefixesAssignHook(const char *newval, void *extra) +ShowShardsForAppNamePrefixesAssignHook(const char *newval, void *extra) { ResetHideShardsDecision(); } @@ -2050,6 +2090,7 @@ static void ApplicationNameAssignHook(const char *newval, void *extra) { ResetHideShardsDecision(); + ResetCitusBackendType(); OldApplicationNameAssignHook(newval, extra); } @@ -2295,3 +2336,29 @@ IsSuperuser(char *roleName) return isSuperuser; } + + +/* + * CitusObjectAccessHook is called when an object is created. + * + * We currently use it to track CREATE EXTENSION citus; operations to make sure we + * clear the metadata if the transaction is rolled back. 
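
The GUC changes earlier in `shared_library_init.c` replace `citus.hide_shards_from_app_name_prefixes` with `citus.show_shards_for_app_name_prefixes` (shards are hidden from `pg_class` by default) and add two opt-in settings. Hedged examples with placeholder values:

```sql
-- Show shards only to applications whose application_name starts with these prefixes:
SET citus.show_shards_for_app_name_prefixes TO 'psql,pg_dump';

-- Escape hatches that are off by default:
SET citus.allow_nested_distributed_execution TO on;
SET citus.allow_unsafe_locks_from_workers TO on;
```
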
+ */ +static void +CitusObjectAccessHook(ObjectAccessType access, Oid classId, Oid objectId, int subId, + void *arg) +{ + if (PrevObjectAccessHook) + { + PrevObjectAccessHook(access, classId, objectId, subId, arg); + } + + /* Checks if the access is post_create and that it's an extension id */ + if (access == OAT_POST_CREATE && classId == ExtensionRelationId) + { + /* There's currently an engine bug that makes it difficult to check + * the provided objectId with extension oid so we will set the value + * regardless if it's citus being created */ + SetCreateCitusTransactionLevel(GetCurrentTransactionNestLevel()); + } +} diff --git a/src/backend/distributed/sql/citus--10.0-1--10.0-2.sql b/src/backend/distributed/sql/citus--10.0-1--10.0-2.sql index a72e2edee..e3e8f3b77 100644 --- a/src/backend/distributed/sql/citus--10.0-1--10.0-2.sql +++ b/src/backend/distributed/sql/citus--10.0-1--10.0-2.sql @@ -1,12 +1,18 @@ -- citus--10.0-1--10.0-2 --#include "../../columnar/sql/columnar--10.0-1--10.0-2.sql" +DO $$ begin raise log '%', 'begin 10.0-1--10.0-2'; end; $$; DO $check_columnar$ BEGIN - IF NOT EXISTS (select 1 from pg_extension where extname='citus_columnar') THEN + IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus_columnar' and p.proname = 'columnar_handler' + ) THEN #include "../../columnar/sql/columnar--10.0-1--10.0-2.sql" END IF; END; $check_columnar$; GRANT SELECT ON public.citus_tables TO public; +DO $$ begin raise log '%', ' 10.0-1--10.0-2'; end; $$; diff --git a/src/backend/distributed/sql/citus--10.0-4--10.1-1.sql b/src/backend/distributed/sql/citus--10.0-4--10.1-1.sql index f3e559d07..5dc8ca268 100644 --- a/src/backend/distributed/sql/citus--10.0-4--10.1-1.sql +++ b/src/backend/distributed/sql/citus--10.0-4--10.1-1.sql @@ -3,6 +3,7 @@ -- add the current database to the distributed objects if not already in there. -- this is to reliably propagate some of the alter database commands that might be -- supported. 
+ INSERT INTO citus.pg_dist_object SELECT 'pg_catalog.pg_database'::regclass::oid AS oid, (SELECT oid FROM pg_database WHERE datname = current_database()) as objid, @@ -12,7 +13,11 @@ ON CONFLICT DO NOTHING; --#include "../../columnar/sql/columnar--10.0-3--10.1-1.sql" DO $check_columnar$ BEGIN - IF NOT EXISTS (select 1 from pg_extension where extname='citus_columnar') THEN + IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus_columnar' and p.proname = 'columnar_handler' + ) THEN #include "../../columnar/sql/columnar--10.0-3--10.1-1.sql" END IF; END; @@ -55,4 +60,5 @@ WHERE repmodel = 'c' DROP TRIGGER pg_dist_rebalance_strategy_enterprise_check_trigger ON pg_catalog.pg_dist_rebalance_strategy; DROP FUNCTION citus_internal.pg_dist_rebalance_strategy_enterprise_check(); +DO $$ begin raise log '%', '10.0-4--10.1-1'; end; $$; #include "udfs/citus_cleanup_orphaned_shards/10.1-1.sql" diff --git a/src/backend/distributed/sql/citus--10.1-1--10.2-1.sql b/src/backend/distributed/sql/citus--10.1-1--10.2-1.sql index b6dc93385..97676e005 100644 --- a/src/backend/distributed/sql/citus--10.1-1--10.2-1.sql +++ b/src/backend/distributed/sql/citus--10.1-1--10.2-1.sql @@ -12,7 +12,11 @@ ALTER TABLE pg_catalog.pg_dist_placement ADD CONSTRAINT placement_shardid_groupi --#include "../../columnar/sql/columnar--10.1-1--10.2-1.sql" DO $check_columnar$ BEGIN - IF NOT EXISTS (select 1 from pg_extension where extname='citus_columnar') THEN + IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus_columnar' and p.proname = 'columnar_handler' + ) THEN #include "../../columnar/sql/columnar--10.1-1--10.2-1.sql" END IF; END; @@ -29,6 +33,8 @@ $check_columnar$; #include "udfs/get_missing_time_partition_ranges/10.2-1.sql" #include "udfs/worker_nextval/10.2-1.sql" +DO $$ begin raise log '%', 'begin 10.1-1--10.2-1'; end; $$; + DROP FUNCTION pg_catalog.citus_drop_all_shards(regclass, text, text); CREATE FUNCTION pg_catalog.citus_drop_all_shards(logicalrelid regclass, schema_name text, @@ -42,3 +48,4 @@ COMMENT ON FUNCTION pg_catalog.citus_drop_all_shards(regclass, text, text, boole #include "udfs/citus_drop_trigger/10.2-1.sql"; #include "udfs/citus_prepare_pg_upgrade/10.2-1.sql" #include "udfs/citus_finish_pg_upgrade/10.2-1.sql" +DO $$ begin raise log '%', '10.1-1--10.2-1'; end; $$; diff --git a/src/backend/distributed/sql/citus--10.2-1--10.2-2.sql b/src/backend/distributed/sql/citus--10.2-1--10.2-2.sql index 8245eecce..c7b99154a 100644 --- a/src/backend/distributed/sql/citus--10.2-1--10.2-2.sql +++ b/src/backend/distributed/sql/citus--10.2-1--10.2-2.sql @@ -3,10 +3,16 @@ -- bump version to 10.2-2 --#include "../../columnar/sql/columnar--10.2-1--10.2-2.sql" +DO $$ begin raise log '%', 'begin 10.2-1--10.2-2'; end; $$; DO $check_columnar$ BEGIN - IF NOT EXISTS (select 1 from pg_extension where extname='citus_columnar') THEN + IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus_columnar' and p.proname = 'columnar_handler' + ) THEN #include "../../columnar/sql/columnar--10.2-1--10.2-2.sql" END IF; END; $check_columnar$; +DO $$ begin raise log '%', '10.2-1--10.2-2'; end; $$; diff --git 
a/src/backend/distributed/sql/citus--10.2-2--10.2-3.sql b/src/backend/distributed/sql/citus--10.2-2--10.2-3.sql index fc01c1fc3..66316eaf0 100644 --- a/src/backend/distributed/sql/citus--10.2-2--10.2-3.sql +++ b/src/backend/distributed/sql/citus--10.2-2--10.2-3.sql @@ -5,7 +5,11 @@ --#include "../../columnar/sql/columnar--10.2-2--10.2-3.sql" DO $check_columnar$ BEGIN - IF NOT EXISTS (select 1 from pg_extension where extname='citus_columnar') THEN + IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus_columnar' and p.proname = 'columnar_handler' + ) THEN #include "../../columnar/sql/columnar--10.2-2--10.2-3.sql" END IF; END; diff --git a/src/backend/distributed/sql/citus--10.2-3--10.2-4.sql b/src/backend/distributed/sql/citus--10.2-3--10.2-4.sql index 19f74b09d..2d40dd219 100644 --- a/src/backend/distributed/sql/citus--10.2-3--10.2-4.sql +++ b/src/backend/distributed/sql/citus--10.2-3--10.2-4.sql @@ -3,11 +3,16 @@ -- bump version to 10.2-4 --#include "../../columnar/sql/columnar--10.2-3--10.2-4.sql" +DO $$ begin raise log '%', 'begin 10.2-3--10.2-4'; end; $$; DO $check_columnar$ BEGIN - IF NOT EXISTS (select 1 from pg_extension where extname='citus_columnar') THEN - #include "../../columnar/sql/columnar--10.2-3--10.2-4.sql" - END IF; +IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus_columnar' and p.proname = 'columnar_handler' + ) THEN + #include "../../columnar/sql/columnar--10.2-3--10.2-4.sql" +END IF; END; $check_columnar$; @@ -15,3 +20,4 @@ $check_columnar$; #include "udfs/fix_all_partition_shard_index_names/10.2-4.sql" #include "udfs/worker_fix_partition_shard_index_names/10.2-4.sql" #include "udfs/citus_finish_pg_upgrade/10.2-4.sql" +DO $$ begin raise log '%', '10.2-3--10.2-4'; end; $$; diff --git a/src/backend/distributed/sql/citus--11.0-1--11.0-2.sql b/src/backend/distributed/sql/citus--11.0-1--11.0-2.sql index 7f39b5980..53ebae152 100644 --- a/src/backend/distributed/sql/citus--11.0-1--11.0-2.sql +++ b/src/backend/distributed/sql/citus--11.0-1--11.0-2.sql @@ -1 +1,7 @@ --- bump version to 11.0-2 +#include "udfs/citus_shards_on_worker/11.0-2.sql" +#include "udfs/citus_shard_indexes_on_worker/11.0-2.sql" +#include "udfs/citus_is_coordinator/11.0-2.sql" +#include "udfs/citus_disable_node/11.0-2.sql" +#include "udfs/run_command_on_coordinator/11.0-2.sql" +#include "udfs/start_metadata_sync_to_all_nodes/11.0-2.sql" +#include "udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql" diff --git a/src/backend/distributed/sql/citus--11.0-2--11.1-1.sql b/src/backend/distributed/sql/citus--11.0-2--11.1-1.sql index 80a9b48be..b7aacf84b 100644 --- a/src/backend/distributed/sql/citus--11.0-2--11.1-1.sql +++ b/src/backend/distributed/sql/citus--11.0-2--11.1-1.sql @@ -1,3 +1,4 @@ +DO $$ begin raise log '%', 'begin 11.0-2--11.1-1'; end; $$; DROP FUNCTION pg_catalog.worker_create_schema(bigint,text); DROP FUNCTION pg_catalog.worker_cleanup_job_schema_cache(); DROP FUNCTION pg_catalog.worker_fetch_foreign_file(text, text, bigint, text[], integer[]); @@ -7,43 +8,36 @@ DROP FUNCTION pg_catalog.worker_merge_files_into_table(bigint, integer, text[], DROP FUNCTION pg_catalog.worker_range_partition_table(bigint, integer, text, text, oid, anyarray); DROP FUNCTION pg_catalog.worker_repartition_cleanup(bigint); - 
--- bump version to 11.1-1 as version 'Z' --- drop columnar objects if they exists in citus extension - +-- If upgrading citus, the columnar objects are already being a part of the +-- citus extension, and must be detached so that they can be attached +-- to the citus_columnar extension. DO $check_citus$ BEGIN -IF EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e -INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) -INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) -WHERE e.extname='citus' and p.proname = 'columnar_handler' -) THEN -ALTER EXTENSION citus DROP SCHEMA columnar; -ALTER EXTENSION citus DROP SEQUENCE columnar.storageid_seq; + IF EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus' and p.proname = 'columnar_handler' + ) THEN + ALTER EXTENSION citus DROP SCHEMA columnar; + ALTER EXTENSION citus DROP SEQUENCE columnar.storageid_seq; --- columnar tables -ALTER EXTENSION citus DROP TABLE columnar.options; -ALTER EXTENSION citus DROP TABLE columnar.stripe; -ALTER EXTENSION citus DROP TABLE columnar.chunk_group; -ALTER EXTENSION citus DROP TABLE columnar.chunk; + -- columnar tables + ALTER EXTENSION citus DROP TABLE columnar.options; + ALTER EXTENSION citus DROP TABLE columnar.stripe; + ALTER EXTENSION citus DROP TABLE columnar.chunk_group; + ALTER EXTENSION citus DROP TABLE columnar.chunk; -DO $proc$ -BEGIN --- columnar functions -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE $$ - ALTER EXTENSION citus DROP FUNCTION columnar.columnar_handler; - ALTER EXTENSION citus DROP ACCESS METHOD columnar; - ALTER EXTENSION citus DROP FUNCTION pg_catalog.alter_columnar_table_set; - ALTER EXTENSION citus DROP FUNCTION pg_catalog.alter_columnar_table_reset; - $$; -END IF; -END$proc$; + ALTER EXTENSION citus DROP FUNCTION columnar.columnar_handler; + ALTER EXTENSION citus DROP ACCESS METHOD columnar; + ALTER EXTENSION citus DROP FUNCTION pg_catalog.alter_columnar_table_set; + ALTER EXTENSION citus DROP FUNCTION pg_catalog.alter_columnar_table_reset; --- functions under citus_internal for columnar -ALTER EXTENSION citus DROP FUNCTION citus_internal.upgrade_columnar_storage; -ALTER EXTENSION citus DROP FUNCTION citus_internal.downgrade_columnar_storage; -ALTER EXTENSION citus DROP FUNCTION citus_internal.columnar_ensure_am_depends_catalog; + -- functions under citus_internal for columnar + ALTER EXTENSION citus DROP FUNCTION citus_internal.upgrade_columnar_storage; + ALTER EXTENSION citus DROP FUNCTION citus_internal.downgrade_columnar_storage; + ALTER EXTENSION citus DROP FUNCTION citus_internal.columnar_ensure_am_depends_catalog; -END IF; + END IF; END $check_citus$; +#include "udfs/citus_finish_pg_upgrade/11.1-1.sql" +DO $$ begin raise log '%', ' 11.0-2--11.1-1'; end; $$; diff --git a/src/backend/distributed/sql/citus--8.0-3--8.0-4.sql b/src/backend/distributed/sql/citus--8.0-3--8.0-4.sql index 0cbf57f3f..9ec750022 100644 --- a/src/backend/distributed/sql/citus--8.0-3--8.0-4.sql +++ b/src/backend/distributed/sql/citus--8.0-3--8.0-4.sql @@ -6,6 +6,6 @@ RETURNS BOOL LANGUAGE C STRICT as 'MODULE_PATHNAME', $$lock_relation_if_exists$$; COMMENT ON FUNCTION lock_relation_if_exists(table_name text, lock_mode text) -IS 'locks relation in the lock_mode if the relation exists'; +IS 'used internally to locks relation in the lock_mode if the relation exists without throwing errors; consider using LOCK * IN * MODE 
instead'; RESET search_path; diff --git a/src/backend/distributed/sql/citus--9.5-1--10.0-4.sql b/src/backend/distributed/sql/citus--9.5-1--10.0-4.sql index 751665b7c..a25f2c8d2 100644 --- a/src/backend/distributed/sql/citus--9.5-1--10.0-4.sql +++ b/src/backend/distributed/sql/citus--9.5-1--10.0-4.sql @@ -6,7 +6,7 @@ -- cat citus--9.5-1--10.0-1.sql citus--10.0-1--10.0-2.sql citus--10.0-2--10.0-3.sql > citus--9.5-1--10.0-4.sql -- copy of citus--9.5-1--10.0-1 - +DO $$ begin raise log '%', 'begin 9.5-1--10.0-4'; end; $$; DROP FUNCTION pg_catalog.upgrade_to_reference_table(regclass); DROP FUNCTION IF EXISTS pg_catalog.citus_total_relation_size(regclass); @@ -38,9 +38,13 @@ DROP FUNCTION IF EXISTS pg_catalog.citus_total_relation_size(regclass); --#include "../../columnar/sql/columnar--9.5-1--10.0-1.sql" DO $check_columnar$ BEGIN - IF NOT EXISTS (select 1 from pg_extension where extname='citus_columnar') THEN - #include "../../columnar/sql/columnar--9.5-1--10.0-1.sql" - END IF; +IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus_columnar' and p.proname = 'columnar_handler' + ) THEN +#include "../../columnar/sql/columnar--9.5-1--10.0-1.sql" +END IF; END; $check_columnar$; @@ -179,7 +183,19 @@ GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC; -- copy of citus--10.0-1--10.0-2 +--#include "../../columnar/sql/columnar--10.0-1--10.0-2.sql" +DO $check_columnar$ +BEGIN +IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) + WHERE e.extname='citus_columnar' and p.proname = 'columnar_handler' + ) THEN #include "../../columnar/sql/columnar--10.0-1--10.0-2.sql" +END IF; +END; +$check_columnar$; + -- copy of citus--10.0-2--10.0-3 @@ -222,3 +238,4 @@ COMMENT ON FUNCTION pg_catalog.citus_get_active_worker_nodes() RESET search_path; +DO $$ begin raise log '%', ' 9.5-1--10.0-4'; end; $$; diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-2--11.0-1.sql b/src/backend/distributed/sql/downgrades/citus--11.0-2--11.0-1.sql index 163dca315..6569f4bbc 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.0-2--11.0-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.0-2--11.0-1.sql @@ -1 +1,18 @@ --- bump down version to 11.0-1 +#include "../udfs/citus_shards_on_worker/11.0-1.sql" +#include "../udfs/citus_shard_indexes_on_worker/11.0-1.sql" + +DROP FUNCTION pg_catalog.citus_disable_node(text, integer, bool); +CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool default false) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_disable_node$$; +COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool) + IS 'removes node from the cluster temporarily'; + +REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int, bool) FROM PUBLIC; + +DROP FUNCTION pg_catalog.citus_is_coordinator(); +DROP FUNCTION pg_catalog.run_command_on_coordinator(text,boolean); + +DROP FUNCTION pg_catalog.start_metadata_sync_to_all_nodes(); +DROP FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(boolean); diff --git a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-2.sql b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-2.sql index 2f8f4148d..c8bba83cd 100644 --- 
a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-2.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-2.sql @@ -54,18 +54,10 @@ ALTER EXTENSION citus ADD TABLE columnar.stripe; ALTER EXTENSION citus ADD TABLE columnar.chunk_group; ALTER EXTENSION citus ADD TABLE columnar.chunk; -DO $proc$ -BEGIN --- columnar functions -IF substring(current_Setting('server_version'), '\d+')::int >= 12 THEN - EXECUTE $$ - ALTER EXTENSION citus ADD FUNCTION columnar.columnar_handler; - ALTER EXTENSION citus ADD ACCESS METHOD columnar; - ALTER EXTENSION citus ADD FUNCTION pg_catalog.alter_columnar_table_set; - ALTER EXTENSION citus ADD FUNCTION pg_catalog.alter_columnar_table_reset; - $$; -END IF; -END$proc$; +ALTER EXTENSION citus ADD FUNCTION columnar.columnar_handler; +ALTER EXTENSION citus ADD ACCESS METHOD columnar; +ALTER EXTENSION citus ADD FUNCTION pg_catalog.alter_columnar_table_set; +ALTER EXTENSION citus ADD FUNCTION pg_catalog.alter_columnar_table_reset; ALTER EXTENSION citus ADD FUNCTION citus_internal.upgrade_columnar_storage; ALTER EXTENSION citus ADD FUNCTION citus_internal.downgrade_columnar_storage; diff --git a/src/backend/distributed/sql/udfs/citus_disable_node/11.0-2.sql b/src/backend/distributed/sql/udfs/citus_disable_node/11.0-2.sql new file mode 100644 index 000000000..182334d75 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_disable_node/11.0-2.sql @@ -0,0 +1,9 @@ +DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool); +CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, synchronous bool default false) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_disable_node$$; +COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, synchronous bool) + IS 'removes node from the cluster temporarily'; + +REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int, bool) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_disable_node/latest.sql b/src/backend/distributed/sql/udfs/citus_disable_node/latest.sql index 968d119b1..182334d75 100644 --- a/src/backend/distributed/sql/udfs/citus_disable_node/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_disable_node/latest.sql @@ -1,9 +1,9 @@ -DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer); -CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool default false) +DROP FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool); +CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, synchronous bool default false) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$citus_disable_node$$; -COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool) +COMMENT ON FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, synchronous bool) IS 'removes node from the cluster temporarily'; REVOKE ALL ON FUNCTION pg_catalog.citus_disable_node(text,int, bool) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql new file mode 100644 index 000000000..2b4bb17f6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql @@ -0,0 +1,221 @@ +-- citus_finalize_upgrade_to_citus11() is a helper UDF ensures +-- the upgrade to Citus 11 is finished successfully. 
Upgrade to +-- Citus 11 requires all active primary worker nodes to get the +-- metadata. And, this function's job is to sync the metadata to +-- the nodes that does not already have +-- once the function finishes without any errors and returns true +-- the cluster is ready for running distributed queries from +-- the worker nodes. When debug is enabled, the function provides +-- more information to the user. +CREATE OR REPLACE FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(enforce_version_check bool default true) + RETURNS bool + LANGUAGE plpgsql + AS $$ +BEGIN + + --------------------------------------------- + -- This script consists of N stages + -- Each step is documented, and if log level + -- is reduced to DEBUG1, each step is logged + -- as well + --------------------------------------------- + +------------------------------------------------------------------------------------------ + -- STAGE 0: Ensure no concurrent node metadata changing operation happens while this + -- script is running via acquiring a strong lock on the pg_dist_node +------------------------------------------------------------------------------------------ +BEGIN + LOCK TABLE pg_dist_node IN EXCLUSIVE MODE NOWAIT; + + EXCEPTION WHEN OTHERS THEN + RAISE 'Another node metadata changing operation is in progress, try again.'; +END; + +------------------------------------------------------------------------------------------ + -- STAGE 1: We want all the commands to run in the same transaction block. Without + -- sequential mode, metadata syncing cannot be done in a transaction block along with + -- other commands +------------------------------------------------------------------------------------------ + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + +------------------------------------------------------------------------------------------ + -- STAGE 2: Ensure we have the prerequisites + -- (a) only superuser can run this script + -- (b) cannot be executed when enable_ddl_propagation is False + -- (c) can only be executed from the coordinator +------------------------------------------------------------------------------------------ +DECLARE + is_superuser_running boolean := False; + enable_ddl_prop boolean:= False; + local_group_id int := 0; +BEGIN + SELECT rolsuper INTO is_superuser_running FROM pg_roles WHERE rolname = current_user; + IF is_superuser_running IS NOT True THEN + RAISE EXCEPTION 'This operation can only be initiated by superuser'; + END IF; + + SELECT current_setting('citus.enable_ddl_propagation') INTO enable_ddl_prop; + IF enable_ddl_prop IS NOT True THEN + RAISE EXCEPTION 'This operation cannot be completed when citus.enable_ddl_propagation is False.'; + END IF; + + SELECT groupid INTO local_group_id FROM pg_dist_local_group; + + IF local_group_id != 0 THEN + RAISE EXCEPTION 'Operation is not allowed on this node. 
Connect to the coordinator and run it again.'; + ELSE + RAISE DEBUG 'We are on the coordinator, continue to sync metadata'; + END IF; +END; + + + ------------------------------------------------------------------------------------------ + -- STAGE 3: Ensure all primary nodes are active + ------------------------------------------------------------------------------------------ + DECLARE + primary_disabled_worker_node_count int := 0; + BEGIN + SELECT count(*) INTO primary_disabled_worker_node_count FROM pg_dist_node + WHERE groupid != 0 AND noderole = 'primary' AND NOT isactive; + + IF primary_disabled_worker_node_count != 0 THEN + RAISE EXCEPTION 'There are inactive primary worker nodes, you need to activate the nodes first.' + 'Use SELECT citus_activate_node() to activate the disabled nodes'; + ELSE + RAISE DEBUG 'There are no disabled worker nodes, continue to sync metadata'; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 4: Ensure there is no connectivity issues in the cluster + ------------------------------------------------------------------------------------------ + DECLARE + all_nodes_can_connect_to_each_other boolean := False; + BEGIN + SELECT bool_and(coalesce(result, false)) INTO all_nodes_can_connect_to_each_other FROM citus_check_cluster_node_health(); + + IF all_nodes_can_connect_to_each_other != True THEN + RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' + 'nodes are up and runnnig. Also, make sure that all nodes can connect ' + 'to each other. Use SELECT * FROM citus_check_cluster_node_health(); ' + 'to check the cluster health'; + ELSE + RAISE DEBUG 'Cluster is healthy, all nodes can connect to each other'; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 5: Ensure all nodes are on the same version + ------------------------------------------------------------------------------------------ + DECLARE + coordinator_version text := ''; + worker_node_version text := ''; + worker_node_version_count int := 0; + + BEGIN + SELECT extversion INTO coordinator_version from pg_extension WHERE extname = 'citus'; + + -- first, check if all nodes have the same versions + SELECT + count(distinct result) INTO worker_node_version_count + FROM + run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'''); + IF enforce_version_check AND worker_node_version_count != 1 THEN + RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently ' + 'some of the workers have different versions.'; + ELSE + RAISE DEBUG 'All worker nodes have the same Citus version'; + END IF; + + -- second, check if all nodes have the same versions + SELECT + result INTO worker_node_version + FROM + run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'';') + GROUP BY result; + + IF enforce_version_check AND coordinator_version != worker_node_version THEN + RAISE EXCEPTION 'All nodes should have the same Citus version installed. 
Currently ' + 'the coordinator has version % and the worker(s) has %', + coordinator_version, worker_node_version; + ELSE + RAISE DEBUG 'All nodes have the same Citus version'; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 6: Ensure all the partitioned tables have the proper naming structure + -- As described on https://github.com/citusdata/citus/issues/4962 + -- existing indexes on partitioned distributed tables can collide + -- with the index names exists on the shards + -- luckily, we know how to fix it. + -- And, note that we should do this even if the cluster is a basic plan + -- (e.g., single node Citus) such that when cluster scaled out, everything + -- works as intended + -- And, this should be done only ONCE for a cluster as it can be a pretty + -- time consuming operation. Thus, even if the function is called multiple time, + -- we keep track of it and do not re-execute this part if not needed. + ------------------------------------------------------------------------------------------ + DECLARE + partitioned_table_exists_pre_11 boolean:=False; + BEGIN + + -- we recorded if partitioned tables exists during upgrade to Citus 11 + SELECT metadata->>'partitioned_citus_table_exists_pre_11' INTO partitioned_table_exists_pre_11 + FROM pg_dist_node_metadata; + + IF partitioned_table_exists_pre_11 IS NOT NULL AND partitioned_table_exists_pre_11 THEN + + -- this might take long depending on the number of partitions and shards... + RAISE NOTICE 'Preparing all the existing partitioned table indexes'; + PERFORM pg_catalog.fix_all_partition_shard_index_names(); + + -- great, we are done with fixing the existing wrong index names + -- so, lets remove this + UPDATE pg_dist_node_metadata + SET metadata=jsonb_delete(metadata, 'partitioned_citus_table_exists_pre_11'); + ELSE + RAISE DEBUG 'There are no partitioned tables that should be fixed'; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 7: Return early if there are no primary worker nodes + -- We don't strictly need this step, but it gives a nicer notice message + ------------------------------------------------------------------------------------------ + DECLARE + primary_worker_node_count bigint :=0; + BEGIN + SELECT count(*) INTO primary_worker_node_count FROM pg_dist_node WHERE groupid != 0 AND noderole = 'primary'; + + IF primary_worker_node_count = 0 THEN + RAISE NOTICE 'There are no primary worker nodes, no need to sync metadata to any node'; + RETURN true; + ELSE + RAISE DEBUG 'There are % primary worker nodes, continue to sync metadata', primary_worker_node_count; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 8: Do the actual metadata & object syncing to the worker nodes + -- For the "already synced" metadata nodes, we do not strictly need to + -- sync the objects & metadata, but there is no harm to do it anyway + -- it'll only cost some execution time but makes sure that we have a + -- a consistent metadata & objects across all the nodes + ------------------------------------------------------------------------------------------ + DECLARE + BEGIN + + -- this might take long depending on the number of tables & objects ... 
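+    -- Unlike the earlier implementation of this UDF, which looped over pg_dist_node
+    -- and called start_metadata_sync_to_node() once per primary worker, this version
+    -- relies on the start_metadata_sync_to_all_nodes() UDF introduced in 11.0-2 to
+    -- sync every active primary node in a single call (see the latest.sql hunk below).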
+ RAISE NOTICE 'Preparing to sync the metadata to all nodes'; + + PERFORM start_metadata_sync_to_all_nodes(); + END; + + RETURN true; +END; +$$; +COMMENT ON FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(bool) + IS 'finalizes upgrade to Citus'; + +REVOKE ALL ON FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(bool) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql index 7b7d357ff..2b4bb17f6 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql @@ -209,9 +209,7 @@ END; -- this might take long depending on the number of tables & objects ... RAISE NOTICE 'Preparing to sync the metadata to all nodes'; - PERFORM start_metadata_sync_to_node(nodename,nodeport) - FROM - pg_dist_node WHERE groupid != 0 AND noderole = 'primary'; + PERFORM start_metadata_sync_to_all_nodes(); END; RETURN true; diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/11.1-1.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/11.1-1.sql new file mode 100644 index 000000000..caa80d51e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/11.1-1.sql @@ -0,0 +1,151 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade() + RETURNS void + LANGUAGE plpgsql + SET search_path = pg_catalog + AS $cppu$ +DECLARE + table_name regclass; + command text; + trigger_name text; +BEGIN + + + IF substring(current_Setting('server_version'), '\d+')::int >= 14 THEN + EXECUTE $cmd$ + -- disable propagation to prevent EnsureCoordinator errors + -- the aggregate created here does not depend on Citus extension (yet) + -- since we add the dependency with the next command + SET citus.enable_ddl_propagation TO OFF; + CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray); + COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray) + IS 'concatenate input arrays into a single array'; + RESET citus.enable_ddl_propagation; + $cmd$; + ELSE + EXECUTE $cmd$ + SET citus.enable_ddl_propagation TO OFF; + CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray); + COMMENT ON AGGREGATE array_cat_agg(anyarray) + IS 'concatenate input arrays into a single array'; + RESET citus.enable_ddl_propagation; + $cmd$; + END IF; + + -- + -- Citus creates the array_cat_agg but because of a compatibility + -- issue between pg13-pg14, we drop and create it during upgrade. + -- And as Citus creates it, there needs to be a dependency to the + -- Citus extension, so we create that dependency here. + -- We are not using: + -- ALTER EXENSION citus DROP/CREATE AGGREGATE array_cat_agg + -- because we don't have an easy way to check if the aggregate + -- exists with anyarray type or anycompatiblearray type. 
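+    -- The manual pg_depend row below uses deptype 'e' (DEPENDENCY_EXTENSION): it
+    -- records array_cat_agg as a member of the citus extension, which is the same
+    -- dependency ALTER EXTENSION citus ADD AGGREGATE would have created.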
+ + INSERT INTO pg_depend + SELECT + 'pg_proc'::regclass::oid as classid, + (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'e' as deptype; + + -- + -- restore citus catalog tables + -- + INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition; + INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard; + INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement; + INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata; + INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node; + INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group; + INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction; + INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation; + -- enterprise catalog tables + INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo; + INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo; + + INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT + name, + default_strategy, + shard_cost_function::regprocedure::regproc, + node_capacity_function::regprocedure::regproc, + shard_allowed_on_node_function::regprocedure::regproc, + default_threshold, + minimum_threshold, + improvement_threshold + FROM public.pg_dist_rebalance_strategy; + + -- + -- drop backup tables + -- + DROP TABLE public.pg_dist_authinfo; + DROP TABLE public.pg_dist_colocation; + DROP TABLE public.pg_dist_local_group; + DROP TABLE public.pg_dist_node; + DROP TABLE public.pg_dist_node_metadata; + DROP TABLE public.pg_dist_partition; + DROP TABLE public.pg_dist_placement; + DROP TABLE public.pg_dist_poolinfo; + DROP TABLE public.pg_dist_shard; + DROP TABLE public.pg_dist_transaction; + DROP TABLE public.pg_dist_rebalance_strategy; + + -- + -- reset sequences + -- + PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false); + PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false); + PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false); + PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false); + PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false); + + -- + -- register triggers + -- + FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition + LOOP + trigger_name := 'truncate_trigger_' || table_name::oid; + command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()'; + EXECUTE command; + command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name); + EXECUTE command; + END LOOP; + + -- + -- set dependencies + -- + INSERT INTO pg_depend + SELECT + 'pg_class'::regclass::oid as classid, + p.logicalrelid::regclass::oid as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'n' as deptype + FROM pg_catalog.pg_dist_partition p; + + 
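+    -- (The dependency rows inserted above use deptype 'n', a plain DEPENDENCY_NORMAL
+    --  from each distributed table to the citus extension, in contrast to the 'e'
+    --  membership dependency recorded for array_cat_agg earlier in this script.)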
-- set dependencies for columnar table access method + PERFORM columnar_internal.columnar_ensure_am_depends_catalog(); + + -- restore pg_dist_object from the stable identifiers + TRUNCATE pg_catalog.pg_dist_object; + INSERT INTO pg_catalog.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid) + SELECT + address.classid, + address.objid, + address.objsubid, + naming.distribution_argument_index, + naming.colocationid + FROM + public.pg_dist_object naming, + pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address; + + DROP TABLE public.pg_dist_object; +END; +$cppu$; + +COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade() + IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade'; diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql index 2c2635687..caa80d51e 100644 --- a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql @@ -128,7 +128,7 @@ BEGIN FROM pg_catalog.pg_dist_partition p; -- set dependencies for columnar table access method - PERFORM citus_internal.columnar_ensure_am_depends_catalog(); + PERFORM columnar_internal.columnar_ensure_am_depends_catalog(); -- restore pg_dist_object from the stable identifiers TRUNCATE pg_catalog.pg_dist_object; diff --git a/src/backend/distributed/sql/udfs/citus_is_coordinator/11.0-2.sql b/src/backend/distributed/sql/udfs/citus_is_coordinator/11.0-2.sql new file mode 100644 index 000000000..0bc83379d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_is_coordinator/11.0-2.sql @@ -0,0 +1,7 @@ +CREATE FUNCTION pg_catalog.citus_is_coordinator() + RETURNS bool + LANGUAGE c + STRICT +AS 'MODULE_PATHNAME', $$citus_is_coordinator$$; +COMMENT ON FUNCTION pg_catalog.citus_is_coordinator() + IS 'returns whether the current node is a coordinator'; diff --git a/src/backend/distributed/sql/udfs/citus_is_coordinator/latest.sql b/src/backend/distributed/sql/udfs/citus_is_coordinator/latest.sql new file mode 100644 index 000000000..0bc83379d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_is_coordinator/latest.sql @@ -0,0 +1,7 @@ +CREATE FUNCTION pg_catalog.citus_is_coordinator() + RETURNS bool + LANGUAGE c + STRICT +AS 'MODULE_PATHNAME', $$citus_is_coordinator$$; +COMMENT ON FUNCTION pg_catalog.citus_is_coordinator() + IS 'returns whether the current node is a coordinator'; diff --git a/src/backend/distributed/sql/udfs/citus_shard_indexes_on_worker/11.0-2.sql b/src/backend/distributed/sql/udfs/citus_shard_indexes_on_worker/11.0-2.sql new file mode 100644 index 000000000..fd4684b18 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_shard_indexes_on_worker/11.0-2.sql @@ -0,0 +1,39 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_indexes_on_worker( + OUT schema_name name, + OUT index_name name, + OUT table_type text, + OUT owner_name name, + OUT shard_name name) + RETURNS SETOF record + LANGUAGE plpgsql + SET citus.show_shards_for_app_name_prefixes = '*' + AS $$ +BEGIN + -- this is the query that \di produces, except pg_table_is_visible + -- is replaced with pg_catalog.relation_is_a_known_shard(c.oid) + RETURN QUERY + SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 
'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner", + c2.relname as "Table" + FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + LEFT JOIN pg_catalog.pg_index i ON i.indexrelid = c.oid + LEFT JOIN pg_catalog.pg_class c2 ON i.indrelid = c2.oid + WHERE c.relkind IN ('i','') + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'information_schema' + AND n.nspname !~ '^pg_toast' + AND pg_catalog.relation_is_a_known_shard(c.oid) + ORDER BY 1,2; +END; +$$; + +CREATE OR REPLACE VIEW pg_catalog.citus_shard_indexes_on_worker AS + SELECT schema_name as "Schema", + index_name as "Name", + table_type as "Type", + owner_name as "Owner", + shard_name as "Table" + FROM pg_catalog.citus_shard_indexes_on_worker() s; diff --git a/src/backend/distributed/sql/udfs/citus_shard_indexes_on_worker/latest.sql b/src/backend/distributed/sql/udfs/citus_shard_indexes_on_worker/latest.sql index d98cdafe5..fd4684b18 100644 --- a/src/backend/distributed/sql/udfs/citus_shard_indexes_on_worker/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_shard_indexes_on_worker/latest.sql @@ -6,7 +6,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_shard_indexes_on_worker( OUT shard_name name) RETURNS SETOF record LANGUAGE plpgsql - SET citus.hide_shards_from_app_name_prefixes = '' + SET citus.show_shards_for_app_name_prefixes = '*' AS $$ BEGIN -- this is the query that \di produces, except pg_table_is_visible diff --git a/src/backend/distributed/sql/udfs/citus_shards_on_worker/11.0-2.sql b/src/backend/distributed/sql/udfs/citus_shards_on_worker/11.0-2.sql new file mode 100644 index 000000000..dbb7498e8 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_shards_on_worker/11.0-2.sql @@ -0,0 +1,34 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_shards_on_worker( + OUT schema_name name, + OUT shard_name name, + OUT table_type text, + OUT owner_name name) + RETURNS SETOF record + LANGUAGE plpgsql + SET citus.show_shards_for_app_name_prefixes = '*' + AS $$ +BEGIN + -- this is the query that \d produces, except pg_table_is_visible + -- is replaced with pg_catalog.relation_is_a_known_shard(c.oid) + RETURN QUERY + SELECT n.nspname as "Schema", + c.relname as "Name", + CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' WHEN 'p' THEN 'table' END as "Type", + pg_catalog.pg_get_userbyid(c.relowner) as "Owner" + FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + WHERE c.relkind IN ('r','p','v','m','S','f','') + AND n.nspname <> 'pg_catalog' + AND n.nspname <> 'information_schema' + AND n.nspname !~ '^pg_toast' + AND pg_catalog.relation_is_a_known_shard(c.oid) + ORDER BY 1,2; +END; +$$; + +CREATE OR REPLACE VIEW pg_catalog.citus_shards_on_worker AS + SELECT schema_name as "Schema", + shard_name as "Name", + table_type as "Type", + owner_name as "Owner" + FROM pg_catalog.citus_shards_on_worker() s; diff --git a/src/backend/distributed/sql/udfs/citus_shards_on_worker/latest.sql b/src/backend/distributed/sql/udfs/citus_shards_on_worker/latest.sql index 895c92ae8..dbb7498e8 100644 --- a/src/backend/distributed/sql/udfs/citus_shards_on_worker/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_shards_on_worker/latest.sql @@ -5,7 +5,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_shards_on_worker( OUT owner_name name) RETURNS SETOF record LANGUAGE plpgsql 
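+ -- citus.show_shards_for_app_name_prefixes = '*' takes over from the older
+ -- citus.hide_shards_from_app_name_prefixes setting here; both serve the same
+ -- purpose of keeping shard relations visible to this helper regardless of the
+ -- caller's application_name, the old GUC by hiding shards from no prefix and
+ -- the new one by showing them for every prefix.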
- SET citus.hide_shards_from_app_name_prefixes = '' + SET citus.show_shards_for_app_name_prefixes = '*' AS $$ BEGIN -- this is the query that \d produces, except pg_table_is_visible diff --git a/src/backend/distributed/sql/udfs/run_command_on_coordinator/11.0-2.sql b/src/backend/distributed/sql/udfs/run_command_on_coordinator/11.0-2.sql new file mode 100644 index 000000000..d8fb3c35c --- /dev/null +++ b/src/backend/distributed/sql/udfs/run_command_on_coordinator/11.0-2.sql @@ -0,0 +1,59 @@ +-- run_command_on_coordinator tries to closely follow the semantics of run_command_on_all_nodes, +-- but only runs the command on the coordinator +CREATE FUNCTION pg_catalog.run_command_on_coordinator(command text, give_warning_for_connection_errors bool default false, + OUT nodeid int, OUT success bool, OUT result text) + RETURNS SETOF record + LANGUAGE plpgsql + AS $function$ +DECLARE + nodenames text[]; + ports int[]; + commands text[]; + coordinator_is_in_metadata boolean; + parallel boolean := false; +BEGIN + WITH citus_nodes AS ( + SELECT * FROM pg_dist_node + WHERE isactive AND nodecluster = current_setting('citus.cluster_name') AND groupid = 0 + AND ( + (current_setting('citus.use_secondary_nodes') = 'never' AND noderole = 'primary') + OR + (current_setting('citus.use_secondary_nodes') = 'always' AND noderole = 'secondary') + ) + ORDER BY nodename, nodeport + ) + SELECT array_agg(citus_nodes.nodename), array_agg(citus_nodes.nodeport), array_agg(command), count(*) > 0 + FROM citus_nodes + INTO nodenames, ports, commands, coordinator_is_in_metadata; + + IF NOT coordinator_is_in_metadata THEN + -- This will happen when we call this function on coordinator and + -- the coordinator is not added to the metadata. + -- We'll manually add current node to the lists to actually run on all nodes. + -- But when the coordinator is not added to metadata and this function + -- is called from a worker node, this will not be enough and we'll + -- not be able run on all nodes. 
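+    -- citus_is_coordinator(), also added in 11.0-2, gates the fallback below: when
+    -- this node is the coordinator but is missing from pg_dist_node, the command is
+    -- run locally via citus.local_hostname and the local port; on any other node we
+    -- raise an error hinting at citus_set_coordinator_host().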
+ IF citus_is_coordinator() THEN + SELECT + array_append(nodenames, current_setting('citus.local_hostname')), + array_append(ports, current_setting('port')::int), + array_append(commands, command) + INTO nodenames, ports, commands; + ELSE + RAISE EXCEPTION 'the coordinator is not added to the metadata' + USING HINT = 'Add the node as a coordinator by using: SELECT citus_set_coordinator_host('''')'; + END IF; + END IF; + + FOR nodeid, success, result IN + SELECT coalesce(pg_dist_node.nodeid, 0) AS nodeid, mrow.success, mrow.result + FROM master_run_on_worker(nodenames, ports, commands, parallel) mrow + LEFT JOIN pg_dist_node ON mrow.node_name = pg_dist_node.nodename AND mrow.node_port = pg_dist_node.nodeport + LOOP + IF give_warning_for_connection_errors AND NOT success THEN + RAISE WARNING 'Error on node with node id %: %', nodeid, result; + END IF; + RETURN NEXT; + END LOOP; +END; +$function$; diff --git a/src/backend/distributed/sql/udfs/run_command_on_coordinator/latest.sql b/src/backend/distributed/sql/udfs/run_command_on_coordinator/latest.sql new file mode 100644 index 000000000..d8fb3c35c --- /dev/null +++ b/src/backend/distributed/sql/udfs/run_command_on_coordinator/latest.sql @@ -0,0 +1,59 @@ +-- run_command_on_coordinator tries to closely follow the semantics of run_command_on_all_nodes, +-- but only runs the command on the coordinator +CREATE FUNCTION pg_catalog.run_command_on_coordinator(command text, give_warning_for_connection_errors bool default false, + OUT nodeid int, OUT success bool, OUT result text) + RETURNS SETOF record + LANGUAGE plpgsql + AS $function$ +DECLARE + nodenames text[]; + ports int[]; + commands text[]; + coordinator_is_in_metadata boolean; + parallel boolean := false; +BEGIN + WITH citus_nodes AS ( + SELECT * FROM pg_dist_node + WHERE isactive AND nodecluster = current_setting('citus.cluster_name') AND groupid = 0 + AND ( + (current_setting('citus.use_secondary_nodes') = 'never' AND noderole = 'primary') + OR + (current_setting('citus.use_secondary_nodes') = 'always' AND noderole = 'secondary') + ) + ORDER BY nodename, nodeport + ) + SELECT array_agg(citus_nodes.nodename), array_agg(citus_nodes.nodeport), array_agg(command), count(*) > 0 + FROM citus_nodes + INTO nodenames, ports, commands, coordinator_is_in_metadata; + + IF NOT coordinator_is_in_metadata THEN + -- This will happen when we call this function on coordinator and + -- the coordinator is not added to the metadata. + -- We'll manually add current node to the lists to actually run on all nodes. + -- But when the coordinator is not added to metadata and this function + -- is called from a worker node, this will not be enough and we'll + -- not be able run on all nodes. 
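+    -- A commented usage sketch (the command string is illustrative):
+    --   SELECT nodeid, success, result
+    --   FROM run_command_on_coordinator('SELECT citus_is_coordinator()');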
+ IF citus_is_coordinator() THEN + SELECT + array_append(nodenames, current_setting('citus.local_hostname')), + array_append(ports, current_setting('port')::int), + array_append(commands, command) + INTO nodenames, ports, commands; + ELSE + RAISE EXCEPTION 'the coordinator is not added to the metadata' + USING HINT = 'Add the node as a coordinator by using: SELECT citus_set_coordinator_host('''')'; + END IF; + END IF; + + FOR nodeid, success, result IN + SELECT coalesce(pg_dist_node.nodeid, 0) AS nodeid, mrow.success, mrow.result + FROM master_run_on_worker(nodenames, ports, commands, parallel) mrow + LEFT JOIN pg_dist_node ON mrow.node_name = pg_dist_node.nodename AND mrow.node_port = pg_dist_node.nodeport + LOOP + IF give_warning_for_connection_errors AND NOT success THEN + RAISE WARNING 'Error on node with node id %: %', nodeid, result; + END IF; + RETURN NEXT; + END LOOP; +END; +$function$; diff --git a/src/backend/distributed/sql/udfs/start_metadata_sync_to_all_nodes/11.0-2.sql b/src/backend/distributed/sql/udfs/start_metadata_sync_to_all_nodes/11.0-2.sql new file mode 100644 index 000000000..ca886fb9a --- /dev/null +++ b/src/backend/distributed/sql/udfs/start_metadata_sync_to_all_nodes/11.0-2.sql @@ -0,0 +1,9 @@ +CREATE OR REPLACE FUNCTION pg_catalog.start_metadata_sync_to_all_nodes() + RETURNS bool + LANGUAGE C + STRICT +AS 'MODULE_PATHNAME', $$start_metadata_sync_to_all_nodes$$; +COMMENT ON FUNCTION pg_catalog.start_metadata_sync_to_all_nodes() + IS 'sync metadata to all active primary nodes'; + +REVOKE ALL ON FUNCTION pg_catalog.start_metadata_sync_to_all_nodes() FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/start_metadata_sync_to_all_nodes/latest.sql b/src/backend/distributed/sql/udfs/start_metadata_sync_to_all_nodes/latest.sql new file mode 100644 index 000000000..ca886fb9a --- /dev/null +++ b/src/backend/distributed/sql/udfs/start_metadata_sync_to_all_nodes/latest.sql @@ -0,0 +1,9 @@ +CREATE OR REPLACE FUNCTION pg_catalog.start_metadata_sync_to_all_nodes() + RETURNS bool + LANGUAGE C + STRICT +AS 'MODULE_PATHNAME', $$start_metadata_sync_to_all_nodes$$; +COMMENT ON FUNCTION pg_catalog.start_metadata_sync_to_all_nodes() + IS 'sync metadata to all active primary nodes'; + +REVOKE ALL ON FUNCTION pg_catalog.start_metadata_sync_to_all_nodes() FROM PUBLIC; diff --git a/src/backend/distributed/test/deparse_shard_query.c b/src/backend/distributed/test/deparse_shard_query.c index 1961ad52d..a6196146f 100644 --- a/src/backend/distributed/test/deparse_shard_query.c +++ b/src/backend/distributed/test/deparse_shard_query.c @@ -49,9 +49,9 @@ deparse_shard_query_test(PG_FUNCTION_ARGS) Node *parsetree = NULL; foreach_ptr(parsetree, parseTreeList) { - List *queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, - queryStringChar, - NULL, 0, NULL); + List *queryTreeList = pg_analyze_and_rewrite_fixedparams((RawStmt *) parsetree, + queryStringChar, + NULL, 0, NULL); Query *query = NULL; foreach_ptr(query, queryTreeList) diff --git a/src/backend/distributed/test/distribution_metadata.c b/src/backend/distributed/test/distribution_metadata.c index f9afd3b68..6d769ef27 100644 --- a/src/backend/distributed/test/distribution_metadata.c +++ b/src/backend/distributed/test/distribution_metadata.c @@ -259,9 +259,9 @@ relation_count_in_query(PG_FUNCTION_ARGS) Node *parsetree = NULL; foreach_ptr(parsetree, parseTreeList) { - List *queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, - queryStringChar, - NULL, 0, NULL); + List *queryTreeList = 
pg_analyze_and_rewrite_fixedparams((RawStmt *) parsetree, + queryStringChar, + NULL, 0, NULL); Query *query = NULL; foreach_ptr(query, queryTreeList) diff --git a/src/backend/distributed/test/fake_am.c b/src/backend/distributed/test/fake_am.c index ce7784510..5a8ede316 100644 --- a/src/backend/distributed/test/fake_am.c +++ b/src/backend/distributed/test/fake_am.c @@ -20,6 +20,7 @@ #include "postgres.h" #include "distributed/pg_version_constants.h" +#include "pg_version_compat.h" #include "access/amapi.h" @@ -325,7 +326,7 @@ fake_relation_set_new_filenode(Relation rel, */ *minmulti = GetOldestMultiXactId(); - SMgrRelation srel = RelationCreateStorage(*newrnode, persistence); + SMgrRelation srel = RelationCreateStorage_compat(*newrnode, persistence, true); /* * If required, set up an init fork for an unlogged table so that it can @@ -446,20 +447,17 @@ fake_relation_size(Relation rel, ForkNumber forkNumber) uint64 nblocks = 0; - /* Open it at the smgr level if not already done */ - RelationOpenSmgr(rel); - /* InvalidForkNumber indicates returning the size for all forks */ if (forkNumber == InvalidForkNumber) { for (int i = 0; i < MAX_FORKNUM; i++) { - nblocks += smgrnblocks(rel->rd_smgr, i); + nblocks += smgrnblocks(RelationGetSmgr(rel), i); } } else { - nblocks = smgrnblocks(rel->rd_smgr, forkNumber); + nblocks = smgrnblocks(RelationGetSmgr(rel), forkNumber); } return nblocks * BLCKSZ; diff --git a/src/backend/distributed/test/shard_rebalancer.c b/src/backend/distributed/test/shard_rebalancer.c index ea770cb6e..f3640f415 100644 --- a/src/backend/distributed/test/shard_rebalancer.c +++ b/src/backend/distributed/test/shard_rebalancer.c @@ -28,7 +28,6 @@ #include "funcapi.h" #include "miscadmin.h" #include "utils/builtins.h" -#include "utils/int8.h" #include "utils/json.h" #include "utils/lsyscache.h" #include "utils/memutils.h" diff --git a/src/backend/distributed/test/shared_connection_counters.c b/src/backend/distributed/test/shared_connection_counters.c index e95a2ccbb..641cfd314 100644 --- a/src/backend/distributed/test/shared_connection_counters.c +++ b/src/backend/distributed/test/shared_connection_counters.c @@ -37,6 +37,29 @@ wake_up_connection_pool_waiters(PG_FUNCTION_ARGS) } +/* + * makeIntConst creates a Const Node that stores a given integer + * + * copied from backend/parser/gram.c + */ +static Node * +makeIntConst(int val, int location) +{ + A_Const *n = makeNode(A_Const); + +#if PG_VERSION_NUM >= PG_VERSION_15 + n->val.ival.type = T_Integer; + n->val.ival.ival = val; +#else + n->val.type = T_Integer; + n->val.val.ival = val; +#endif + n->location = location; + + return (Node *) n; +} + + /* * set_max_shared_pool_size is a SQL * interface for setting MaxSharedPoolSize. 
We use this function in isolation @@ -49,9 +72,8 @@ set_max_shared_pool_size(PG_FUNCTION_ARGS) AlterSystemStmt *alterSystemStmt = palloc0(sizeof(AlterSystemStmt)); - A_Const *aConstValue = makeNode(A_Const); + A_Const *aConstValue = castNode(A_Const, makeIntConst(value, 0)); - aConstValue->val = *makeInteger(value); alterSystemStmt->setstmt = makeNode(VariableSetStmt); alterSystemStmt->setstmt->name = "citus.max_shared_pool_size"; alterSystemStmt->setstmt->is_local = false; diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 925071c35..36a09c263 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -78,6 +78,19 @@ typedef struct BackendManagementShmemData BackendData backends[FLEXIBLE_ARRAY_MEMBER]; } BackendManagementShmemData; +/* + * CitusBackendType reflects what type of backend we are in. This + * can change depending on the application_name. + */ +typedef enum CitusBackendType +{ + CITUS_BACKEND_NOT_ASSIGNED, + CITUS_INTERNAL_BACKEND, + CITUS_REBALANCER_BACKEND, + CITUS_RUN_COMMAND_BACKEND, + EXTERNAL_CLIENT_BACKEND +} CitusBackendType; + static void StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescriptor); @@ -88,10 +101,12 @@ static uint64 GenerateGlobalPID(void); static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static BackendManagementShmemData *backendManagementShmemData = NULL; static BackendData *MyBackendData = NULL; +static CitusBackendType CurrentBackendType = CITUS_BACKEND_NOT_ASSIGNED; static void BackendManagementShmemInit(void); static size_t BackendManagementShmemSize(void); +static void DetermineCitusBackendType(void); PG_FUNCTION_INFO_V1(assign_distributed_transaction_id); @@ -1278,3 +1293,87 @@ DecrementExternalClientBackendCounter(void) { pg_atomic_sub_fetch_u32(&backendManagementShmemData->externalClientBackendCounter, 1); } + + +/* + * ResetCitusBackendType resets the backend type cache. + */ +void +ResetCitusBackendType(void) +{ + CurrentBackendType = CITUS_BACKEND_NOT_ASSIGNED; +} + + +/* + * IsRebalancerInitiatedBackend returns true if we are in a backend that citus + * rebalancer initiated. + */ +bool +IsRebalancerInternalBackend(void) +{ + if (CurrentBackendType == CITUS_BACKEND_NOT_ASSIGNED) + { + DetermineCitusBackendType(); + } + + return CurrentBackendType == CITUS_REBALANCER_BACKEND; +} + + +/* + * IsCitusInitiatedRemoteBackend returns true if we are in a backend that citus + * initiated via remote connection. + */ +bool +IsCitusInternalBackend(void) +{ + if (CurrentBackendType == CITUS_BACKEND_NOT_ASSIGNED) + { + DetermineCitusBackendType(); + } + + return CurrentBackendType == CITUS_INTERNAL_BACKEND; +} + + +/* + * IsCitusRunCommandBackend returns true if we are in a backend that one of + * the run_command_on_* functions initiated. + */ +bool +IsCitusRunCommandBackend(void) +{ + if (CurrentBackendType == CITUS_BACKEND_NOT_ASSIGNED) + { + DetermineCitusBackendType(); + } + + return CurrentBackendType == CITUS_RUN_COMMAND_BACKEND; +} + + +/* + * DetermineCitusBackendType determines the type of backend based on the application_name. 
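+ * The checks below are ordered: an application_name carrying a Citus global PID
+ * marks an internal backend; otherwise an exact match on the rebalancer or
+ * run_command application names selects those backend types; anything else is
+ * treated as an external client backend.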
+ */ +static void +DetermineCitusBackendType(void) +{ + if (ExtractGlobalPID(application_name) != INVALID_CITUS_INTERNAL_BACKEND_GPID) + { + CurrentBackendType = CITUS_INTERNAL_BACKEND; + } + else if (application_name && strcmp(application_name, CITUS_REBALANCER_NAME) == 0) + { + CurrentBackendType = CITUS_REBALANCER_BACKEND; + } + else if (application_name && + strcmp(application_name, CITUS_RUN_COMMAND_APPLICATION_NAME) == 0) + { + CurrentBackendType = CITUS_RUN_COMMAND_BACKEND; + } + else + { + CurrentBackendType = EXTERNAL_CLIENT_BACKEND; + } +} diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index 62b5e4e04..e672dafd8 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -309,7 +309,7 @@ ParseIntField(PGresult *result, int rowIndex, int colIndex) char *resultString = PQgetvalue(result, rowIndex, colIndex); - return pg_strtouint64(resultString, NULL, 10); + return strtou64(resultString, NULL, 10); } diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c index 2859ec4c9..55a560575 100644 --- a/src/backend/distributed/transaction/remote_transaction.c +++ b/src/backend/distributed/transaction/remote_transaction.c @@ -1408,7 +1408,7 @@ ParsePreparedTransactionName(char *preparedTransactionName, /* step ahead of the current '_' character */ ++currentCharPointer; - *transactionNumber = pg_strtouint64(currentCharPointer, NULL, 10); + *transactionNumber = strtou64(currentCharPointer, NULL, 10); if ((*transactionNumber == 0 && errno != 0) || (*transactionNumber == ULLONG_MAX && errno == ERANGE)) { diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 0337411cb..a46a5f198 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -40,6 +40,7 @@ #include "distributed/version_compat.h" #include "distributed/worker_log_messages.h" #include "distributed/commands.h" +#include "distributed/metadata_cache.h" #include "utils/hsearch.h" #include "utils/guc.h" #include "utils/memutils.h" @@ -205,8 +206,17 @@ InCoordinatedTransaction(void) void Use2PCForCoordinatedTransaction(void) { - Assert(InCoordinatedTransaction()); - + /* + * If this transaction is also a coordinated + * transaction, use 2PC. Otherwise, this + * state change does nothing. + * + * In other words, when this flag is set, + * we "should" use 2PC when needed (e.g., + * we are in a coordinated transaction and + * the coordinated transaction does a remote + * modification). + */ ShouldCoordinatedTransactionUse2PC = true; } @@ -309,6 +319,17 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) /* empty the CommitContext to ensure we're not leaking memory */ MemoryContextSwitchTo(previousContext); MemoryContextReset(CommitContext); + + /* Set CreateCitusTransactionLevel to 0 since original transaction is about to be + * committed. 
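+ * GetCitusCreationLevel() records the nesting level at which CREATE EXTENSION
+ * citus ran; by the time the top-level transaction commits it should have
+ * unwound to 1, which the assertion below verifies before the level is reset to 0.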
+ */ + + if (GetCitusCreationLevel() > 0) + { + /* Check CitusCreationLevel was correctly decremented to 1 */ + Assert(GetCitusCreationLevel() == 1); + SetCreateCitusTransactionLevel(0); + } break; } @@ -353,6 +374,20 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) ResetGlobalVariables(); + /* + * Clear MetadataCache table if we're aborting from a CREATE EXTENSION Citus + * so that any created OIDs from the table are cleared and invalidated. We + * also set CreateCitusTransactionLevel to 0 since that process has been aborted + */ + if (GetCitusCreationLevel() > 0) + { + /* Checks CitusCreationLevel correctly decremented to 1 */ + Assert(GetCitusCreationLevel() == 1); + + InvalidateMetadataSystemCache(); + SetCreateCitusTransactionLevel(0); + } + /* * Make sure that we give the shared connections back to the shared * pool if any. This operation is a no-op if the reserved connections @@ -556,6 +591,7 @@ ResetGlobalVariables() TransactionModifiedNodeMetadata = false; NodeMetadataSyncOnCommit = false; InTopLevelDelegatedFunctionCall = false; + InTableTypeConversionFunctionCall = false; ResetWorkerErrorIndication(); memset(&AllowedDistributionColumnValue, 0, sizeof(AllowedDistributionColumn)); @@ -599,6 +635,13 @@ CoordinatedSubTransactionCallback(SubXactEvent event, SubTransactionId subId, CoordinatedRemoteTransactionsSavepointRelease(subId); } PopSubXact(subId); + + /* Set CachedDuringCitusCreation to one level lower to represent citus creation is done */ + + if (GetCitusCreationLevel() == GetCurrentTransactionNestLevel()) + { + SetCreateCitusTransactionLevel(GetCitusCreationLevel() - 1); + } break; } @@ -621,6 +664,16 @@ CoordinatedSubTransactionCallback(SubXactEvent event, SubTransactionId subId, } PopSubXact(subId); + /* + * Clear MetadataCache table if we're aborting from a CREATE EXTENSION Citus + * so that any created OIDs from the table are cleared and invalidated. We + * also set CreateCitusTransactionLevel to 0 since subtransaction has been aborted + */ + if (GetCitusCreationLevel() == GetCurrentTransactionNestLevel()) + { + InvalidateMetadataSystemCache(); + SetCreateCitusTransactionLevel(0); + } break; } diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 69a318023..6b4b1a351 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -149,7 +149,7 @@ List * TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) { List *workerNodeList = NIL; - if (targetWorkerSet == ALL_SHARD_NODES) + if (targetWorkerSet == ALL_SHARD_NODES || targetWorkerSet == METADATA_NODES) { workerNodeList = ActivePrimaryNodeList(lockMode); } @@ -162,7 +162,9 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) WorkerNode *workerNode = NULL; foreach_ptr(workerNode, workerNodeList) { - if (targetWorkerSet == NON_COORDINATOR_METADATA_NODES && !workerNode->hasMetadata) + if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || targetWorkerSet == + METADATA_NODES) && + !workerNode->hasMetadata) { continue; } @@ -488,27 +490,70 @@ SendCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort, * coordinated transaction. Any failures aborts the coordinated transaction. 
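+ * The list-based variant below establishes all worker connections up front, then
+ * joins the command list into a single string and sends it to every node, so the
+ * number of round trips does not grow with the number of commands.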
*/ void -SendMetadataCommandListToWorkerInCoordinatedTransaction(const char *nodeName, - int32 nodePort, - const char *nodeUser, - List *commandList) +SendMetadataCommandListToWorkerListInCoordinatedTransaction(List *workerNodeList, + const char *nodeUser, + List *commandList) { - int connectionFlags = REQUIRE_METADATA_CONNECTION; + if (list_length(commandList) == 0 || list_length(workerNodeList) == 0) + { + /* nothing to do */ + return; + } UseCoordinatedTransaction(); - MultiConnection *workerConnection = GetNodeUserDatabaseConnection(connectionFlags, - nodeName, nodePort, - nodeUser, NULL); + List *connectionList = NIL; - MarkRemoteTransactionCritical(workerConnection); - RemoteTransactionBeginIfNecessary(workerConnection); - - /* iterate over the commands and execute them in the same connection */ - const char *commandString = NULL; - foreach_ptr(commandString, commandList) + WorkerNode *workerNode = NULL; + foreach_ptr(workerNode, workerNodeList) { - ExecuteCriticalRemoteCommand(workerConnection, commandString); + const char *nodeName = workerNode->workerName; + int nodePort = workerNode->workerPort; + int connectionFlags = REQUIRE_METADATA_CONNECTION; + + MultiConnection *connection = + StartNodeConnection(connectionFlags, nodeName, nodePort); + + MarkRemoteTransactionCritical(connection); + + /* + * connection can only be NULL for optional connections, which we don't + * support in this codepath. + */ + Assert((connectionFlags & OPTIONAL_CONNECTION) == 0); + Assert(connection != NULL); + connectionList = lappend(connectionList, connection); + } + + FinishConnectionListEstablishment(connectionList); + + /* must open transaction blocks to use intermediate results */ + RemoteTransactionsBeginIfNecessary(connectionList); + + /* + * In order to avoid round-trips per query in queryStringList, + * we join the string and send as a single command. Also, + * if there is only a single command, avoid additional call to + * StringJoin given that some strings can be quite large. + */ + char *stringToSend = (list_length(commandList) == 1) ? 
+ linitial(commandList) : StringJoin(commandList, ';'); + + /* send commands in parallel */ + bool failOnError = true; + MultiConnection *connection = NULL; + foreach_ptr(connection, connectionList) + { + int querySent = SendRemoteCommand(connection, stringToSend); + if (querySent == 0) + { + ReportConnectionError(connection, ERROR); + } + } + + foreach_ptr(connection, connectionList) + { + ClearResults(connection, failOnError); } } diff --git a/src/backend/distributed/utils/listutils.c b/src/backend/distributed/utils/listutils.c index 9fb624233..ce2920748 100644 --- a/src/backend/distributed/utils/listutils.c +++ b/src/backend/distributed/utils/listutils.c @@ -245,3 +245,24 @@ GenerateListFromElement(void *listElement, int listLength) return list; } + + +/* + * list_filter_oid filters a list of oid-s based on a keepElement + * function + */ +List * +list_filter_oid(List *list, bool (*keepElement)(Oid element)) +{ + List *result = NIL; + Oid element = InvalidOid; + foreach_oid(element, list) + { + if (keepElement(element)) + { + result = lappend_oid(result, element); + } + } + + return result; +} diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index a7477e5e5..e11bc5419 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -917,7 +917,7 @@ try_relation_open_nolock(Oid relationId) return NULL; } - pgstat_initstats(relation); + pgstat_init_relation(relation); return relation; } diff --git a/src/backend/distributed/utils/param_utils.c b/src/backend/distributed/utils/param_utils.c new file mode 100644 index 000000000..8aefecb7d --- /dev/null +++ b/src/backend/distributed/utils/param_utils.c @@ -0,0 +1,88 @@ +/*------------------------------------------------------------------------- + * + * param_utils.c + * Utilities to process paramaters. + * + * Copyright (c) Citus Data, Inc. + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include +#include +#include +#include +#include +#include +#include "distributed/param_utils.h" + +/* + * IsExternParamUsedInQuery returns true if the passed in paramId + * is used in the query, false otherwise. + */ +bool +GetParamsUsedInQuery(Node *expression, Bitmapset **paramBitmap) +{ + if (expression == NULL) + { + return false; + } + + if (IsA(expression, Param)) + { + Param *param = (Param *) expression; + int paramId = param->paramid; + + /* only care about user supplied parameters */ + if (param->paramkind != PARAM_EXTERN) + { + return false; + } + + /* Found a parameter, mark it in the bitmap and continue */ + *paramBitmap = bms_add_member(*paramBitmap, paramId); + + /* Continue searching */ + return false; + } + + /* keep traversing */ + if (IsA(expression, Query)) + { + return query_tree_walker((Query *) expression, + GetParamsUsedInQuery, + paramBitmap, + 0); + } + else + { + return expression_tree_walker(expression, + GetParamsUsedInQuery, + paramBitmap); + } +} + + +/* + * MarkUnreferencedExternParams marks parameter's type to zero if the + * parameter is not used in the query. 
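+ * It walks the expression with GetParamsUsedInQuery() to collect the ids of all
+ * PARAM_EXTERN parameters that are actually referenced, then zeroes ptype for
+ * every bound parameter whose id never shows up in that bitmap.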
+ */ +void +MarkUnreferencedExternParams(Node *expression, ParamListInfo boundParams) +{ + int parameterCount = boundParams->numParams; + Bitmapset *paramBitmap = NULL; + + /* Fetch all parameters used in the query */ + GetParamsUsedInQuery(expression, &paramBitmap); + + /* Check for any missing parameters */ + for (int parameterNum = 1; parameterNum <= parameterCount; parameterNum++) + { + if (!bms_is_member(parameterNum, paramBitmap)) + { + boundParams->params[parameterNum - 1].ptype = 0; + } + } +} diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c index 70f56119c..5c6775c80 100644 --- a/src/backend/distributed/utils/reference_table_utils.c +++ b/src/backend/distributed/utils/reference_table_utils.c @@ -356,9 +356,10 @@ ReplicateReferenceTableShardToNode(ShardInterval *shardInterval, char *nodeName, /* send commands to new workers, the current user should be a superuser */ Assert(superuser()); - SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort, - CurrentUserName(), - ddlCommandList); + WorkerNode *workerNode = FindWorkerNode(nodeName, nodePort); + SendMetadataCommandListToWorkerListInCoordinatedTransaction(list_make1(workerNode), + CurrentUserName(), + ddlCommandList); int32 groupId = GroupForNode(nodeName, nodePort); uint64 placementId = GetNextPlacementId(); @@ -599,9 +600,8 @@ ReplicateAllReferenceTablesToNode(WorkerNode *workerNode) /* send commands to new workers, the current user should be a superuser */ Assert(superuser()); - SendMetadataCommandListToWorkerInCoordinatedTransaction( - workerNode->workerName, - workerNode->workerPort, + SendMetadataCommandListToWorkerListInCoordinatedTransaction( + list_make1(workerNode), + CurrentUserName(), commandList); } diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index a92426f5b..3a4beef17 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -21,6 +21,7 @@ #include "catalog/namespace.h" #include "commands/tablecmds.h" #include "distributed/colocation_utils.h" +#include "distributed/commands.h" #include "distributed/listutils.h" #include "distributed/metadata_utility.h" #include "distributed/coordinator_protocol.h" @@ -38,11 +39,15 @@ #include "distributed/worker_protocol.h" #include "distributed/utils/array_type.h" #include "distributed/version_compat.h" +#include "distributed/local_executor.h" +#include "distributed/worker_shard_visibility.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/varlena.h" +#define LOCK_RELATION_IF_EXISTS \ + "SELECT pg_catalog.lock_relation_if_exists(%s, %s);" /* static definition and declarations */ struct LockModeToStringType @@ -69,6 +74,17 @@ static const struct LockModeToStringType lockmode_to_string_map[] = { static const int lock_mode_to_string_map_count = sizeof(lockmode_to_string_map) / sizeof(lockmode_to_string_map[0]); +/* + * LockRelationRecord holds the oid of a relation to be locked + * and a boolean inh to determine whether its descendants + * should be locked as well + */ +typedef struct LockRelationRecord +{ + Oid relationId; + bool inh; +} LockRelationRecord; + /* local function forward declarations */ static LOCKMODE IntToLockMode(int mode); @@ -90,6 +106,8 @@ PG_FUNCTION_INFO_V1(lock_shard_metadata); PG_FUNCTION_INFO_V1(lock_shard_resources); PG_FUNCTION_INFO_V1(lock_relation_if_exists); +/* Config variable managed via guc.c */ +bool
EnableAcquiringUnsafeLockFromWorkers = false; /* * lock_shard_metadata allows the shard distribution metadata to be locked @@ -975,9 +993,6 @@ lock_relation_if_exists(PG_FUNCTION_ARGS) text *lockModeText = PG_GETARG_TEXT_P(1); char *lockModeCString = text_to_cstring(lockModeText); - /* ensure that we're in a transaction block */ - RequireTransactionBlock(true, "lock_relation_if_exists"); - /* get the lock mode */ LOCKMODE lockMode = LockModeTextToLockMode(lockModeCString); @@ -1060,3 +1075,355 @@ CitusLockTableAclCheck(Oid relationId, LOCKMODE lockmode, Oid userId) return aclResult; } + + +/* + * EnsureCanAcquireLock checks if currect user has the permissions + * to acquire a lock on the table and throws an error if the user does + * not have the permissions + */ +static void +EnsureCanAcquireLock(Oid relationId, LOCKMODE lockMode) +{ + AclResult aclResult = CitusLockTableAclCheck(relationId, lockMode, + GetUserId()); + if (aclResult != ACLCHECK_OK) + { + aclcheck_error(aclResult, + get_relkind_objtype(get_rel_relkind(relationId)), + get_rel_name(relationId)); + } +} + + +/* + * CreateLockTerminationString creates a string that can be appended to the + * end of a partial lock command to properly terminate the command + */ +static const char * +CreateLockTerminationString(const char *lockModeText, bool nowait) +{ + StringInfo lockTerminationStringInfo = makeStringInfo(); + appendStringInfo(lockTerminationStringInfo, nowait ? " IN %s MODE NOWAIT;\n" : + " IN %s MODE;\n", lockModeText); + return lockTerminationStringInfo->data; +} + + +/* + * FinishLockCommandIfNecessary appends the lock termination string if the lock command is partial. + * Sets the partialLockCommand flag to false + */ +static void +FinishLockCommandIfNecessary(StringInfo lockCommand, const char *lockTerminationString, + bool *partialLockCommand) +{ + if (*partialLockCommand) + { + appendStringInfo(lockCommand, "%s", lockTerminationString); + } + + *partialLockCommand = false; +} + + +/* + * LockRelationRecordListMember checks if a relation id is present in the + * LockRelationRecord list + */ +static bool +LockRelationRecordListMember(List *lockRelationRecordList, Oid relationId) +{ + LockRelationRecord *record = NULL; + foreach_ptr(record, lockRelationRecordList) + { + if (record->relationId == relationId) + { + return true; + } + } + + return false; +} + + +/* + * MakeLockRelationRecord makes a LockRelationRecord using the relation oid + * and the inh boolean while properly allocating the structure + */ +static LockRelationRecord * +MakeLockRelationRecord(Oid relationId, bool inh) +{ + LockRelationRecord *lockRelationRecord = palloc(sizeof(LockRelationRecord)); + lockRelationRecord->relationId = relationId; + lockRelationRecord->inh = inh; + return lockRelationRecord; +} + + +/* + * ConcatLockRelationRecordList concats a list of LockRelationRecord with + * another list of LockRelationRecord created from a list of relation oid-s + * which are not present in the first list and an inh bool which will be + * applied across all LockRelationRecords + */ +static List * +ConcatLockRelationRecordList(List *lockRelationRecordList, List *relationOidList, bool + inh) +{ + List *constructedList = NIL; + + Oid relationId = InvalidOid; + foreach_oid(relationId, relationOidList) + { + if (!LockRelationRecordListMember(lockRelationRecordList, relationId)) + { + LockRelationRecord *record = MakeLockRelationRecord(relationId, inh); + constructedList = lappend(constructedList, (void *) record); + } + } + + return 
list_concat(lockRelationRecordList, constructedList); +} + + +/* + * AcquireDistributedLockOnRelations_Internal acquire a distributed lock on worker nodes + * for given list of relations ids. Worker node list is sorted so that the lock + * is acquired in the same order regardless of which node it was run on. + * + * A nowait flag is used to require the locks to be available immediately + * and if that is not the case, an error will be thrown + * + * Notice that no validation or filtering is done on the relationIds, that is the responsibility + * of this function's caller. + */ +static void +AcquireDistributedLockOnRelations_Internal(List *lockRelationRecordList, + LOCKMODE lockMode, bool nowait) +{ + const char *lockModeText = LockModeToLockModeText(lockMode); + + UseCoordinatedTransaction(); + + StringInfo lockRelationsCommand = makeStringInfo(); + appendStringInfo(lockRelationsCommand, "%s;\n", DISABLE_DDL_PROPAGATION); + + /* + * In the following loop, when there are foreign tables, we need to switch from + * LOCK * statement to lock_relation_if_exists() and vice versa since pg + * does not support using LOCK on foreign tables. + */ + bool startedLockCommand = false; + + /* create a lock termination string used to terminate a partial lock command */ + const char *lockTerminationString = CreateLockTerminationString(lockModeText, nowait); + + int lockedRelations = 0; + LockRelationRecord *lockRelationRecord; + foreach_ptr(lockRelationRecord, lockRelationRecordList) + { + Oid relationId = lockRelationRecord->relationId; + bool lockDescendants = lockRelationRecord->inh; + char *qualifiedRelationName = generate_qualified_relation_name(relationId); + + /* + * As of pg14 we cannot use LOCK to lock a FOREIGN TABLE + * so we have to use the lock_relation_if_exist udf + */ + if (get_rel_relkind(relationId) == RELKIND_FOREIGN_TABLE) + { + FinishLockCommandIfNecessary(lockRelationsCommand, lockTerminationString, + &startedLockCommand); + + /* + * The user should not be able to trigger this codepath + * with nowait = true or lockDescendants = false since the + * only way to do that is calling LOCK * IN * MODE NOWAIT; + * and LOCK ONLY * IN * MODE; respectively but foreign tables + * cannot be locked with LOCK command as of pg14 + */ + Assert(nowait == false); + Assert(lockDescendants == true); + + /* use lock_relation_if_exits to lock foreign table */ + appendStringInfo(lockRelationsCommand, LOCK_RELATION_IF_EXISTS, + quote_literal_cstr(qualifiedRelationName), + quote_literal_cstr(lockModeText)); + appendStringInfoChar(lockRelationsCommand, '\n'); + } + else if (startedLockCommand) + { + /* append relation to partial lock statement */ + appendStringInfo(lockRelationsCommand, ",%s%s", + lockDescendants ? " " : " ONLY ", + qualifiedRelationName); + } + else + { + /* start a new partial lock statement */ + appendStringInfo(lockRelationsCommand, "LOCK%s%s", + lockDescendants ? " " : " ONLY ", + qualifiedRelationName); + startedLockCommand = true; + } + + lockedRelations++; + } + + if (lockedRelations == 0) + { + return; + } + + FinishLockCommandIfNecessary(lockRelationsCommand, lockTerminationString, + &startedLockCommand); + + appendStringInfo(lockRelationsCommand, ENABLE_DDL_PROPAGATION); + char *lockCommand = lockRelationsCommand->data; + + List *workerNodeList = TargetWorkerSetNodeList(METADATA_NODES, NoLock); + + /* + * We want to acquire locks in the same order across the nodes. + * Although relation ids may change, their ordering will not. 
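+ * Taking the locks in one consistent order keeps two concurrent LOCK commands
+ * that start from different nodes from deadlocking against each other.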
+ */ + workerNodeList = SortList(workerNodeList, CompareWorkerNodes); + + int32 localGroupId = GetLocalGroupId(); + + WorkerNode *workerNode = NULL; + const char *currentUser = CurrentUserName(); + foreach_ptr(workerNode, workerNodeList) + { + /* if local node is one of the targets, acquire the lock locally */ + if (workerNode->groupId == localGroupId) + { + ExecuteUtilityCommand(lockCommand); + continue; + } + + SendMetadataCommandListToWorkerListInCoordinatedTransaction(list_make1( + workerNode), + currentUser, + list_make1( + lockCommand)); + } +} + + +/* + * AcquireDistributedLockOnRelations filters relations before passing them to + * AcquireDistributedLockOnRelations_Internal to acquire the locks. + * + * Only tables, views, and foreign tables can be locked with this function. Other relations + * will cause an error. + * + * Skips non distributed relations. + * configs parameter is used to configure what relation will be locked and if the lock + * should throw an error if it cannot be acquired immediately + */ +void +AcquireDistributedLockOnRelations(List *relationList, LOCKMODE lockMode, uint32 configs) +{ + if (!ClusterHasKnownMetadataWorkers() || !EnableMetadataSync || !EnableDDLPropagation) + { + return; + } + + /* used to store the relations that will be locked */ + List *distributedRelationRecordsList = NIL; + + bool nowait = (configs & DIST_LOCK_NOWAIT) > 0; + + RangeVar *rangeVar = NULL; + foreach_ptr(rangeVar, relationList) + { + Oid relationId = RangeVarGetRelid(rangeVar, NoLock, false); + + LockRelationRecord *lockRelationRecord = MakeLockRelationRecord(relationId, + rangeVar->inh); + + /* + * Note that allowing the user to lock shards could lead to + * distributed deadlocks due to shards not being locked when + * a distributed table is locked. + * However, because citus.enable_manual_changes_to_shards + * is a guc which is not visible by default, whoever is using this + * guc will hopefully know what they're doing and avoid such scenarios. + */ + ErrorIfIllegallyChangingKnownShard(relationId); + + /* + * we want to prevent under privileged users to trigger establishing connections, + * that's why we have this additional check. 
Otherwise, we'd get the errors as + * soon as we execute the command on any node + */ + EnsureCanAcquireLock(relationId, lockMode); + + bool isView = get_rel_relkind(relationId) == RELKIND_VIEW; + if ((isView && !IsViewDistributed(relationId)) || + (!isView && !ShouldSyncTableMetadata(relationId))) + { + continue; + } + + if (!LockRelationRecordListMember(distributedRelationRecordsList, relationId)) + { + distributedRelationRecordsList = lappend(distributedRelationRecordsList, + (void *) lockRelationRecord); + } + + if ((configs & DIST_LOCK_REFERENCING_TABLES) > 0) + { + CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); + Assert(cacheEntry != NULL); + + List *referencingTableList = cacheEntry->referencingRelationsViaForeignKey; + + /* remove the relations which should not be synced */ + referencingTableList = list_filter_oid(referencingTableList, + &ShouldSyncTableMetadata); + + distributedRelationRecordsList = ConcatLockRelationRecordList( + distributedRelationRecordsList, referencingTableList, true); + } + } + + if (distributedRelationRecordsList != NIL) + { + if (!IsCoordinator() && !CoordinatorAddedAsWorkerNode() && + !EnableAcquiringUnsafeLockFromWorkers) + { + ereport(ERROR, + (errmsg( + "Cannot acquire a distributed lock from a worker node since the " + "coordinator is not in the metadata."), + errhint( + "Either run this command on the coordinator or add the coordinator " + "in the metadata by using: SELECT citus_set_coordinator_host('', );\n" + "Alternatively, though it is not recommended, you can allow this command by running: " + "SET citus.allow_unsafe_locks_from_workers TO 'on';"))); + } + + AcquireDistributedLockOnRelations_Internal(distributedRelationRecordsList, + lockMode, nowait); + } +} + + +/** + * PreprocessLockStatement makes sure that the lock is allowed + * before calling AcquireDistributedLockOnRelations on the locked tables/views + * with flags DIST_LOCK_VIEWS_RECUR and DIST_LOCK_NOWAIT if nowait is true for + * the lock statement + */ +void +PreprocessLockStatement(LockStmt *stmt, ProcessUtilityContext context) +{ + bool isTopLevel = context == PROCESS_UTILITY_TOPLEVEL; + RequireTransactionBlock(isTopLevel, "LOCK TABLE"); + + uint32 nowaitFlag = stmt->nowait ? 
DIST_LOCK_NOWAIT : 0; + AcquireDistributedLockOnRelations(stmt->relations, stmt->mode, nowaitFlag); +} diff --git a/src/backend/distributed/worker/worker_data_fetch_protocol.c b/src/backend/distributed/worker/worker_data_fetch_protocol.c index 5e78c19ce..cbc7af89a 100644 --- a/src/backend/distributed/worker/worker_data_fetch_protocol.c +++ b/src/backend/distributed/worker/worker_data_fetch_protocol.c @@ -387,7 +387,7 @@ ExtractShardIdFromTableName(const char *tableName, bool missingOk) shardIdString++; errno = 0; - uint64 shardId = pg_strtouint64(shardIdString, &shardIdStringEnd, 0); + uint64 shardId = strtou64(shardIdString, &shardIdStringEnd, 0); if (errno != 0 || (*shardIdStringEnd != '\0')) { diff --git a/src/backend/distributed/worker/worker_shard_visibility.c b/src/backend/distributed/worker/worker_shard_visibility.c index da9c87a22..9d041f4a9 100644 --- a/src/backend/distributed/worker/worker_shard_visibility.c +++ b/src/backend/distributed/worker/worker_shard_visibility.c @@ -14,6 +14,7 @@ #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" +#include "distributed/backend_data.h" #include "distributed/metadata_cache.h" #include "distributed/coordinator_protocol.h" #include "distributed/listutils.h" @@ -40,8 +41,8 @@ typedef enum HideShardsMode bool OverrideTableVisibility = true; bool EnableManualChangesToShards = false; -/* hide shards when the application_name starts with one of: */ -char *HideShardsFromAppNamePrefixes = "*"; +/* show shards when the application_name starts with one of: */ +char *ShowShardsForAppNamePrefixes = ""; /* cache of whether or not to hide shards */ static HideShardsMode HideShards = CHECK_APPLICATION_NAME; @@ -152,8 +153,10 @@ ErrorIfRelationIsAKnownShard(Oid relationId) void ErrorIfIllegallyChangingKnownShard(Oid relationId) { - if (LocalExecutorLevel > 0 || - (IsCitusInternalBackend() || IsRebalancerInternalBackend()) || + /* allow Citus to make changes, and allow the user if explicitly enabled */ + if (LocalExecutorShardId != INVALID_SHARD_ID || + IsCitusInternalBackend() || + IsRebalancerInternalBackend() || EnableManualChangesToShards) { return; @@ -271,8 +274,8 @@ RelationIsAKnownShard(Oid shardRelationId) /* * HideShardsFromSomeApplications transforms queries to pg_class to - * filter out known shards if the application_name matches any of - * the prefixes in citus.hide_shards_from_app_name_prefixes. + * filter out known shards if the application_name does not match any of + * the prefixes in citus.show_shards_for_app_name_prefix. */ void HideShardsFromSomeApplications(Query *query) @@ -294,7 +297,7 @@ HideShardsFromSomeApplications(Query *query) * ShouldHideShards returns whether we should hide shards in the current * session. It only checks the application_name once and then uses a * cached response unless either the application_name or - * citus.hide_shards_from_app_name_prefixes changes. + * citus.show_shards_for_app_name_prefix changes. 
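+ * Shards stay hidden unless application_name starts with one of the configured
+ * prefixes or the list contains '*'; Citus-internal backends always see shards.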
*/ static bool ShouldHideShards(void) @@ -358,7 +361,8 @@ ShouldHideShardsInternal(void) return false; } - if (IsCitusInternalBackend() || IsRebalancerInternalBackend()) + if (IsCitusInternalBackend() || IsRebalancerInternalBackend() || + IsCitusRunCommandBackend()) { /* we never hide shards from Citus */ return false; @@ -367,32 +371,33 @@ ShouldHideShardsInternal(void) List *prefixList = NIL; /* SplitGUCList scribbles on the input */ - char *splitCopy = pstrdup(HideShardsFromAppNamePrefixes); + char *splitCopy = pstrdup(ShowShardsForAppNamePrefixes); if (!SplitGUCList(splitCopy, ',', &prefixList)) { /* invalid GUC value, ignore */ - return false; + return true; } char *appNamePrefix = NULL; foreach_ptr(appNamePrefix, prefixList) { - /* always hide shards when one of the prefixes is * */ + /* never hide shards when one of the prefixes is * */ if (strcmp(appNamePrefix, "*") == 0) { - return true; + return false; } /* compare only the first first characters */ int prefixLength = strlen(appNamePrefix); if (strncmp(application_name, appNamePrefix, prefixLength) == 0) { - return true; + return false; } } - return false; + /* default behaviour: hide shards */ + return true; } diff --git a/src/include/columnar/columnar.h b/src/include/columnar/columnar.h index 4d31a45ed..e8868808e 100644 --- a/src/include/columnar/columnar.h +++ b/src/include/columnar/columnar.h @@ -25,6 +25,7 @@ #include "columnar/columnar_compression.h" #include "columnar/columnar_metadata.h" +#define COLUMNAR_AM_NAME "columnar" #define COLUMNAR_MODULE_NAME "citus_columnar" #define COLUMNAR_SETOPTIONS_HOOK_SYM "ColumnarTableSetOptions_hook" diff --git a/src/include/columnar/columnar_metadata.h b/src/include/columnar/columnar_metadata.h index 60669a248..c17799483 100644 --- a/src/include/columnar/columnar_metadata.h +++ b/src/include/columnar/columnar_metadata.h @@ -51,5 +51,7 @@ typedef struct EmptyStripeReservation extern List * StripesForRelfilenode(RelFileNode relfilenode); extern void ColumnarStorageUpdateIfNeeded(Relation rel, bool isUpgrade); +extern List * ExtractColumnarRelOptions(List *inOptions, List **outColumnarOptions); +extern void SetColumnarRelOptions(RangeVar *rv, List *reloptions); #endif /* COLUMNAR_METADATA_H */ diff --git a/src/include/columnar/columnar_version_compat.h b/src/include/columnar/columnar_version_compat.h index 45b8a0e55..611b40d15 100644 --- a/src/include/columnar/columnar_version_compat.h +++ b/src/include/columnar/columnar_version_compat.h @@ -14,6 +14,14 @@ #include "distributed/pg_version_constants.h" +#if PG_VERSION_NUM >= PG_VERSION_15 +#define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \ + ExecARDeleteTriggers(a, b, c, d, e, f) +#else +#define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \ + ExecARDeleteTriggers(a, b, c, d, e) +#endif + #if PG_VERSION_NUM >= PG_VERSION_14 #define ColumnarProcessUtility_compat(a, b, c, d, e, f, g, h) \ ColumnarProcessUtility(a, b, c, d, e, f, g, h) diff --git a/src/include/distributed/backend_data.h b/src/include/distributed/backend_data.h index ccb4da535..62fcfede3 100644 --- a/src/include/distributed/backend_data.h +++ b/src/include/distributed/backend_data.h @@ -69,10 +69,10 @@ extern LocalTransactionId GetMyProcLocalTransactionId(void); extern int GetExternalClientBackendCount(void); extern uint32 IncrementExternalClientBackendCounter(void); extern void DecrementExternalClientBackendCounter(void); - -extern bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, - char *queryString, StringInfo queryResultString, - bool 
reportResultError); +extern bool IsCitusInternalBackend(void); +extern bool IsRebalancerInternalBackend(void); +extern bool IsCitusRunCommandBackend(void); +extern void ResetCitusBackendType(void); #define INVALID_CITUS_INTERNAL_BACKEND_GPID 0 #define GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA 99999999 diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h index 03d58d031..f84307fc8 100644 --- a/src/include/distributed/citus_ruleutils.h +++ b/src/include/distributed/citus_ruleutils.h @@ -46,6 +46,7 @@ extern char * pg_get_indexclusterdef_string(Oid indexRelationId); extern bool contain_nextval_expression_walker(Node *node, void *context); extern char * pg_get_replica_identity_command(Oid tableRelationId); extern const char * RoleSpecString(RoleSpec *spec, bool withQuoteIdentifier); +extern char * flatten_reloptions(Oid relid); /* Function declarations for version dependent PostgreSQL ruleutils functions */ extern void pg_get_query_def(Query *query, StringInfo buffer); diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 42e7703c7..b612221aa 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -63,6 +63,15 @@ typedef struct DistributeObjectOps List * (*postprocess)(Node *, const char *); ObjectAddress (*address)(Node *, bool); bool markDistributed; + + /* fields used by common implementations, omitted for specialized implementations */ + ObjectType objectType; + + /* + * Points to the varriable that contains the GUC'd feature flag, when turned off the + * common propagation functions will not propagate the creation of the object. + */ + bool *featureFlag; } DistributeObjectOps; #define CITUS_TRUNCATE_TRIGGER_NAME "citus_truncate_trigger" @@ -122,15 +131,21 @@ typedef enum SearchForeignKeyColumnFlags } SearchForeignKeyColumnFlags; -/* aggregate.c - forward declarations */ -extern List * PreprocessDefineAggregateStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessDefineAggregateStmt(Node *node, const char *queryString); - /* cluster.c - forward declarations */ extern List * PreprocessClusterStmt(Node *node, const char *clusterCommand, ProcessUtilityContext processUtilityContext); +/* common.c - forward declarations*/ +extern List * PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, + const char *queryString); +extern List * PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PostprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString); +extern List * PreprocessDropDistributedObjectStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext); + /* index.c */ typedef void (*PGIndexProcessor)(Form_pg_index, List **, int); @@ -143,54 +158,17 @@ extern bool CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *d extern char * CreateCollationDDL(Oid collationId); extern List * CreateCollationDDLsIdempotent(Oid collationId); extern ObjectAddress AlterCollationOwnerObjectAddress(Node *stmt, bool missing_ok); -extern List * PreprocessDropCollationStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PreprocessAlterCollationOwnerStmt(Node *stmt, const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PostprocessAlterCollationOwnerStmt(Node *node, const char *queryString); -extern 
List * PreprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessRenameCollationStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); extern ObjectAddress RenameCollationStmtObjectAddress(Node *stmt, bool missing_ok); extern ObjectAddress AlterCollationSchemaStmtObjectAddress(Node *stmt, bool missing_ok); -extern List * PostprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString); extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *address); extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok); -extern List * PreprocessDefineCollationStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessDefineCollationStmt(Node *stmt, const char *queryString); /* database.c - forward declarations */ -extern List * PreprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString); extern ObjectAddress AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok); extern List * DatabaseOwnerDDLCommands(const ObjectAddress *address); /* domain.c - forward declarations */ -extern List * PreprocessCreateDomainStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessCreateDomainStmt(Node *node, const char *queryString); -extern List * PreprocessDropDomainStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PreprocessAlterDomainStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessAlterDomainStmt(Node *node, const char *queryString); -extern List * PreprocessDomainRenameConstraintStmt(Node *node, const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterDomainOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessAlterDomainOwnerStmt(Node *node, const char *queryString); -extern List * PreprocessRenameDomainStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PreprocessAlterDomainSchemaStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessAlterDomainSchemaStmt(Node *node, const char *queryString); extern ObjectAddress CreateDomainStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterDomainStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress DomainRenameConstraintStmtObjectAddress(Node *node, @@ -229,6 +207,7 @@ extern ObjectAddress AlterExtensionUpdateStmtObjectAddress(Node *stmt, bool missing_ok); extern void CreateExtensionWithVersion(char *extname, char *extVersion); extern void AlterExtensionUpdateStmt(char *extname, char *extVersion); +extern double GetExtensionVersionNumber(char *extVersion); /* foreign_constraint.c - forward declarations */ extern bool ConstraintIsAForeignKeyToReferenceTable(char *constraintName, @@ -267,23 +246,9 @@ extern Oid GetReferencingTableId(Oid foreignKeyId); extern bool RelationInvolvedInAnyNonInheritedForeignKeys(Oid relationId); /* foreign_server.c - forward declarations */ -extern List * PreprocessCreateForeignServerStmt(Node *node, const char *queryString, - 
ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterForeignServerStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PreprocessRenameForeignServerStmt(Node *node, const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessDropForeignServerStmt(Node *node, const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PostprocessCreateForeignServerStmt(Node *node, const char *queryString); -extern List * PostprocessAlterForeignServerOwnerStmt(Node *node, const char *queryString); extern ObjectAddress CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterForeignServerOwnerStmtObjectAddress(Node *node, bool missing_ok); extern List * GetForeignServerCreateDDLCommand(Oid serverId); @@ -308,24 +273,12 @@ extern List * PreprocessAlterFunctionStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); extern ObjectAddress AlterFunctionStmtObjectAddress(Node *stmt, bool missing_ok); -extern List * PreprocessRenameFunctionStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); extern ObjectAddress RenameFunctionStmtObjectAddress(Node *stmt, bool missing_ok); -extern List * PreprocessAlterFunctionOwnerStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessAlterFunctionOwnerStmt(Node *stmt, const char *queryString); extern ObjectAddress AlterFunctionOwnerObjectAddress(Node *stmt, bool missing_ok); -extern List * PreprocessAlterFunctionSchemaStmt(Node *stmt, const char *queryString, - ProcessUtilityContext - processUtilityContext); extern ObjectAddress AlterFunctionSchemaStmtObjectAddress(Node *stmt, bool missing_ok); -extern List * PostprocessAlterFunctionSchemaStmt(Node *stmt, - const char *queryString); -extern List * PreprocessDropFunctionStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); extern List * PreprocessAlterFunctionDependsStmt(Node *stmt, const char *queryString, ProcessUtilityContext @@ -417,8 +370,6 @@ extern List * PreprocessAlterObjectSchemaStmt(Node *alterObjectSchemaStmt, const char *alterObjectSchemaCommand); extern List * PreprocessGrantOnSchemaStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); -extern List * PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); extern ObjectAddress CreateSchemaStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok); @@ -509,70 +460,10 @@ extern bool ConstrTypeUsesIndex(ConstrType constrType); /* text_search.c - forward declarations */ -extern List * PostprocessCreateTextSearchConfigurationStmt(Node *node, - const char *queryString); -extern List * PostprocessCreateTextSearchDictionaryStmt(Node *node, - const char *queryString); extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address); extern List * GetCreateTextSearchDictionaryStatements(const ObjectAddress *address); extern List * 
CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address); extern List * CreateTextSearchDictDDLCommandsIdempotent(const ObjectAddress *address); -extern List * PreprocessDropTextSearchConfigurationStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessDropTextSearchDictionaryStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterTextSearchConfigurationStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterTextSearchDictionaryStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessRenameTextSearchConfigurationStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessRenameTextSearchDictionaryStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterTextSearchDictionarySchemaStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, - const char *queryString); -extern List * PostprocessAlterTextSearchDictionarySchemaStmt(Node *node, - const char *queryString); -extern List * PreprocessTextSearchConfigurationCommentStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessTextSearchDictionaryCommentStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PreprocessAlterTextSearchDictionaryOwnerStmt(Node *node, - const char *queryString, - ProcessUtilityContext - processUtilityContext); -extern List * PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, - const char *queryString); -extern List * PostprocessAlterTextSearchDictionaryOwnerStmt(Node *node, - const char *queryString); extern ObjectAddress CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok); extern ObjectAddress CreateTextSearchDictObjectAddress(Node *node, @@ -605,28 +496,9 @@ extern List * get_ts_config_namelist(Oid tsconfigOid); extern void PreprocessTruncateStatement(TruncateStmt *truncateStatement); /* type.c - forward declarations */ -extern List * PreprocessCompositeTypeStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessCompositeTypeStmt(Node *stmt, const char *queryString); -extern List * PreprocessAlterTypeStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PreprocessCreateEnumStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessCreateEnumStmt(Node *stmt, const char *queryString); -extern List * PreprocessAlterEnumStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PreprocessDropTypeStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PreprocessRenameTypeStmt(Node *stmt, const char *queryString, 
- ProcessUtilityContext processUtilityContext); extern List * PreprocessRenameTypeAttributeStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); -extern List * PreprocessAlterTypeSchemaStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PreprocessAlterTypeOwnerStmt(Node *stmt, const char *queryString, - ProcessUtilityContext processUtilityContext); -extern List * PostprocessAlterTypeSchemaStmt(Node *stmt, const char *queryString); extern Node * CreateTypeStmtByObjectAddress(const ObjectAddress *address); extern ObjectAddress CompositeTypeStmtObjectAddress(Node *stmt, bool missing_ok); extern ObjectAddress CreateEnumStmtObjectAddress(Node *stmt, bool missing_ok); @@ -654,6 +526,32 @@ extern void UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, /* vacuum.c - forward declarations */ extern void PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand); +/* view.c - forward declarations */ +extern List * PreprocessViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessViewStmt(Node *node, const char *queryString); +extern ObjectAddress ViewStmtObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress AlterViewStmtObjectAddress(Node *node, bool missing_ok); +extern List * PreprocessDropViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern char * CreateViewDDLCommand(Oid viewOid); +extern char * AlterViewOwnerCommand(Oid viewOid); +extern char * DeparseViewStmt(Node *node); +extern char * DeparseDropViewStmt(Node *node); +extern bool IsViewDistributed(Oid viewOid); +extern List * CreateViewDDLCommandsIdempotent(Oid viewOid); +extern List * PreprocessAlterViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessAlterViewStmt(Node *node, const char *queryString); +extern List * PreprocessRenameViewStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern ObjectAddress RenameViewStmtObjectAddress(Node *node, bool missing_ok); +extern List * PreprocessAlterViewSchemaStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessAlterViewSchemaStmt(Node *node, const char *queryString); +extern ObjectAddress AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok); +extern bool IsViewRenameStmt(RenameStmt *renameStmt); + /* trigger.c - forward declarations */ extern List * GetExplicitTriggerCommandList(Oid relationId); extern HeapTuple GetTriggerTupleById(Oid triggerId, bool missingOk); diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 615a7c6d2..a057c67b6 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -50,13 +50,13 @@ extern bool InDelegatedProcedureCall; /* * A DDLJob encapsulates the remote tasks and commands needed to process all or - * part of a distributed DDL command. It hold the distributed relation's oid, + * part of a distributed DDL command. It hold the target object's address, * the original DDL command string (for MX DDL propagation), and a task list of * DDL_TASK-type Tasks to be executed. 
*/ typedef struct DDLJob { - Oid targetRelationId; /* oid of the target distributed relation */ + ObjectAddress targetObjectAddress; /* target distributed object address */ /* * Whether to commit and start a new transaction before sending commands @@ -75,6 +75,7 @@ typedef struct DDLJob List *taskList; /* worker DDL tasks to execute */ } DDLJob; +extern ProcessUtility_hook_type PrevProcessUtility; extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, #if PG_VERSION_NUM >= PG_VERSION_14 diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index 2e31bc9da..7a992c19a 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -35,6 +35,9 @@ /* application name used for internal connections in rebalancer */ #define CITUS_REBALANCER_NAME "citus_rebalancer" +/* application name used for connections made by run_command_on_* */ +#define CITUS_RUN_COMMAND_APPLICATION_NAME "citus_run_command" + /* deal with waiteventset errors */ #define WAIT_EVENT_SET_INDEX_NOT_INITIALIZED -1 #define WAIT_EVENT_SET_INDEX_FAILED -2 @@ -285,8 +288,6 @@ extern void FinishConnectionListEstablishment(List *multiConnectionList); extern void FinishConnectionEstablishment(MultiConnection *connection); extern void ClaimConnectionExclusively(MultiConnection *connection); extern void UnclaimConnection(MultiConnection *connection); -extern bool IsCitusInternalBackend(void); -extern bool IsRebalancerInternalBackend(void); extern void MarkConnectionConnected(MultiConnection *connection); /* waiteventset utilities */ diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 34acd0795..5a316d39a 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -144,6 +144,17 @@ extern Oid TypeOidGetNamespaceOid(Oid typeOid); extern ObjectAddress GetObjectAddressFromParseTree(Node *parseTree, bool missing_ok); extern ObjectAddress RenameAttributeStmtObjectAddress(Node *stmt, bool missing_ok); +/* forward declarations for deparse_view_stmts.c */ +extern void QualifyDropViewStmt(Node *node); +extern void QualifyAlterViewStmt(Node *node); +extern void QualifyRenameViewStmt(Node *node); +extern void QualifyAlterViewSchemaStmt(Node *node); +extern char * DeparseRenameViewStmt(Node *stmt); +extern char * DeparseAlterViewStmt(Node *node); +extern char * DeparseDropViewStmt(Node *node); +extern char * DeparseAlterViewSchemaStmt(Node *node); + + /* forward declarations for deparse_function_stmts.c */ extern char * DeparseDropFunctionStmt(Node *stmt); extern char * DeparseAlterFunctionStmt(Node *stmt); @@ -202,6 +213,7 @@ extern char * DeparseAlterSequenceOwnerStmt(Node *node); /* forward declarations for qualify_sequence_stmt.c */ extern void QualifyRenameSequenceStmt(Node *node); +extern void QualifyDropSequenceStmt(Node *node); extern void QualifyAlterSequenceSchemaStmt(Node *node); extern void QualifyAlterSequenceOwnerStmt(Node *node); diff --git a/src/include/distributed/listutils.h b/src/include/distributed/listutils.h index f8b0b609e..e4a185b4d 100644 --- a/src/include/distributed/listutils.h +++ b/src/include/distributed/listutils.h @@ -176,5 +176,6 @@ extern List * ListTake(List *pointerList, int size); extern void * safe_list_nth(const List *list, int index); extern List * GeneratePositiveIntSequenceList(int upTo); extern List * GenerateListFromElement(void *listElement, int listLength); +extern List * list_filter_oid(List *list, bool 
(*keepElement)(Oid element)); #endif /* CITUS_LISTUTILS_H */ diff --git a/src/include/distributed/local_executor.h b/src/include/distributed/local_executor.h index d2b8cce9c..c555f1f82 100644 --- a/src/include/distributed/local_executor.h +++ b/src/include/distributed/local_executor.h @@ -19,7 +19,8 @@ extern bool EnableLocalExecution; extern bool LogLocalCommands; -extern int LocalExecutorLevel; +/* global variable that tracks whether the local execution is on a shard */ +extern uint64 LocalExecutorShardId; typedef enum LocalExecutionStatus { diff --git a/src/include/distributed/metadata/dependency.h b/src/include/distributed/metadata/dependency.h index af11c5f2a..32016afc7 100644 --- a/src/include/distributed/metadata/dependency.h +++ b/src/include/distributed/metadata/dependency.h @@ -22,6 +22,7 @@ extern List * GetUniqueDependenciesList(List *objectAddressesList); extern List * GetDependenciesForObject(const ObjectAddress *target); extern List * GetAllSupportedDependenciesForObject(const ObjectAddress *target); extern List * GetAllDependenciesForObject(const ObjectAddress *target); +extern bool ErrorOrWarnIfObjectHasUnsupportedDependency(ObjectAddress *objectAddress); extern DeferredErrorMessage * DeferErrorIfHasUnsupportedDependency(const ObjectAddress * objectAddress); diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index 5ea04ec73..9511df4cf 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -24,6 +24,7 @@ extern bool IsObjectDistributed(const ObjectAddress *address); extern bool ClusterHasDistributedFunctionWithDistArgument(void); extern void MarkObjectDistributed(const ObjectAddress *distAddress); extern void MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress); +extern void MarkObjectDistributedLocally(const ObjectAddress *distAddress); extern void UnmarkObjectDistributed(const ObjectAddress *address); extern bool IsTableOwnedByExtension(Oid relationId); extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target, diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h index e190aef6f..051a697c5 100644 --- a/src/include/distributed/metadata_cache.h +++ b/src/include/distributed/metadata_cache.h @@ -144,6 +144,8 @@ extern bool IsCitusTableType(Oid relationId, CitusTableType tableType); extern bool IsCitusTableTypeCacheEntry(CitusTableCacheEntry *tableEtnry, CitusTableType tableType); +extern void SetCreateCitusTransactionLevel(int val); +extern int GetCitusCreationLevel(void); extern bool IsCitusTable(Oid relationId); extern bool IsCitusTableViaCatalog(Oid relationId); extern char PgDistPartitionViaCatalog(Oid relationId); @@ -155,6 +157,7 @@ extern List * CitusTableList(void); extern ShardInterval * LoadShardInterval(uint64 shardId); extern Oid RelationIdForShard(uint64 shardId); extern bool ReferenceTableShardId(uint64 shardId); +extern bool DistributedTableShardId(uint64 shardId); extern ShardPlacement * ShardPlacementOnGroupIncludingOrphanedPlacements(int32 groupId, uint64 shardId); extern ShardPlacement * ActiveShardPlacementOnGroup(int32 groupId, uint64 shardId); @@ -238,6 +241,7 @@ extern Oid DistShardLogicalRelidIndexId(void); extern Oid DistShardShardidIndexId(void); extern Oid DistPlacementShardidIndexId(void); extern Oid DistPlacementPlacementidIndexId(void); +extern Oid DistColocationIndexId(void); extern Oid DistTransactionRelationId(void); extern Oid 
DistTransactionGroupIndexId(void); extern Oid DistPlacementGroupidIndexId(void); diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index e67726bfc..e4cdf8830 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -13,6 +13,7 @@ #define METADATA_SYNC_H +#include "distributed/commands/utility_hook.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "nodes/pg_list.h" @@ -34,6 +35,7 @@ extern void SyncCitusTableMetadata(Oid relationId); extern void EnsureSequentialModeMetadataOperations(void); extern bool ClusterHasKnownMetadataWorkers(void); extern char * LocalGroupIdUpdateCommand(int32 groupId); +extern bool ShouldSyncUserCommandForObject(ObjectAddress objectAddress); extern bool ShouldSyncTableMetadata(Oid relationId); extern bool ShouldSyncTableMetadataViaCatalog(Oid relationId); extern List * NodeMetadataCreateCommands(void); @@ -63,6 +65,7 @@ extern TableDDLCommand * TruncateTriggerCreateCommand(Oid relationId); extern void CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId); extern List * InterTableRelationshipOfRelationCommandList(Oid relationId); extern List * DetachPartitionCommandList(void); +extern void SyncNodeMetadataToNodes(void); extern BackgroundWorkerHandle * SpawnSyncNodeMetadataToNodes(Oid database, Oid owner); extern void SyncNodeMetadataToNodesMain(Datum main_arg); extern void SignalMetadataSyncDaemon(Oid database, int sig); diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index 0d9f125d8..0b4c344e0 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -23,6 +23,7 @@ #include "catalog/objectaddress.h" #include "distributed/citus_nodes.h" #include "distributed/connection_management.h" +#include "distributed/errormessage.h" #include "distributed/relay_utility.h" #include "utils/acl.h" #include "utils/relcache.h" @@ -41,6 +42,15 @@ #define SHARD_SIZES_COLUMN_COUNT (3) +/* + * Flag to keep track of whether the process is currently in a function converting the + * type of the table. Since it only affects the level of the log shown while dropping/ + * recreating table within the table type conversion, rollbacking to the savepoint hasn't + * been implemented for the sake of simplicity. If you are planning to use that flag for + * any other purpose, please consider implementing that. + */ +extern bool InTableTypeConversionFunctionCall; + /* In-memory representation of a typed tuple in pg_dist_shard. 
*/ typedef struct ShardInterval { @@ -249,7 +259,11 @@ extern void CreateTruncateTrigger(Oid relationId); extern TableConversionReturn * UndistributeTable(TableConversionParameters *params); extern void EnsureDependenciesExistOnAllNodes(const ObjectAddress *target); +extern DeferredErrorMessage * DeferErrorIfCircularDependencyExists(const + ObjectAddress * + objectAddress); extern List * GetDistributableDependenciesForObject(const ObjectAddress *target); +extern List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency); extern bool ShouldPropagate(void); extern bool ShouldPropagateCreateInCoordinatedTransction(void); extern bool ShouldPropagateObject(const ObjectAddress *address); diff --git a/src/include/distributed/multi_executor.h b/src/include/distributed/multi_executor.h index dd10c511d..c8254bf44 100644 --- a/src/include/distributed/multi_executor.h +++ b/src/include/distributed/multi_executor.h @@ -61,6 +61,7 @@ typedef struct TransactionProperties } TransactionProperties; +extern bool AllowNestedDistributedExecution; extern int MultiShardConnectionType; extern bool WritableStandbyCoordinator; extern bool AllowModificationsFromWorkersToReplicatedTables; @@ -150,8 +151,7 @@ extern void ExtractParametersFromParamList(ParamListInfo paramListInfo, const char ***parameterValues, bool useOriginalCustomTypeOids); extern ParamListInfo ExecutorBoundParams(void); -extern void EnsureRemoteTaskExecutionAllowed(void); -extern bool InTaskExecution(void); +extern void EnsureTaskExecutionAllowed(bool isRemote); #endif /* MULTI_EXECUTOR_H */ diff --git a/src/include/distributed/param_utils.h b/src/include/distributed/param_utils.h new file mode 100644 index 000000000..3e2a6af86 --- /dev/null +++ b/src/include/distributed/param_utils.h @@ -0,0 +1,15 @@ +/*------------------------------------------------------------------------- + * param_utils.h + * + * Copyright (c) Citus Data, Inc. 
+ * + *------------------------------------------------------------------------- + */ + +#ifndef PARAM_UTILS_H +#define PARAM_UTILS_H + +extern bool GetParamsUsedInQuery(Node *expression, Bitmapset **paramBitmap); +extern void MarkUnreferencedExternParams(Node *expression, ParamListInfo boundParams); + +#endif /* PARAM_UTILS_H */ diff --git a/src/include/distributed/remote_commands.h b/src/include/distributed/remote_commands.h index 7e2c4852f..93d4f6bfd 100644 --- a/src/include/distributed/remote_commands.h +++ b/src/include/distributed/remote_commands.h @@ -66,4 +66,8 @@ extern void WaitForAllConnections(List *connectionList, bool raiseInterrupts); extern bool SendCancelationRequest(MultiConnection *connection); +extern bool EvaluateSingleQueryResult(MultiConnection *connection, PGresult *queryResult, + StringInfo queryResultString); +extern void StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString); + #endif /* REMOTE_COMMAND_H */ diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h index 4fa53144c..b87896b99 100644 --- a/src/include/distributed/resource_lock.h +++ b/src/include/distributed/resource_lock.h @@ -16,6 +16,7 @@ #include "distributed/worker_transaction.h" #include "nodes/pg_list.h" #include "storage/lock.h" +#include "tcop/utility.h" /* @@ -109,6 +110,26 @@ typedef enum CitusOperations (uint32) 0, \ ADV_LOCKTAG_CLASS_CITUS_PLACEMENT_CLEANUP) +/* + * DistLockConfigs are used to configure the locking behaviour of AcquireDistributedLockOnRelations + */ +enum DistLockConfigs +{ + /* + * lock citus tables + */ + DIST_LOCK_DEFAULT = 0, + + /* + * lock tables that refer to locked citus tables with a foreign key + */ + DIST_LOCK_REFERENCING_TABLES = 1, + + /* + * throw an error if the lock is not immediately available + */ + DIST_LOCK_NOWAIT = 2 +}; /* Lock shard/relation metadata for safe modifications */ extern void LockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode); @@ -151,5 +172,10 @@ extern void LockParentShardResourceIfPartition(List *shardIntervalList, /* Lock mode translation between text and enum */ extern LOCKMODE LockModeTextToLockMode(const char *lockModeName); extern const char * LockModeToLockModeText(LOCKMODE lockMode); +extern void AcquireDistributedLockOnRelations(List *relationList, LOCKMODE lockMode, + uint32 configs); +extern void PreprocessLockStatement(LockStmt *stmt, ProcessUtilityContext context); + +extern bool EnableAcquiringUnsafeLockFromWorkers; #endif /* RESOURCE_LOCK_H */ diff --git a/src/include/distributed/transaction_management.h b/src/include/distributed/transaction_management.h index 2c958d041..d7a008054 100644 --- a/src/include/distributed/transaction_management.h +++ b/src/include/distributed/transaction_management.h @@ -75,6 +75,12 @@ typedef struct AllowedDistributionColumn int executorLevel; } AllowedDistributionColumn; +/* + * The current distribution column value passed as an argument to a forced + * function call delegation. + */ +extern AllowedDistributionColumn AllowedDistributionColumnValue; + /* * GUC that determines whether a SELECT in a transaction block should also run in * a transaction block on the worker. 
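The resource_lock.h declarations above (DistLockConfigs, AcquireDistributedLockOnRelations, PreprocessLockStatement) are what turn a plain LOCK statement into a distributed one. A usage sketch of that behaviour, with a hypothetical distributed table named `dist_table`:

```sql
-- Inside a transaction block, LOCK on a distributed table is now also acquired
-- on every other node that has metadata (the DIST_LOCK_DEFAULT behaviour).
BEGIN;
LOCK TABLE dist_table IN ACCESS EXCLUSIVE MODE;

-- NOWAIT maps to DIST_LOCK_NOWAIT: error out instead of waiting if any node
-- cannot grant the lock immediately.
LOCK TABLE dist_table IN SHARE ROW EXCLUSIVE MODE NOWAIT;
COMMIT;
```

When such a LOCK is issued from a worker while the coordinator is not in the metadata, it is rejected unless `citus.allow_unsafe_locks_from_workers` is enabled, as the error hint in resource_lock.c spells out.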
diff --git a/src/include/distributed/worker_manager.h b/src/include/distributed/worker_manager.h index 27de1d464..e861b8a65 100644 --- a/src/include/distributed/worker_manager.h +++ b/src/include/distributed/worker_manager.h @@ -63,6 +63,7 @@ extern char *WorkerListFileName; extern char *CurrentCluster; extern bool ReplicateReferenceTablesOnActivate; +extern void ActivateNodeList(List *nodeList); extern int ActivateNode(char *nodeName, int nodePort); /* Function declarations for finding worker nodes to place shards on */ diff --git a/src/include/distributed/worker_shard_visibility.h b/src/include/distributed/worker_shard_visibility.h index 957992fed..7eea5fbf7 100644 --- a/src/include/distributed/worker_shard_visibility.h +++ b/src/include/distributed/worker_shard_visibility.h @@ -15,7 +15,7 @@ extern bool OverrideTableVisibility; extern bool EnableManualChangesToShards; -extern char *HideShardsFromAppNamePrefixes; +extern char *ShowShardsForAppNamePrefixes; extern void HideShardsFromSomeApplications(Query *query); diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index 6cb7d8bce..72b16acd5 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -22,9 +22,27 @@ */ typedef enum TargetWorkerSet { + /* + * All the active primary nodes in the metadata which have metadata + * except the coordinator + */ NON_COORDINATOR_METADATA_NODES, + + /* + * All the active primary nodes in the metadata except the coordinator + */ NON_COORDINATOR_NODES, - ALL_SHARD_NODES + + /* + * All active primary nodes in the metadata + */ + ALL_SHARD_NODES, + + /* + * All the active primary nodes in the metadata which have metadata + * (includes the coodinator if it is added) + */ + METADATA_NODES } TargetWorkerSet; @@ -56,10 +74,11 @@ extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort, const char *nodeUser, List *commandList); -extern void SendMetadataCommandListToWorkerInCoordinatedTransaction(const char *nodeName, - int32 nodePort, - const char *nodeUser, - List *commandList); +extern void SendMetadataCommandListToWorkerListInCoordinatedTransaction( + List *workerNodeList, + const char * + nodeUser, + List *commandList); extern void SendCommandToWorkersOptionalInParallel(TargetWorkerSet targetWorkerSet, const char *command, const char *user); diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index ad7a8bbb0..33cb524bd 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -13,6 +13,55 @@ #include "distributed/pg_version_constants.h" +#if PG_VERSION_NUM >= PG_VERSION_15 +#define ProcessCompletedNotifies() +#define RelationCreateStorage_compat(a, b, c) RelationCreateStorage(a, b, c) +#define parse_analyze_varparams_compat(a, b, c, d, e) parse_analyze_varparams(a, b, c, d, \ + e) +#else + +#include "nodes/value.h" +#include "storage/smgr.h" +#include "utils/int8.h" +#include "utils/rel.h" + +typedef Value String; + +#ifdef HAVE_LONG_INT_64 +#define strtoi64(str, endptr, base) ((int64) strtol(str, endptr, base)) +#define strtou64(str, endptr, base) ((uint64) strtoul(str, endptr, base)) +#else +#define strtoi64(str, endptr, base) ((int64) strtoll(str, endptr, base)) +#define strtou64(str, endptr, base) ((uint64) strtoull(str, endptr, base)) +#endif +#define RelationCreateStorage_compat(a, b, c) RelationCreateStorage(a, b) +#define parse_analyze_varparams_compat(a, b, c, d, e) parse_analyze_varparams(a, b, c, d) 
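+
+/*
+ * With the fallbacks above, callers such as ExtractShardIdFromTableName can use
+ * strtou64() on every supported version: on PG < 15 it expands to strtoul() or
+ * strtoull() depending on HAVE_LONG_INT_64, while PG 15 provides strtou64() itself.
+ */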
+#define pgstat_init_relation(r) pgstat_initstats(r) +#define pg_analyze_and_rewrite_fixedparams(a, b, c, d, e) pg_analyze_and_rewrite(a, b, c, \ + d, e) + +static inline int64 +pg_strtoint64(char *s) +{ + int64 result; + (void) scanint8(s, false, &result); + return result; +} + + +static inline SMgrRelation +RelationGetSmgr(Relation rel) +{ + if (unlikely(rel->rd_smgr == NULL)) + { + smgrsetowner(&(rel->rd_smgr), smgropen(rel->rd_node, rel->rd_backend)); + } + return rel->rd_smgr; +} + + +#endif + #if PG_VERSION_NUM >= PG_VERSION_14 #define AlterTableStmtObjType_compat(a) ((a)->objtype) #define getObjectTypeDescription_compat(a, b) getObjectTypeDescription(a, b) @@ -28,6 +77,8 @@ standard_ProcessUtility(a, b, c, d, e, f, g, h) #define ProcessUtility_compat(a, b, c, d, e, f, g, h) \ ProcessUtility(a, b, c, d, e, f, g, h) +#define PrevProcessUtility_compat(a, b, c, d, e, f, g, h) \ + PrevProcessUtility(a, b, c, d, e, f, g, h) #define SetTuplestoreDestReceiverParams_compat(a, b, c, d, e, f) \ SetTuplestoreDestReceiverParams(a, b, c, d, e, f) #define pgproc_statusflags_compat(pgproc) ((pgproc)->statusFlags) @@ -57,6 +108,8 @@ #define standard_ProcessUtility_compat(a, b, c, d, e, f, g, h) \ standard_ProcessUtility(a, b, d, e, f, g, h) #define ProcessUtility_compat(a, b, c, d, e, f, g, h) ProcessUtility(a, b, d, e, f, g, h) +#define PrevProcessUtility_compat(a, b, c, d, e, f, g, h) \ + PrevProcessUtility(a, b, d, e, f, g, h) #define COPY_FRONTEND COPY_NEW_FE #define SetTuplestoreDestReceiverParams_compat(a, b, c, d, e, f) \ SetTuplestoreDestReceiverParams(a, b, c, d) diff --git a/src/test/columnar_freezing/Makefile b/src/test/columnar_freezing/Makefile new file mode 100644 index 000000000..565e204fd --- /dev/null +++ b/src/test/columnar_freezing/Makefile @@ -0,0 +1,37 @@ +#------------------------------------------------------------------------- +# +# Makefile for src/test/columnar_freezing +# +# Test that columnar freezing works. +# +#------------------------------------------------------------------------- + +subdir = src/test/columnar_freezing +top_builddir = ../../.. 
include $(top_builddir)/Makefile.global + +# copied from pgxs/Makefile.global to use postgres' abs build dir for pg_regress +ifeq ($(enable_tap_tests),yes) + +define citus_prove_installcheck +rm -rf '$(CURDIR)'/tmp_check +$(MKDIR_P) '$(CURDIR)'/tmp_check +cd $(srcdir) && \ +TESTDIR='$(CURDIR)' \ +PATH="$(bindir):$$PATH" \ +PGPORT='6$(DEF_PGPORT)' \ +top_builddir='$(CURDIR)/$(top_builddir)' \ +PG_REGRESS='$(pgxsdir)/src/test/regress/pg_regress' \ +TEMP_CONFIG='$(CURDIR)'/postgresql.conf \ +$(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) $(if $(PROVE_TESTS),$(PROVE_TESTS),t/*.pl) +endef + +else +citus_prove_installcheck = @echo "TAP tests not enabled when postgres was compiled" +endif + +installcheck: + $(citus_prove_installcheck) + +clean distclean maintainer-clean: + rm -rf tmp_check diff --git a/src/test/columnar_freezing/postgresql.conf b/src/test/columnar_freezing/postgresql.conf new file mode 100644 index 000000000..39521cc33 --- /dev/null +++ b/src/test/columnar_freezing/postgresql.conf @@ -0,0 +1,7 @@ +shared_preload_libraries=citus +shared_preload_libraries='citus' +vacuum_freeze_min_age = 50000 +vacuum_freeze_table_age = 50000 +synchronous_commit = off +fsync = off + diff --git a/src/test/columnar_freezing/t/001_columnar_freezing.pl b/src/test/columnar_freezing/t/001_columnar_freezing.pl new file mode 100644 index 000000000..1985da2a5 --- /dev/null +++ b/src/test/columnar_freezing/t/001_columnar_freezing.pl @@ -0,0 +1,52 @@ +# Minimal test for columnar table freezing +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 2; + +# Initialize single node +my $node_one = get_new_node('node_one'); +$node_one->init(); +$node_one->start; + +# initialize the citus extension +$node_one->safe_psql('postgres', "CREATE EXTENSION citus;"); + +# create a heap table and a columnar table with one row each to exercise freezing +$node_one->safe_psql('postgres', " +CREATE TABLE test_row(i int); +INSERT INTO test_row VALUES (1); +CREATE TABLE test_columnar_freeze(i int) USING columnar WITH(autovacuum_enabled=false); +INSERT INTO test_columnar_freeze VALUES (1); +"); + +my $ten_thousand_updates = ""; + +foreach (1..10000) { + $ten_thousand_updates .= "UPDATE test_row SET i = i + 1;\n"; +} + +# 70K updates +foreach (1..7) { + $node_one->safe_psql('postgres', $ten_thousand_updates); +} + +my $result = $node_one->safe_psql('postgres', " +select age(relfrozenxid) < 70000 as was_frozen + from pg_class where relname='test_columnar_freeze'; +"); +print "node one count: $result\n"; +is($result, qq(f), 'columnar table was not frozen'); + +$node_one->safe_psql('postgres', 'VACUUM FREEZE test_columnar_freeze;'); + +$result = $node_one->safe_psql('postgres', " +select age(relfrozenxid) < 70000 as was_frozen + from pg_class where relname='test_columnar_freeze'; +"); +print "node one count: $result\n"; +is($result, qq(t), 'columnar table was frozen'); + +$node_one->stop('fast'); + diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 329d63722..2c15915a2 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -219,6 +219,20 @@ s/^(ERROR: child table is missing constraint "\w+)_([0-9])+"/\1_xxxxxx"/g } } +# normalize for random waits for CREATE INDEX CONCURRENTLY isolation tests. +# outputs can be in separate lines, or on the same line, and hence +# we have a slightly more complex pattern. +# All the flaky tests use an index name that starts with `flaky` so we limit the +# normalization using that pattern.
+/CREATE INDEX CONCURRENTLY flaky/ { + N; s/ // +} + +# Remove completion lines in isolation tests for CREATE INDEX CONCURRENTLY commands. +# This is needed because the commands that are executed on the shards can block each other +# for a small window of time and we may see the completion output in different lines. +/step s2-flaky.* <... completed>/d + # normalize long table shard name errors for alter_table_set_access_method and alter_distributed_table s/^(ERROR: child table is missing constraint "\w+)_([0-9])+"/\1_xxxxxx"/g s/^(DEBUG: the name of the shard \(abcde_01234567890123456789012345678901234567890_f7ff6612)_([0-9])+/\1_xxxxxx/g diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index c2e770d79..dcde8a5af 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -214,10 +214,10 @@ def save_regression_diff(name, output_dir): shutil.move(path, new_file_path) -def sync_metadata_to_workers(pg_path, worker_ports, coordinator_port): +def stop_metadata_to_workers(pg_path, worker_ports, coordinator_port): for port in worker_ports: command = ( - "SELECT * from start_metadata_sync_to_node('localhost', {port});".format( + "SELECT * from stop_metadata_sync_to_node('localhost', {port});".format( port=port ) ) @@ -286,8 +286,8 @@ def initialize_citus_cluster(bindir, datadir, settings, config): start_databases(bindir, datadir, config.node_name_to_ports, config.name, config.env_variables) create_citus_extension(bindir, config.node_name_to_ports.values()) add_workers(bindir, config.worker_ports, config.coordinator_port()) - if config.is_mx: - sync_metadata_to_workers(bindir, config.worker_ports, config.coordinator_port()) + if not config.is_mx: + stop_metadata_to_workers(bindir, config.worker_ports, config.coordinator_port()) if config.add_coordinator_to_metadata: add_coordinator_to_metadata(bindir, config.coordinator_port()) config.setup_steps() diff --git a/src/test/regress/citus_tests/config.py b/src/test/regress/citus_tests/config.py index c88efa814..9e9292e7f 100644 --- a/src/test/regress/citus_tests/config.py +++ b/src/test/regress/citus_tests/config.py @@ -176,7 +176,6 @@ class CitusUpgradeConfig(CitusBaseClusterConfig): self.user = SUPER_USER_NAME self.mixed_mode = arguments["--mixed"] self.fixed_port = 57635 - self.is_mx = False class PostgresConfig(CitusDefaultClusterConfig): @@ -187,6 +186,7 @@ class PostgresConfig(CitusDefaultClusterConfig): self.new_settings = { "citus.use_citus_managed_tables": False, } + self.skip_tests = ["nested_execution"] class CitusSingleNodeClusterConfig(CitusDefaultClusterConfig): @@ -321,11 +321,14 @@ class CitusShardReplicationFactorClusterConfig(CitusDefaultClusterConfig): self.new_settings = {"citus.shard_replication_factor": 2} self.skip_tests = [ # citus does not support foreign keys in distributed tables - # when citus.shard_replication_factor > 2 + # when citus.shard_replication_factor >= 2 "arbitrary_configs_truncate_partition_create", "arbitrary_configs_truncate_partition", # citus does not support modifying a partition when - # citus.shard_replication_factor > 2 - "arbitrary_configs_truncate_cascade_create", "arbitrary_configs_truncate_cascade"] + # citus.shard_replication_factor >= 2 + "arbitrary_configs_truncate_cascade_create", "arbitrary_configs_truncate_cascade", + # citus does not support colocating functions with distributed tables when + # citus.shard_replication_factor >= 2 + "function_create", "functions"] class 
CitusSingleShardClusterConfig(CitusDefaultClusterConfig): @@ -338,6 +341,9 @@ class CitusNonMxClusterConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.is_mx = False + # citus does not support distributed functions + # when metadata is not synced + self.skip_tests = ["function_create", "functions", "nested_execution"] class PGUpgradeConfig(CitusBaseClusterConfig): diff --git a/src/test/regress/columnar_schedule b/src/test/regress/columnar_schedule index ca2913c55..6c888b3b8 100644 --- a/src/test/regress/columnar_schedule +++ b/src/test/regress/columnar_schedule @@ -27,6 +27,7 @@ test: columnar_clean test: columnar_types_without_comparison test: columnar_chunk_filtering test: columnar_join +test: columnar_pg15 test: columnar_trigger test: columnar_tableoptions test: columnar_recursive diff --git a/src/test/regress/create_schedule b/src/test/regress/create_schedule index c0967a29d..0ba1a7f05 100644 --- a/src/test/regress/create_schedule +++ b/src/test/regress/create_schedule @@ -1,13 +1,14 @@ test: intermediate_result_pruning_create test: prepared_statements_create_load ch_benchmarks_create_load test: dropped_columns_create_load distributed_planning_create_load -test: local_dist_join_load +test: local_dist_join_load nested_execution_create test: partitioned_indexes_create test: connectivity_checks test: schemas_create test: views_create test: sequences_create test: index_create +test: function_create test: arbitrary_configs_truncate_create test: arbitrary_configs_truncate_cascade_create test: arbitrary_configs_truncate_partition_create diff --git a/src/test/regress/expected/alter_distributed_table.out b/src/test/regress/expected/alter_distributed_table.out index cc92505ad..2bdc36b8c 100644 --- a/src/test/regress/expected/alter_distributed_table.out +++ b/src/test/regress/expected/alter_distributed_table.out @@ -904,5 +904,82 @@ SELECT amname FROM pg_am WHERE oid IN (SELECT relam FROM pg_class WHERE relname columnar (1 row) +-- verify that alter_distributed_table works if it has dependent views and materialized views +-- set colocate_with explicitly to not to affect other tables +CREATE SCHEMA schema_to_test_alter_dist_table; +SET search_path to schema_to_test_alter_dist_table; +CREATE TABLE test_alt_dist_table_1(a int, b int); +SELECT create_distributed_table('test_alt_dist_table_1', 'a', colocate_with => 'None'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE test_alt_dist_table_2(a int, b int); +SELECT create_distributed_table('test_alt_dist_table_2', 'a', colocate_with => 'test_alt_dist_table_1'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW dependent_view_1 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2; +CREATE VIEW dependent_view_2 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2 JOIN test_alt_dist_table_1 USING(a); +CREATE MATERIALIZED VIEW dependent_mat_view_1 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2; +-- Alter owner to make sure that alter_distributed_table doesn't change view's owner SET client_min_messages TO WARNING; +CREATE USER alter_dist_table_test_user; +SELECT 1 FROM run_command_on_workers($$CREATE USER alter_dist_table_test_user$$); + ?column? 
+--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +ALTER VIEW dependent_view_1 OWNER TO alter_dist_table_test_user; +ALTER VIEW dependent_view_2 OWNER TO alter_dist_table_test_user; +ALTER MATERIALIZED VIEW dependent_mat_view_1 OWNER TO alter_dist_table_test_user; +SELECT alter_distributed_table('test_alt_dist_table_1', shard_count:=12, cascade_to_colocated:=true); + alter_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT viewowner FROM pg_views WHERE viewname IN ('dependent_view_1', 'dependent_view_2'); + viewowner +--------------------------------------------------------------------- + alter_dist_table_test_user + alter_dist_table_test_user +(2 rows) + +SELECT matviewowner FROM pg_matviews WHERE matviewname = 'dependent_mat_view_1'; + matviewowner +--------------------------------------------------------------------- + alter_dist_table_test_user +(1 row) + +-- Check the existence of the view on the worker node as well +SELECT run_command_on_workers($$SELECT viewowner FROM pg_views WHERE viewname = 'dependent_view_1'$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,alter_dist_table_test_user) + (localhost,57638,t,alter_dist_table_test_user) +(2 rows) + +SELECT run_command_on_workers($$SELECT viewowner FROM pg_views WHERE viewname = 'dependent_view_2'$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,alter_dist_table_test_user) + (localhost,57638,t,alter_dist_table_test_user) +(2 rows) + +-- It is expected to not have mat view on worker node +SELECT run_command_on_workers($$SELECT count(*) FROM pg_matviews WHERE matviewname = 'dependent_mat_view_1';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +RESET search_path; DROP SCHEMA alter_distributed_table CASCADE; +DROP SCHEMA schema_to_test_alter_dist_table CASCADE; diff --git a/src/test/regress/expected/alter_table_set_access_method.out b/src/test/regress/expected/alter_table_set_access_method.out index 75ddac37e..da6cbd868 100644 --- a/src/test/regress/expected/alter_table_set_access_method.out +++ b/src/test/regress/expected/alter_table_set_access_method.out @@ -575,6 +575,9 @@ CREATE TABLE local(a int, b bigserial, c int default nextval('c_seq')); INSERT INTO local VALUES (3); create materialized view m_local as select * from local; create view v_local as select * from local; +WARNING: "view v_local" has dependency to "table local" that is not in Citus' metadata +DETAIL: "view v_local" will be created only locally +HINT: Distribute "table local" first to distribute "view v_local" CREATE TABLE ref(a int); SELECT create_Reference_table('ref'); create_reference_table diff --git a/src/test/regress/expected/citus_local_table_triggers.out b/src/test/regress/expected/citus_local_table_triggers.out index a5925cb25..97b93e756 100644 --- a/src/test/regress/expected/citus_local_table_triggers.out +++ b/src/test/regress/expected/citus_local_table_triggers.out @@ -423,7 +423,7 @@ NOTICE: executing the command locally: SELECT value FROM citus_local_table_trig (2 rows) ROLLBACK; --- cannot perform remote execution from a trigger on a Citus local table +-- can perform remote execution from a trigger on a Citus local table BEGIN; -- update should actually update something to test ON UPDATE CASCADE logic 
INSERT INTO another_citus_local_table VALUES (600); @@ -444,7 +444,8 @@ NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1 FOR EACH STATEMENT EXECUTE FUNCTION insert_100();') UPDATE another_citus_local_table SET value=value-1;; NOTICE: executing the command locally: UPDATE citus_local_table_triggers.another_citus_local_table_1507009 another_citus_local_table SET value = (value OPERATOR(pg_catalog.-) 1) -ERROR: cannot execute a distributed query from a query on a shard +NOTICE: executing the command locally: INSERT INTO citus_local_table_triggers.reference_table_1507010 (value) VALUES (100) +NOTICE: executing the command locally: INSERT INTO citus_local_table_triggers.reference_table_1507010 (value) VALUES (100) ROLLBACK; -- can perform regular execution from a trigger on a Citus local table BEGIN; diff --git a/src/test/regress/expected/citus_local_tables_mx.out b/src/test/regress/expected/citus_local_tables_mx.out index 0a50232ba..8923d3e82 100644 --- a/src/test/regress/expected/citus_local_tables_mx.out +++ b/src/test/regress/expected/citus_local_tables_mx.out @@ -224,7 +224,7 @@ ERROR: operation is not allowed on this node CREATE STATISTICS stx9 ON a, b FROM citus_local_table_stats2; DROP STATISTICS stx8; DROP STATISTICS stx4; -ERROR: statistics object "citus_local_tables_mx.stx4" does not exist +ERROR: statistics object "stx4" does not exist SELECT stxname FROM pg_statistic_ext ORDER BY stxname; stxname --------------------------------------------------------------------- diff --git a/src/test/regress/expected/citus_local_tables_queries_mx.out b/src/test/regress/expected/citus_local_tables_queries_mx.out index 52b73e7df..2eff2cd1d 100644 --- a/src/test/regress/expected/citus_local_tables_queries_mx.out +++ b/src/test/regress/expected/citus_local_tables_queries_mx.out @@ -683,11 +683,13 @@ SELECT count(*) FROM distributed_table WHERE b in 0 (1 row) +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW view_2 AS SELECT count(*) FROM citus_local_table JOIN citus_local_table_2 USING (a) JOIN distributed_table USING (a); +RESET citus.enable_ddl_propagation; -- should fail as view contains direct local dist join SELECT count(*) FROM view_2; count @@ -695,11 +697,13 @@ SELECT count(*) FROM view_2; 1 (1 row) +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW view_3 AS SELECT count(*) FROM citus_local_table_2 JOIN reference_table USING (a); +RESET citus.enable_ddl_propagation; -- ok SELECT count(*) FROM view_3; count diff --git a/src/test/regress/expected/columnar_citus_integration.out b/src/test/regress/expected/columnar_citus_integration.out index 582945bae..d1baced0c 100644 --- a/src/test/regress/expected/columnar_citus_integration.out +++ b/src/test/regress/expected/columnar_citus_integration.out @@ -1,4 +1,23 @@ -SET columnar.compression TO 'none'; +SELECT success, result FROM run_command_on_all_nodes($cmd$ + ALTER SYSTEM SET columnar.compression TO 'none' +$cmd$); + success | result +--------------------------------------------------------------------- + t | ALTER SYSTEM + t | ALTER SYSTEM + t | ALTER SYSTEM +(3 rows) + +SELECT success, result FROM run_command_on_all_nodes($cmd$ + SELECT pg_reload_conf() +$cmd$); + success | result +--------------------------------------------------------------------- + t | t + t | t + t | t +(3 rows) + CREATE SCHEMA columnar_citus_integration; SET search_path TO columnar_citus_integration; SET citus.next_shard_id TO 20090000; @@ -16,7 +35,7 @@ SELECT create_distributed_table('table_option', 'a'); -- setting: compression -- 
get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -27,15 +46,10 @@ $cmd$); (4 rows) -- change setting -SELECT alter_columnar_table_set('table_option', compression => 'pglz'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option SET (columnar.compression = pglz); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -46,15 +60,10 @@ $cmd$); (4 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option', compression => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option RESET (columnar.compression); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -67,7 +76,7 @@ $cmd$); -- setting: compression_level -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -78,15 +87,10 @@ $cmd$); (4 rows) -- change setting -SELECT alter_columnar_table_set('table_option', compression_level => 13); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option SET (columnar.compression_level = 13); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -97,15 +101,10 @@ $cmd$); (4 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option', compression_level => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option RESET (columnar.compression_level); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -118,7 +117,7 @@ $cmd$); -- setting: chunk_group_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM 
columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -129,15 +128,10 @@ $cmd$); (4 rows) -- change setting -SELECT alter_columnar_table_set('table_option', chunk_group_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option SET (columnar.chunk_group_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -148,15 +142,10 @@ $cmd$); (4 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option', chunk_group_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option RESET (columnar.chunk_group_row_limit); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -169,7 +158,7 @@ $cmd$); -- setting: stripe_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -180,15 +169,10 @@ $cmd$); (4 rows) -- change setting -SELECT alter_columnar_table_set('table_option', stripe_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option SET (columnar.stripe_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -199,15 +183,10 @@ $cmd$); (4 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option', stripe_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option RESET (columnar.stripe_row_limit); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -219,16 +198,11 @@ $cmd$); -- verify settings are propagated when creating a table CREATE TABLE table_option_2 (a int, b text) USING columnar; -SELECT alter_columnar_table_set('table_option_2', - chunk_group_row_limit => 2000, - stripe_row_limit => 20000, - compression => 'pglz', - compression_level => 15); - alter_columnar_table_set 
---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_2 SET + (columnar.chunk_group_row_limit = 2000, + columnar.stripe_row_limit = 20000, + columnar.compression = pglz, + columnar.compression_level = 15); SELECT create_distributed_table('table_option_2', 'a'); create_distributed_table --------------------------------------------------------------------- @@ -237,7 +211,7 @@ SELECT create_distributed_table('table_option_2', 'a'); -- verify settings on placements SELECT run_command_on_placements('table_option_2',$cmd$ - SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -263,7 +237,7 @@ SELECT * FROM pg_dist_partition WHERE logicalrelid = 'table_option'::regclass; --------------------------------------------------------------------- (0 rows) -SELECT compression FROM columnar.options WHERE regclass = 'table_option'::regclass; +SELECT compression FROM columnar.options WHERE relation = 'table_option'::regclass; compression --------------------------------------------------------------------- none @@ -283,7 +257,7 @@ SELECT create_distributed_table('table_option', 'a'); -- setting: compression -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -298,15 +272,10 @@ $cmd$); (8 rows) -- change setting -SELECT alter_columnar_table_set('table_option', compression => 'pglz'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option SET (columnar.compression = pglz); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -321,15 +290,10 @@ $cmd$); (8 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option', compression => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option RESET (columnar.compression); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -346,7 +310,7 @@ $cmd$); -- setting: compression_level -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -361,15 +325,10 @@ $cmd$); (8 rows) -- change 
setting -SELECT alter_columnar_table_set('table_option', compression_level => 17); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option SET (columnar.compression_level = 17); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -384,15 +343,10 @@ $cmd$); (8 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option', compression_level => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option RESET (columnar.compression_level); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -409,7 +363,7 @@ $cmd$); -- setting: chunk_group_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -424,15 +378,10 @@ $cmd$); (8 rows) -- change setting -SELECT alter_columnar_table_set('table_option', chunk_group_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option SET (columnar.chunk_group_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -447,15 +396,10 @@ $cmd$); (8 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option', chunk_group_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option RESET (columnar.chunk_group_row_limit); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -472,7 +416,7 @@ $cmd$); -- setting: stripe_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -487,15 +431,10 @@ $cmd$); (8 rows) -- change setting -SELECT alter_columnar_table_set('table_option', stripe_row_limit => 2000); - 
alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option SET (columnar.stripe_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -510,15 +449,10 @@ $cmd$); (8 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option', stripe_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option RESET (columnar.stripe_row_limit); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -534,16 +468,11 @@ $cmd$); -- verify settings are propagated when creating a table CREATE TABLE table_option_2 (a int, b text) USING columnar; -SELECT alter_columnar_table_set('table_option_2', - chunk_group_row_limit => 2000, - stripe_row_limit => 20000, - compression => 'pglz', - compression_level => 19); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_2 SET + (columnar.chunk_group_row_limit = 2000, + columnar.stripe_row_limit = 20000, + columnar.compression = pglz, + columnar.compression_level = 19); SELECT create_distributed_table('table_option_2', 'a'); create_distributed_table --------------------------------------------------------------------- @@ -552,7 +481,7 @@ SELECT create_distributed_table('table_option_2', 'a'); -- verify settings on placements SELECT run_command_on_placements('table_option_2',$cmd$ - SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -582,7 +511,7 @@ SELECT * FROM pg_dist_partition WHERE logicalrelid = 'table_option'::regclass; --------------------------------------------------------------------- (0 rows) -SELECT compression FROM columnar.options WHERE regclass = 'table_option'::regclass; +SELECT compression FROM columnar.options WHERE relation = 'table_option'::regclass; compression --------------------------------------------------------------------- none @@ -600,7 +529,7 @@ SELECT create_reference_table('table_option_reference'); -- setting: compression -- get baseline for setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -609,15 +538,10 @@ $cmd$); (2 rows) -- change setting -SELECT alter_columnar_table_set('table_option_reference', compression => 'pglz'); - alter_columnar_table_set 
---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference SET (columnar.compression = pglz); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -626,15 +550,10 @@ $cmd$); (2 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option_reference', compression => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference RESET (columnar.compression); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -645,7 +564,7 @@ $cmd$); -- setting: compression_level -- get baseline for setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -654,15 +573,10 @@ $cmd$); (2 rows) -- change setting -SELECT alter_columnar_table_set('table_option_reference', compression_level => 11); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference SET (columnar.compression_level = 11); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -671,15 +585,10 @@ $cmd$); (2 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option_reference', compression_level => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference RESET (columnar.compression_level); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -690,7 +599,7 @@ $cmd$); -- setting: chunk_group_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -699,15 +608,10 @@ $cmd$); (2 rows) -- change setting -SELECT alter_columnar_table_set('table_option_reference', chunk_group_row_limit => 2000); - alter_columnar_table_set 
---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference SET (columnar.chunk_group_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -716,15 +620,10 @@ $cmd$); (2 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option_reference', chunk_group_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference RESET (columnar.chunk_group_row_limit); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -735,7 +634,7 @@ $cmd$); -- setting: stripe_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -744,15 +643,10 @@ $cmd$); (2 rows) -- change setting -SELECT alter_columnar_table_set('table_option_reference', stripe_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference SET (columnar.stripe_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -761,15 +655,10 @@ $cmd$); (2 rows) -- reset setting -SELECT alter_columnar_table_reset('table_option_reference', stripe_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference RESET (columnar.stripe_row_limit); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -779,16 +668,11 @@ $cmd$); -- verify settings are propagated when creating a table CREATE TABLE table_option_reference_2 (a int, b text) USING columnar; -SELECT alter_columnar_table_set('table_option_reference_2', - chunk_group_row_limit => 2000, - stripe_row_limit => 20000, - compression => 'pglz', - compression_level => 9); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_reference_2 SET + (columnar.chunk_group_row_limit = 2000, + columnar.stripe_row_limit = 20000, + 
columnar.compression = pglz, + columnar.compression_level = 9); SELECT create_reference_table('table_option_reference_2'); create_reference_table --------------------------------------------------------------------- @@ -797,7 +681,7 @@ SELECT create_reference_table('table_option_reference_2'); -- verify settings on placements SELECT run_command_on_placements('table_option_reference_2',$cmd$ - SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -821,7 +705,7 @@ SELECT * FROM pg_dist_partition WHERE logicalrelid = 'table_option_reference'::r --------------------------------------------------------------------- (0 rows) -SELECT compression FROM columnar.options WHERE regclass = 'table_option_reference'::regclass; +SELECT compression FROM columnar.options WHERE relation = 'table_option_reference'::regclass; compression --------------------------------------------------------------------- none @@ -847,7 +731,7 @@ SELECT citus_add_local_table_to_metadata('table_option_citus_local'); -- setting: compression -- get baseline for setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -855,15 +739,10 @@ $cmd$); (1 row) -- change setting -SELECT alter_columnar_table_set('table_option_citus_local', compression => 'pglz'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local SET (columnar.compression = pglz); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -871,15 +750,10 @@ $cmd$); (1 row) -- reset setting -SELECT alter_columnar_table_reset('table_option_citus_local', compression => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local RESET (columnar.compression); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -889,7 +763,7 @@ $cmd$); -- setting: compression_level -- get baseline for setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -897,15 +771,10 @@ $cmd$); (1 row) -- change setting -SELECT 
alter_columnar_table_set('table_option_citus_local', compression_level => 11); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local SET (columnar.compression_level = 11); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -913,15 +782,10 @@ $cmd$); (1 row) -- reset setting -SELECT alter_columnar_table_reset('table_option_citus_local', compression_level => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local RESET (columnar.compression_level); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -931,7 +795,7 @@ $cmd$); -- setting: chunk_group_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -939,15 +803,10 @@ $cmd$); (1 row) -- change setting -SELECT alter_columnar_table_set('table_option_citus_local', chunk_group_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local SET (columnar.chunk_group_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -955,15 +814,10 @@ $cmd$); (1 row) -- reset setting -SELECT alter_columnar_table_reset('table_option_citus_local', chunk_group_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local RESET (columnar.chunk_group_row_limit); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -973,7 +827,7 @@ $cmd$); -- setting: stripe_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements 
--------------------------------------------------------------------- @@ -981,15 +835,10 @@ $cmd$); (1 row) -- change setting -SELECT alter_columnar_table_set('table_option_citus_local', stripe_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local SET (columnar.stripe_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -997,15 +846,10 @@ $cmd$); (1 row) -- reset setting -SELECT alter_columnar_table_reset('table_option_citus_local', stripe_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local RESET (columnar.stripe_row_limit); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -1014,16 +858,11 @@ $cmd$); -- verify settings are propagated when creating a table CREATE TABLE table_option_citus_local_2 (a int, b text) USING columnar; -SELECT alter_columnar_table_set('table_option_citus_local_2', - chunk_group_row_limit => 2000, - stripe_row_limit => 20000, - compression => 'pglz', - compression_level => 9); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_option_citus_local_2 SET + (columnar.chunk_group_row_limit = 2000, + columnar.stripe_row_limit = 20000, + columnar.compression = pglz, + columnar.compression_level = 9); SELECT citus_add_local_table_to_metadata('table_option_citus_local_2'); citus_add_local_table_to_metadata --------------------------------------------------------------------- @@ -1032,7 +871,7 @@ SELECT citus_add_local_table_to_metadata('table_option_citus_local_2'); -- verify settings on placements SELECT run_command_on_placements('table_option_citus_local_2',$cmd$ - SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); run_command_on_placements --------------------------------------------------------------------- @@ -1055,7 +894,7 @@ SELECT * FROM pg_dist_partition WHERE logicalrelid = 'table_option_citus_local': --------------------------------------------------------------------- (0 rows) -SELECT compression FROM columnar.options WHERE regclass = 'table_option_citus_local'::regclass; +SELECT compression FROM columnar.options WHERE relation = 'table_option_citus_local'::regclass; compression --------------------------------------------------------------------- none diff --git a/src/test/regress/expected/columnar_create.out b/src/test/regress/expected/columnar_create.out index 9b5fdace9..5455fbd79 100644 --- a/src/test/regress/expected/columnar_create.out +++ b/src/test/regress/expected/columnar_create.out @@ -5,12 +5,7 @@ 
CREATE TABLE contestant (handle TEXT, birthdate DATE, rating INT, percentile FLOAT, country CHAR(3), achievements TEXT[]) USING columnar; -SELECT alter_columnar_table_set('contestant', compression => 'none'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE contestant SET (columnar.compression = none); CREATE INDEX contestant_idx on contestant(handle); -- Create zstd compressed table CREATE TABLE contestant_compressed (handle TEXT, birthdate DATE, rating INT, @@ -31,7 +26,7 @@ CREATE TABLE columnar_table_1 (a int) USING columnar; INSERT INTO columnar_table_1 VALUES (1); CREATE MATERIALIZED VIEW columnar_table_1_mv USING columnar AS SELECT * FROM columnar_table_1; -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_table_1_mv_storage_id +SELECT columnar.get_storage_id(oid) AS columnar_table_1_mv_storage_id FROM pg_class WHERE relname='columnar_table_1_mv' \gset -- test columnar_relation_set_new_filenode REFRESH MATERIALIZED VIEW columnar_table_1_mv; @@ -41,7 +36,7 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_ f (1 row) -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_table_1_storage_id +SELECT columnar.get_storage_id(oid) AS columnar_table_1_storage_id FROM pg_class WHERE relname='columnar_table_1' \gset BEGIN; -- test columnar_relation_nontransactional_truncate @@ -74,7 +69,7 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_ CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; -- reserve some chunks and a stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,5) i; -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_temp_storage_id +SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id FROM pg_class WHERE relname='columnar_temp' \gset SELECT pg_backend_pid() AS val INTO old_backend_pid; \c - - - :master_port @@ -110,7 +105,7 @@ SELECT COUNT(*) FROM columnar_temp WHERE i < 5; 4 (1 row) -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_temp_storage_id +SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id FROM pg_class WHERE relname='columnar_temp' \gset BEGIN; DROP TABLE columnar_temp; @@ -142,7 +137,7 @@ BEGIN; CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar ON COMMIT DROP; -- force flushing stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,150000) i; - SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_temp_storage_id + SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id FROM pg_class WHERE relname='columnar_temp' \gset COMMIT; -- make sure that table & it's stripe is dropped after commiting above xact @@ -162,7 +157,7 @@ BEGIN; CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar ON COMMIT DELETE ROWS; -- force flushing stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,150000) i; - SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_temp_storage_id + SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id FROM pg_class WHERE relname='columnar_temp' \gset COMMIT; -- make sure that table is not dropped but it's rows's are deleted after commiting above xact diff --git a/src/test/regress/expected/columnar_empty.out b/src/test/regress/expected/columnar_empty.out index e68645f5b..0632fe513 100644 --- a/src/test/regress/expected/columnar_empty.out +++ b/src/test/regress/expected/columnar_empty.out @@ -5,28 
+5,13 @@ SET citus.compression to 'none'; create table t_uncompressed(a int) using columnar; create table t_compressed(a int) using columnar; -- set options -SELECT alter_columnar_table_set('t_compressed', compression => 'pglz'); - alter_columnar_table_set +ALTER TABLE t_compressed SET (columnar.compression = pglz); +ALTER TABLE t_compressed SET (columnar.stripe_row_limit = 2000); +ALTER TABLE t_compressed SET (columnar.chunk_group_row_limit = 1000); +SELECT * FROM columnar.options WHERE relation = 't_compressed'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - -(1 row) - -SELECT alter_columnar_table_set('t_compressed', stripe_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - -SELECT alter_columnar_table_set('t_compressed', chunk_group_row_limit => 1000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - -SELECT * FROM columnar.options WHERE regclass = 't_compressed'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression ---------------------------------------------------------------------- - t_compressed | 1000 | 2000 | 3 | pglz + t_compressed | 1000 | 2000 | pglz | 3 (1 row) -- select diff --git a/src/test/regress/expected/columnar_fallback_scan.out b/src/test/regress/expected/columnar_fallback_scan.out index d386d63d4..c31db0c43 100644 --- a/src/test/regress/expected/columnar_fallback_scan.out +++ b/src/test/regress/expected/columnar_fallback_scan.out @@ -8,12 +8,7 @@ set columnar.enable_custom_scan = false; create table fallback_scan(i int) using columnar; -- large enough to test parallel_workers > 1 -select alter_columnar_table_set('fallback_scan', compression => 'none'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE fallback_scan SET (columnar.compression = none); insert into fallback_scan select generate_series(1,150000); vacuum analyze fallback_scan; select count(*), min(i), max(i), avg(i) from fallback_scan; diff --git a/src/test/regress/expected/columnar_first_row_number.out b/src/test/regress/expected/columnar_first_row_number.out index 87c1d9508..1b86d3819 100644 --- a/src/test/regress/expected/columnar_first_row_number.out +++ b/src/test/regress/expected/columnar_first_row_number.out @@ -7,15 +7,10 @@ BEGIN; INSERT INTO col_table_1 SELECT i FROM generate_series(1, 11) i; ROLLBACK; INSERT INTO col_table_1 SELECT i FROM generate_series(1, 12) i; -SELECT alter_columnar_table_set('col_table_1', stripe_row_limit => 1000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE col_table_1 SET (columnar.stripe_row_limit = 1000); INSERT INTO col_table_1 SELECT i FROM generate_series(1, 2350) i; SELECT row_count, first_row_number FROM columnar.stripe a -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid('col_table_1'::regclass) +WHERE a.storage_id = columnar.get_storage_id('col_table_1'::regclass) ORDER BY stripe_num; row_count | first_row_number --------------------------------------------------------------------- @@ -29,7 +24,7 @@ ORDER BY stripe_num; VACUUM FULL col_table_1; -- show that we properly update first_row_number after VACUUM FULL SELECT row_count, first_row_number FROM columnar.stripe a -WHERE a.storage_id 
= columnar_test_helpers.columnar_relation_storageid('col_table_1'::regclass) +WHERE a.storage_id = columnar.get_storage_id('col_table_1'::regclass) ORDER BY stripe_num; row_count | first_row_number --------------------------------------------------------------------- @@ -45,7 +40,7 @@ BEGIN; COMMIT; -- show that we start with first_row_number=1 after TRUNCATE SELECT row_count, first_row_number FROM columnar.stripe a -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid('col_table_1'::regclass) +WHERE a.storage_id = columnar.get_storage_id('col_table_1'::regclass) ORDER BY stripe_num; row_count | first_row_number --------------------------------------------------------------------- diff --git a/src/test/regress/expected/columnar_indexes.out b/src/test/regress/expected/columnar_indexes.out index fa9d8d3ac..95006aac5 100644 --- a/src/test/regress/expected/columnar_indexes.out +++ b/src/test/regress/expected/columnar_indexes.out @@ -545,11 +545,11 @@ DETAIL: Key (a)=(16999) already exists. -- since second INSERT already failed, should not throw a "duplicate key" error REINDEX TABLE aborted_write_test; BEGIN; - ALTER TABLE columnar.stripe SET (autovacuum_enabled = false); - ALTER TABLE columnar.chunk SET (autovacuum_enabled = false); - ALTER TABLE columnar.chunk_group SET (autovacuum_enabled = false); + ALTER TABLE columnar_internal.stripe SET (autovacuum_enabled = false); + ALTER TABLE columnar_internal.chunk SET (autovacuum_enabled = false); + ALTER TABLE columnar_internal.chunk_group SET (autovacuum_enabled = false); DROP TABLE aborted_write_test; - TRUNCATE columnar.stripe, columnar.chunk, columnar.chunk_group; + TRUNCATE columnar_internal.stripe, columnar_internal.chunk, columnar_internal.chunk_group; CREATE TABLE aborted_write_test (a INT) USING columnar; SAVEPOINT svpt; INSERT INTO aborted_write_test SELECT i FROM generate_series(1, 2) i; @@ -559,12 +559,13 @@ BEGIN; (2 rows) ROLLBACK TO SAVEPOINT svpt; - -- Already disabled autovacuum for all three metadata tables. - -- Here we truncate columnar.chunk and columnar.chunk_group but not - -- columnar.stripe to make sure that we properly handle dead tuples - -- in columnar.stripe, i.e. stripe metadata entries for aborted - -- transactions. - TRUNCATE columnar.chunk, columnar.chunk_group; + -- Already disabled autovacuum for all three metadata tables. Here + -- we truncate columnar_internal.chunk and + -- columnar.chunk_group but not columnar.stripe to + -- make sure that we properly handle dead tuples in + -- columnar.stripe, i.e. stripe metadata entries for + -- aborted transactions. + TRUNCATE columnar_internal.chunk, columnar_internal.chunk_group; CREATE INDEX ON aborted_write_test (a); ROLLBACK; create table events (event_id bigserial, event_time timestamptz default now(), payload text) using columnar; @@ -611,7 +612,7 @@ begin; rollback; insert into uniq select generate_series(1,100); SELECT COUNT(*)=1 FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); ?column? --------------------------------------------------------------------- t @@ -630,7 +631,7 @@ begin; rollback; insert into uniq select generate_series(1,100); SELECT COUNT(*)=1 FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); ?column? 
--------------------------------------------------------------------- t @@ -644,7 +645,7 @@ begin; rollback; insert into uniq select generate_series(1,100); SELECT COUNT(*)=1 FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); ?column? --------------------------------------------------------------------- t @@ -663,7 +664,7 @@ begin; rollback; insert into uniq select generate_series(1,100); SELECT COUNT(*)=1 FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); ?column? --------------------------------------------------------------------- t @@ -680,7 +681,7 @@ begin; insert into uniq select generate_series(1,100); -- didn't flush anything yet, but should see the in progress stripe-write SELECT stripe_num, first_row_number, row_count FROM columnar.stripe cs - WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); + WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); stripe_num | first_row_number | row_count --------------------------------------------------------------------- 2 | 150001 | 0 @@ -689,7 +690,7 @@ begin; commit; -- should have completed the stripe reservation SELECT stripe_num, first_row_number, row_count FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); stripe_num | first_row_number | row_count --------------------------------------------------------------------- 2 | 150001 | 100 diff --git a/src/test/regress/expected/columnar_insert.out b/src/test/regress/expected/columnar_insert.out index 2227bd839..5ec0ed383 100644 --- a/src/test/regress/expected/columnar_insert.out +++ b/src/test/regress/expected/columnar_insert.out @@ -171,12 +171,7 @@ DROP TABLE test_toast_columnar; -- We support writing into zero column tables, but not reading from them. -- We test that metadata makes sense so we can fix the read path in future. 
CREATE TABLE zero_col() USING columnar; -SELECT alter_columnar_table_set('zero_col', chunk_group_row_limit => 1000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE zero_col SET (columnar.chunk_group_row_limit = 1000); INSERT INTO zero_col DEFAULT VALUES; INSERT INTO zero_col DEFAULT VALUES; INSERT INTO zero_col DEFAULT VALUES; @@ -200,7 +195,7 @@ select (1 row) SELECT relname, stripe_num, chunk_group_count, row_count FROM columnar.stripe a, pg_class b -WHERE columnar_test_helpers.columnar_relation_storageid(b.oid)=a.storage_id AND relname = 'zero_col' +WHERE columnar.get_storage_id(b.oid)=a.storage_id AND relname = 'zero_col' ORDER BY 1,2,3,4; relname | stripe_num | chunk_group_count | row_count --------------------------------------------------------------------- @@ -212,14 +207,14 @@ ORDER BY 1,2,3,4; (5 rows) SELECT relname, stripe_num, value_count FROM columnar.chunk a, pg_class b -WHERE columnar_test_helpers.columnar_relation_storageid(b.oid)=a.storage_id AND relname = 'zero_col' +WHERE columnar.get_storage_id(b.oid)=a.storage_id AND relname = 'zero_col' ORDER BY 1,2,3; relname | stripe_num | value_count --------------------------------------------------------------------- (0 rows) SELECT relname, stripe_num, chunk_group_num, row_count FROM columnar.chunk_group a, pg_class b -WHERE columnar_test_helpers.columnar_relation_storageid(b.oid)=a.storage_id AND relname = 'zero_col' +WHERE columnar.get_storage_id(b.oid)=a.storage_id AND relname = 'zero_col' ORDER BY 1,2,3,4; relname | stripe_num | chunk_group_num | row_count --------------------------------------------------------------------- @@ -231,12 +226,7 @@ ORDER BY 1,2,3,4; (5 rows) CREATE TABLE selfinsert(x int) USING columnar; -SELECT alter_columnar_table_set('selfinsert', stripe_row_limit => 1000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE selfinsert SET (columnar.stripe_row_limit = 1000); BEGIN; INSERT INTO selfinsert SELECT generate_series(1,1010); INSERT INTO selfinsert SELECT * FROM selfinsert; diff --git a/src/test/regress/expected/columnar_matview.out b/src/test/regress/expected/columnar_matview.out index 495cb0548..2b741273e 100644 --- a/src/test/regress/expected/columnar_matview.out +++ b/src/test/regress/expected/columnar_matview.out @@ -25,33 +25,28 @@ SELECT * FROM t_view a ORDER BY a; -- show columnar options for materialized view SELECT * FROM columnar.options -WHERE regclass = 't_view'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 't_view'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - t_view | 10000 | 150000 | 3 | none + t_view | 10000 | 150000 | none | 3 (1 row) -- show we can set options on a materialized view -SELECT alter_columnar_table_set('t_view', compression => 'pglz'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE t_view SET (columnar.compression = pglz); SELECT * FROM columnar.options -WHERE regclass = 't_view'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 't_view'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level 
--------------------------------------------------------------------- - t_view | 10000 | 150000 | 3 | pglz + t_view | 10000 | 150000 | pglz | 3 (1 row) REFRESH MATERIALIZED VIEW t_view; -- verify options have not been changed SELECT * FROM columnar.options -WHERE regclass = 't_view'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 't_view'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - t_view | 10000 | 150000 | 3 | pglz + t_view | 10000 | 150000 | pglz | 3 (1 row) SELECT * FROM t_view a ORDER BY a; @@ -66,7 +61,7 @@ SELECT * FROM t_view a ORDER BY a; (6 rows) -- verify that we have created metadata entries for the materialized view -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS storageid +SELECT columnar.get_storage_id(oid) AS storageid FROM pg_class WHERE relname='t_view' \gset SELECT count(*) FROM columnar.stripe WHERE storage_id=:storageid; count diff --git a/src/test/regress/expected/columnar_permissions.out b/src/test/regress/expected/columnar_permissions.out index eb058c951..d100a998e 100644 --- a/src/test/regress/expected/columnar_permissions.out +++ b/src/test/regress/expected/columnar_permissions.out @@ -1,13 +1,151 @@ +create table no_access (i int) using columnar; +insert into no_access values(1); +insert into no_access values(2); +insert into no_access values(3); select current_user \gset create user columnar_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. \c - columnar_user +-- owned by columnar_user create table columnar_permissions(i int) using columnar; insert into columnar_permissions values(1); +insert into columnar_permissions values(2); alter table columnar_permissions add column j int; -insert into columnar_permissions values(2,20); -vacuum columnar_permissions; +alter table columnar_permissions reset (columnar.compression); +alter table columnar_permissions set (columnar.compression = none); +select alter_columnar_table_reset('columnar_permissions', stripe_row_limit => true); + alter_columnar_table_reset +--------------------------------------------------------------------- + +(1 row) + +select alter_columnar_table_set('columnar_permissions', stripe_row_limit => 2222); + alter_columnar_table_set +--------------------------------------------------------------------- + +(1 row) + +select 1 from columnar.get_storage_id('columnar_permissions'::regclass); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- error +select 1 from columnar.get_storage_id('no_access'::regclass); +ERROR: must be owner of table no_access +-- only tuples related to columnar_permissions should be visible +select relation, chunk_group_row_limit, stripe_row_limit, compression, compression_level + from columnar.options + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level +--------------------------------------------------------------------- + columnar_permissions | 10000 | 2222 | none | 3 +(1 row) + +select relation, stripe_num, row_count, first_row_number + from columnar.stripe + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + relation | stripe_num | row_count | first_row_number +--------------------------------------------------------------------- + columnar_permissions | 1 | 1 | 1 + columnar_permissions | 2 | 1 | 150001 +(2 rows) + +select relation, stripe_num, attr_num, chunk_group_num, value_count + from columnar.chunk + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + relation | stripe_num | attr_num | chunk_group_num | value_count +--------------------------------------------------------------------- + columnar_permissions | 1 | 1 | 0 | 1 + columnar_permissions | 2 | 1 | 0 | 1 +(2 rows) + +select relation, stripe_num, chunk_group_num, row_count + from columnar.chunk_group + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + relation | stripe_num | chunk_group_num | row_count +--------------------------------------------------------------------- + columnar_permissions | 1 | 0 | 1 + columnar_permissions | 2 | 0 | 1 +(2 rows) + truncate columnar_permissions; -drop table columnar_permissions; +insert into columnar_permissions values(2,20); +insert into columnar_permissions values(2,30); +insert into columnar_permissions values(4,40); +insert into columnar_permissions values(5,50); +vacuum columnar_permissions; +-- error: columnar_user can't alter no_access +alter table no_access reset (columnar.stripe_row_limit); +ERROR: must be owner of table no_access +alter table no_access set (columnar.stripe_row_limit = 12000); +ERROR: must be owner of table no_access +select alter_columnar_table_reset('no_access', chunk_group_row_limit => true); +ERROR: must be owner of table no_access +CONTEXT: SQL statement "ALTER TABLE no_access RESET (columnar.chunk_group_row_limit)" +PL/pgSQL function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean) line XX at EXECUTE +select alter_columnar_table_set('no_access', chunk_group_row_limit => 1111); +ERROR: must be owner of table no_access +CONTEXT: SQL statement "ALTER TABLE no_access SET (columnar.chunk_group_row_limit=1111)" +PL/pgSQL function alter_columnar_table_set(regclass,integer,integer,name,integer) line XX at EXECUTE \c - :current_user +-- should see tuples from both columnar_permissions and no_access +select relation, chunk_group_row_limit, stripe_row_limit, compression, compression_level + from columnar.options + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level +--------------------------------------------------------------------- + no_access | 10000 | 150000 | zstd | 3 + columnar_permissions | 10000 | 2222 | none | 3 +(2 rows) + +select relation, stripe_num, 
row_count, first_row_number + from columnar.stripe + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + relation | stripe_num | row_count | first_row_number +--------------------------------------------------------------------- + no_access | 1 | 1 | 1 + no_access | 2 | 1 | 150001 + no_access | 3 | 1 | 300001 + columnar_permissions | 1 | 1 | 1 + columnar_permissions | 2 | 1 | 2223 + columnar_permissions | 3 | 1 | 4445 + columnar_permissions | 4 | 1 | 6667 +(7 rows) + +select relation, stripe_num, attr_num, chunk_group_num, value_count + from columnar.chunk + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + relation | stripe_num | attr_num | chunk_group_num | value_count +--------------------------------------------------------------------- + no_access | 1 | 1 | 0 | 1 + no_access | 2 | 1 | 0 | 1 + no_access | 3 | 1 | 0 | 1 + columnar_permissions | 1 | 1 | 0 | 1 + columnar_permissions | 1 | 2 | 0 | 1 + columnar_permissions | 2 | 1 | 0 | 1 + columnar_permissions | 2 | 2 | 0 | 1 + columnar_permissions | 3 | 1 | 0 | 1 + columnar_permissions | 3 | 2 | 0 | 1 + columnar_permissions | 4 | 1 | 0 | 1 + columnar_permissions | 4 | 2 | 0 | 1 +(11 rows) + +select relation, stripe_num, chunk_group_num, row_count + from columnar.chunk_group + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + relation | stripe_num | chunk_group_num | row_count +--------------------------------------------------------------------- + no_access | 1 | 0 | 1 + no_access | 2 | 0 | 1 + no_access | 3 | 0 | 1 + columnar_permissions | 1 | 0 | 1 + columnar_permissions | 2 | 0 | 1 + columnar_permissions | 3 | 0 | 1 + columnar_permissions | 4 | 0 | 1 +(7 rows) + +drop table columnar_permissions; +drop table no_access; diff --git a/src/test/regress/expected/columnar_pg15.out b/src/test/regress/expected/columnar_pg15.out new file mode 100644 index 000000000..499044fde --- /dev/null +++ b/src/test/regress/expected/columnar_pg15.out @@ -0,0 +1,60 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif +CREATE TABLE alter_am(i int); +CREATE TABLE +INSERT INTO alter_am SELECT generate_series(1,1000000); +INSERT 0 1000000 +SELECT * FROM columnar.options WHERE regclass = 'alter_am'::regclass; + regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +--------------------------------------------------------------------- +(0 rows) + +SELECT SUM(i) FROM alter_am; + sum +--------------------------------------------------------------------- + 500000500000 +(1 row) + +ALTER TABLE alter_am + SET ACCESS METHOD columnar, + SET (columnar.compression = pglz, fillfactor = 20); +ALTER TABLE +SELECT * FROM columnar.options WHERE regclass = 'alter_am'::regclass; + regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +--------------------------------------------------------------------- + alter_am | 10000 | 150000 | 3 | pglz +(1 row) + +SELECT SUM(i) FROM alter_am; + sum +--------------------------------------------------------------------- + 500000500000 +(1 row) + +ALTER TABLE alter_am SET ACCESS METHOD heap; +ALTER TABLE +-- columnar options should be gone +SELECT * FROM columnar.options WHERE regclass = 'alter_am'::regclass; + regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +--------------------------------------------------------------------- +(0 rows) + +SELECT 
SUM(i) FROM alter_am; + sum +--------------------------------------------------------------------- + 500000500000 +(1 row) + +-- error: setting columnar options must happen after converting to columnar +ALTER TABLE alter_am + SET (columnar.stripe_row_limit = 1111), + SET ACCESS METHOD columnar; +ERROR: ALTER TABLE cannot alter the access method after altering storage parameters +HINT: Specify SET ACCESS METHOD before storage parameters, or use separate ALTER TABLE commands. +DROP TABLE alter_am; +DROP TABLE diff --git a/src/test/regress/expected/columnar_pg15_0.out b/src/test/regress/expected/columnar_pg15_0.out new file mode 100644 index 000000000..a7e3fbf20 --- /dev/null +++ b/src/test/regress/expected/columnar_pg15_0.out @@ -0,0 +1,6 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q diff --git a/src/test/regress/expected/columnar_recursive.out b/src/test/regress/expected/columnar_recursive.out index 14f954a5a..7b4b828be 100644 --- a/src/test/regress/expected/columnar_recursive.out +++ b/src/test/regress/expected/columnar_recursive.out @@ -12,7 +12,7 @@ INSERT INTO t2 SELECT i, f(i) FROM generate_series(1, 5) i; -- there are no subtransactions, so above statement should batch -- INSERTs inside the UDF and create on stripe per table. SELECT relname, count(*) FROM columnar.stripe a, pg_class b -WHERE columnar_test_helpers.columnar_relation_storageid(b.oid)=a.storage_id AND relname IN ('t1', 't2') +WHERE columnar.get_storage_id(b.oid)=a.storage_id AND relname IN ('t1', 't2') GROUP BY relname ORDER BY relname; relname | count diff --git a/src/test/regress/expected/columnar_rollback.out b/src/test/regress/expected/columnar_rollback.out index 2217dd788..3a3b995da 100644 --- a/src/test/regress/expected/columnar_rollback.out +++ b/src/test/regress/expected/columnar_rollback.out @@ -4,7 +4,7 @@ CREATE TABLE t(a int, b int) USING columnar; CREATE VIEW t_stripes AS SELECT * FROM columnar.stripe a, pg_class b -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AND b.relname = 't'; +WHERE a.storage_id = columnar.get_storage_id(b.oid) AND b.relname = 't'; BEGIN; INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i; ROLLBACK; diff --git a/src/test/regress/expected/columnar_tableoptions.out b/src/test/regress/expected/columnar_tableoptions.out index 7bf650e02..c9376299a 100644 --- a/src/test/regress/expected/columnar_tableoptions.out +++ b/src/test/regress/expected/columnar_tableoptions.out @@ -5,134 +5,113 @@ CREATE TABLE table_options (a int) USING columnar; INSERT INTO table_options SELECT generate_series(1,100); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 10000 | 150000 | 3 | none + table_options | 10000 | 150000 | none | 3 (1 row) -- test changing the compression -SELECT alter_columnar_table_set('table_options', compression => 'pglz'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_options SET (columnar.compression = pglz); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 
'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 10000 | 150000 | 3 | pglz + table_options | 10000 | 150000 | pglz | 3 (1 row) -- test changing the compression level -SELECT alter_columnar_table_set('table_options', compression_level => 5); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_options SET (columnar.compression_level = 5); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 10000 | 150000 | 5 | pglz + table_options | 10000 | 150000 | pglz | 5 (1 row) -- test changing the chunk_group_row_limit -SELECT alter_columnar_table_set('table_options', chunk_group_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_options SET (columnar.chunk_group_row_limit = 2000); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 2000 | 150000 | 5 | pglz + table_options | 2000 | 150000 | pglz | 5 (1 row) -- test changing the chunk_group_row_limit -SELECT alter_columnar_table_set('table_options', stripe_row_limit => 4000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_options SET (columnar.stripe_row_limit = 4000); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 2000 | 4000 | 5 | pglz + table_options | 2000 | 4000 | pglz | 5 (1 row) -- VACUUM FULL creates a new table, make sure it copies settings from the table you are vacuuming VACUUM FULL table_options; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 2000 | 4000 | 5 | pglz + table_options | 2000 | 4000 | pglz | 5 (1 row) -- set all settings at the same time -SELECT alter_columnar_table_set('table_options', stripe_row_limit => 8000, chunk_group_row_limit => 4000, compression 
=> 'none', compression_level => 7); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE table_options SET + (columnar.stripe_row_limit = 8000, + columnar.chunk_group_row_limit = 4000, + columnar.compression = none, + columnar.compression_level = 7); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 4000 | 8000 | 7 | none + table_options | 4000 | 8000 | none | 7 (1 row) -- make sure table options are not changed when VACUUM a table VACUUM table_options; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 4000 | 8000 | 7 | none + table_options | 4000 | 8000 | none | 7 (1 row) -- make sure table options are not changed when VACUUM FULL a table VACUUM FULL table_options; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 4000 | 8000 | 7 | none + table_options | 4000 | 8000 | none | 7 (1 row) -- make sure table options are not changed when truncating a table TRUNCATE table_options; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 4000 | 8000 | 7 | none + table_options | 4000 | 8000 | none | 7 (1 row) ALTER TABLE table_options ALTER COLUMN a TYPE bigint; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 4000 | 8000 | 7 | none + table_options | 4000 | 8000 | none | 7 (1 row) -- reset settings one by one to the version of the GUC's @@ -143,66 +122,46 @@ SET columnar.compression_level TO 11; -- verify setting the GUC's didn't change the settings -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | 
compression | compression_level --------------------------------------------------------------------- - table_options | 4000 | 8000 | 7 | none -(1 row) - -SELECT alter_columnar_table_reset('table_options', chunk_group_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - + table_options | 4000 | 8000 | none | 7 (1 row) +ALTER TABLE table_options RESET (columnar.chunk_group_row_limit); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 1000 | 8000 | 7 | none -(1 row) - -SELECT alter_columnar_table_reset('table_options', stripe_row_limit => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - + table_options | 1000 | 8000 | none | 7 (1 row) +ALTER TABLE table_options RESET (columnar.stripe_row_limit); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 1000 | 10000 | 7 | none -(1 row) - -SELECT alter_columnar_table_reset('table_options', compression => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - + table_options | 1000 | 10000 | none | 7 (1 row) +ALTER TABLE table_options RESET (columnar.compression); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 1000 | 10000 | 7 | pglz -(1 row) - -SELECT alter_columnar_table_reset('table_options', compression_level => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - + table_options | 1000 | 10000 | pglz | 7 (1 row) +ALTER TABLE table_options RESET (columnar.compression_level); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 1000 | 10000 | 11 | pglz + table_options | 1000 | 10000 | pglz | 11 (1 row) -- verify resetting all settings at once work @@ -212,70 +171,139 @@ SET columnar.compression TO 'none'; SET columnar.compression_level TO 13; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + 
relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 1000 | 10000 | 11 | pglz -(1 row) - -SELECT alter_columnar_table_reset( - 'table_options', - chunk_group_row_limit => true, - stripe_row_limit => true, - compression => true, - compression_level => true); - alter_columnar_table_reset ---------------------------------------------------------------------- - + table_options | 1000 | 10000 | pglz | 11 (1 row) +ALTER TABLE table_options RESET + (columnar.chunk_group_row_limit, + columnar.stripe_row_limit, + columnar.compression, + columnar.compression_level); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - table_options | 10000 | 100000 | 13 | none + table_options | 10000 | 100000 | none | 13 (1 row) -- verify edge cases -- first start with a table that is not a columnar table CREATE TABLE not_a_columnar_table (a int); -SELECT alter_columnar_table_set('not_a_columnar_table', compression => 'pglz'); -ERROR: table not_a_columnar_table is not a columnar table -SELECT alter_columnar_table_reset('not_a_columnar_table', compression => true); -ERROR: table not_a_columnar_table is not a columnar table +ALTER TABLE not_a_columnar_table SET (columnar.compression = pglz); +ERROR: columnar storage parameters specified on non-columnar table +ALTER TABLE not_a_columnar_table RESET (columnar.compression); +ERROR: columnar storage parameters specified on non-columnar table -- verify you can't use a compression that is not known -SELECT alter_columnar_table_set('table_options', compression => 'foobar'); +ALTER TABLE table_options SET (columnar.compression = foobar); +ERROR: unknown compression type for columnar table: foobar +-- verify you can't use a columnar setting that is not known +ALTER TABLE table_options SET (columnar.foobar = 123); +ERROR: unrecognized columnar storage parameter "foobar" +ALTER TABLE table_options RESET (columnar.foobar); +ERROR: unrecognized columnar storage parameter "foobar" +-- verify that invalid options are caught early, before query executes +-- (error should be about invalid options, not division-by-zero) +CREATE TABLE fail(i) USING columnar WITH (columnar.foobar = 123) AS SELECT 1/0; +ERROR: unrecognized columnar storage parameter "foobar" +CREATE TABLE fail(i) USING columnar WITH (columnar.compression = foobar) AS SELECT 1/0; ERROR: unknown compression type for columnar table: foobar -- verify cannot set out of range compression levels -SELECT alter_columnar_table_set('table_options', compression_level => 0); +ALTER TABLE table_options SET (columnar.compression_level = 0); ERROR: compression level out of range HINT: compression level must be between 1 and 19 -SELECT alter_columnar_table_set('table_options', compression_level => 20); +ALTER TABLE table_options SET (columnar.compression_level = 20); ERROR: compression level out of range HINT: compression level must be between 1 and 19 -- verify cannot set out of range stripe_row_limit & chunk_group_row_limit options -SELECT alter_columnar_table_set('table_options', stripe_row_limit => 999); +ALTER TABLE table_options SET (columnar.stripe_row_limit = 999); ERROR: 
stripe row count limit out of range HINT: stripe row count limit must be between 1000 and 10000000 -SELECT alter_columnar_table_set('table_options', stripe_row_limit => 10000001); +ALTER TABLE table_options SET (columnar.stripe_row_limit = 10000001); ERROR: stripe row count limit out of range HINT: stripe row count limit must be between 1000 and 10000000 -SELECT alter_columnar_table_set('table_options', chunk_group_row_limit => 999); +ALTER TABLE table_options SET (columnar.chunk_group_row_limit = 999); ERROR: chunk group row count limit out of range HINT: chunk group row count limit must be between 1000 and 100000 -SELECT alter_columnar_table_set('table_options', chunk_group_row_limit => 100001); +ALTER TABLE table_options SET (columnar.chunk_group_row_limit = 100001); ERROR: chunk group row count limit out of range HINT: chunk group row count limit must be between 1000 and 100000 -SELECT alter_columnar_table_set('table_options', chunk_group_row_limit => 0); +ALTER TABLE table_options SET (columnar.chunk_group_row_limit = 0); ERROR: chunk group row count limit out of range HINT: chunk group row count limit must be between 1000 and 100000 INSERT INTO table_options VALUES (1); +-- multiple SET/RESET clauses +ALTER TABLE table_options + SET (columnar.compression = pglz, columnar.compression_level = 7), + SET (columnar.compression_level = 6); +SELECT * FROM columnar.options +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level +--------------------------------------------------------------------- + table_options | 10000 | 100000 | pglz | 6 +(1 row) + +ALTER TABLE table_options + SET (columnar.compression = pglz, columnar.stripe_row_limit = 7777), + RESET (columnar.stripe_row_limit), + SET (columnar.chunk_group_row_limit = 5555); +SELECT * FROM columnar.options +WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level +--------------------------------------------------------------------- + table_options | 5555 | 100000 | pglz | 6 +(1 row) + +-- a no-op; shouldn't throw an error +ALTER TABLE IF EXISTS what SET (columnar.compression = lz4); +NOTICE: relation "what" does not exist, skipping +-- a no-op; shouldn't throw an error +CREATE TABLE IF NOT EXISTS table_options(a int) USING columnar + WITH (columnar.compression_level = 4); +NOTICE: relation "table_options" already exists, skipping +-- test old interface based on functions +SELECT alter_columnar_table_reset('table_options', compression => true); + alter_columnar_table_reset +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM columnar.options WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level +--------------------------------------------------------------------- + table_options | 5555 | 100000 | none | 6 +(1 row) + +SELECT alter_columnar_table_set('table_options', compression_level => 1); + alter_columnar_table_set +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM columnar.options WHERE relation = 'table_options'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level +--------------------------------------------------------------------- + table_options | 5555 | 100000 | none | 1 +(1 row) + +-- error: set columnar options on heap tables +CREATE TABLE heap_options(i int) ; 
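The columnar_tableoptions expected output above exercises the new reloption-style interface end to end. As a quick reference, here is a minimal sketch of that syntax; the table name and limit values below are illustrative, not taken from the regression tests:

```sql
-- Sketch only: 'events_sketch' and the numeric limits are made-up values.
CREATE TABLE events_sketch (id bigint, payload text) USING columnar;

-- Set one or more columnar options as ALTER TABLE storage parameters.
ALTER TABLE events_sketch SET
    (columnar.compression = zstd,
     columnar.compression_level = 10,
     columnar.stripe_row_limit = 100000,
     columnar.chunk_group_row_limit = 5000);

-- Drop a single option back to the corresponding columnar.* GUC default.
ALTER TABLE events_sketch RESET (columnar.compression_level);

-- Inspect per-table settings; the catalog column is now named "relation".
SELECT * FROM columnar.options WHERE relation = 'events_sketch'::regclass;
```

The older alter_columnar_table_set()/alter_columnar_table_reset() functions still appear in these tests and, per the error contexts shown above, now act as wrappers around the equivalent ALTER TABLE commands.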
+ALTER TABLE heap_options SET (columnar.stripe_row_limit = 12000); +ERROR: columnar storage parameters specified on non-columnar table +-- ordinarily, postgres allows bogus options for a RESET clause, +-- but if it's a heap table and someone specifies columnar options, +-- we block them +ALTER TABLE heap_options RESET (columnar.stripe_row_limit, foobar); +ERROR: columnar storage parameters specified on non-columnar table +DROP TABLE heap_options; -- verify options are removed when table is dropped DROP TABLE table_options; -- we expect no entries in çstore.options for anything not found int pg_class -SELECT * FROM columnar.options o WHERE o.regclass NOT IN (SELECT oid FROM pg_class); - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +SELECT * FROM columnar.options o WHERE o.relation NOT IN (SELECT oid FROM pg_class); + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/columnar_test_helpers.out b/src/test/regress/expected/columnar_test_helpers.out index f5142a431..d85bbd54f 100644 --- a/src/test/regress/expected/columnar_test_helpers.out +++ b/src/test/regress/expected/columnar_test_helpers.out @@ -1,8 +1,5 @@ CREATE SCHEMA columnar_test_helpers; SET search_path TO columnar_test_helpers; -CREATE FUNCTION columnar_relation_storageid(relid oid) RETURNS bigint - LANGUAGE C STABLE STRICT - AS 'citus', $$columnar_relation_storageid$$; CREATE OR REPLACE FUNCTION columnar_storage_info( rel regclass, version_major OUT int4, @@ -26,22 +23,22 @@ $$ LANGUAGE plpgsql; CREATE view chunk_group_consistency AS WITH a as ( SELECT storage_id, stripe_num, chunk_group_num, min(value_count) as row_count - FROM columnar.chunk + FROM columnar_internal.chunk GROUP BY 1,2,3 ), b as ( SELECT storage_id, stripe_num, chunk_group_num, max(value_count) as row_count - FROM columnar.chunk + FROM columnar_internal.chunk GROUP BY 1,2,3 ), c as ( (TABLE a EXCEPT TABLE b) UNION (TABLE b EXCEPT TABLE a) UNION - (TABLE a EXCEPT TABLE columnar.chunk_group) UNION (TABLE columnar.chunk_group EXCEPT TABLE a) + (TABLE a EXCEPT TABLE columnar_internal.chunk_group) UNION (TABLE columnar_internal.chunk_group EXCEPT TABLE a) ), d as ( SELECT storage_id, stripe_num, count(*) as chunk_group_count - FROM columnar.chunk_group + FROM columnar_internal.chunk_group GROUP BY 1,2 ), e as ( SELECT storage_id, stripe_num, chunk_group_count - FROM columnar.stripe + FROM columnar_internal.stripe ), f as ( (TABLE d EXCEPT TABLE e) UNION (TABLE e EXCEPT TABLE d) ) @@ -54,9 +51,9 @@ DECLARE BEGIN SELECT count(*) INTO union_storage_id_count FROM ( - SELECT storage_id FROM columnar.stripe UNION ALL - SELECT storage_id FROM columnar.chunk UNION ALL - SELECT storage_id FROM columnar.chunk_group + SELECT storage_id FROM columnar_internal.stripe UNION ALL + SELECT storage_id FROM columnar_internal.chunk UNION ALL + SELECT storage_id FROM columnar_internal.chunk_group ) AS union_storage_id WHERE storage_id=input_storage_id; diff --git a/src/test/regress/expected/columnar_truncate.out b/src/test/regress/expected/columnar_truncate.out index fe5169535..a3d75200e 100644 --- a/src/test/regress/expected/columnar_truncate.out +++ b/src/test/regress/expected/columnar_truncate.out @@ -201,7 +201,7 @@ TRUNCATE columnar_same_transaction_truncate; INSERT INTO columnar_same_transaction_truncate SELECT * FROM generate_series(20, 23); COMMIT; -- should output "1" for the newly created 
relation -SELECT count(distinct storage_id) - :columnar_data_files_before_truncate FROM columnar.stripe; +SELECT count(distinct storage_id) - :columnar_data_files_before_truncate FROM columnar_internal.stripe; ?column? --------------------------------------------------------------------- 1 diff --git a/src/test/regress/expected/columnar_vacuum.out b/src/test/regress/expected/columnar_vacuum.out index 3850ce1da..a4b38349a 100644 --- a/src/test/regress/expected/columnar_vacuum.out +++ b/src/test/regress/expected/columnar_vacuum.out @@ -3,7 +3,7 @@ SELECT count(distinct storage_id) AS columnar_table_count FROM columnar.stripe \ CREATE TABLE t(a int, b int) USING columnar; CREATE VIEW t_stripes AS SELECT * FROM columnar.stripe a, pg_class b -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AND b.relname='t'; +WHERE a.storage_id = columnar.get_storage_id(b.oid) AND b.relname='t'; SELECT count(*) FROM t_stripes; count --------------------------------------------------------------------- @@ -62,12 +62,7 @@ select (1 row) -- test the case when all data cannot fit into a single stripe -SELECT alter_columnar_table_set('t', stripe_row_limit => 1000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE t SET (columnar.stripe_row_limit = 1000); INSERT INTO t SELECT i, 2 * i FROM generate_series(1,2500) i; SELECT sum(a), sum(b) FROM t; sum | sum @@ -112,7 +107,7 @@ SELECT count(*) FROM t_stripes; ALTER TABLE t DROP COLUMN a; SELECT stripe_num, attr_num, chunk_group_num, minimum_value IS NULL, maximum_value IS NULL FROM columnar.chunk a, pg_class b -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AND b.relname='t' ORDER BY 1, 2, 3; +WHERE a.storage_id = columnar.get_storage_id(b.oid) AND b.relname='t' ORDER BY 1, 2, 3; stripe_num | attr_num | chunk_group_num | ?column? | ?column? --------------------------------------------------------------------- 1 | 1 | 0 | f | f @@ -126,7 +121,7 @@ WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AN VACUUM FULL t; SELECT stripe_num, attr_num, chunk_group_num, minimum_value IS NULL, maximum_value IS NULL FROM columnar.chunk a, pg_class b -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AND b.relname='t' ORDER BY 1, 2, 3; +WHERE a.storage_id = columnar.get_storage_id(b.oid) AND b.relname='t' ORDER BY 1, 2, 3; stripe_num | attr_num | chunk_group_num | ?column? | ?column? 
--------------------------------------------------------------------- 1 | 1 | 0 | t | t @@ -207,25 +202,15 @@ SELECT count(*) FROM t; -- add some stripes with different compression types and create some gaps, -- then vacuum to print stats BEGIN; -SELECT alter_columnar_table_set('t', - chunk_group_row_limit => 1000, - stripe_row_limit => 2000, - compression => 'pglz'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE t SET + (columnar.chunk_group_row_limit = 1000, + columnar.stripe_row_limit = 2000, + columnar.compression = pglz); SAVEPOINT s1; INSERT INTO t SELECT i FROM generate_series(1, 1500) i; ROLLBACK TO SAVEPOINT s1; INSERT INTO t SELECT i / 5 FROM generate_series(1, 1500) i; -SELECT alter_columnar_table_set('t', compression => 'none'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE t SET (columnar.compression = none); SAVEPOINT s2; INSERT INTO t SELECT i FROM generate_series(1, 1500) i; ROLLBACK TO SAVEPOINT s2; @@ -274,12 +259,7 @@ chunk count: 11, containing data for dropped columns: 2, none compressed: 9, pgl -- vacuum full should remove chunks for dropped columns -- note that, a chunk will be stored in non-compressed for if compression -- doesn't reduce its size. -SELECT alter_columnar_table_set('t', compression => 'pglz'); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE t SET (columnar.compression = pglz); VACUUM FULL t; VACUUM VERBOSE t; INFO: statistics for "t": diff --git a/src/test/regress/expected/columnar_zstd.out b/src/test/regress/expected/columnar_zstd.out index ab245ba76..c84d78660 100644 --- a/src/test/regress/expected/columnar_zstd.out +++ b/src/test/regress/expected/columnar_zstd.out @@ -36,12 +36,7 @@ SELECT DISTINCT * FROM test_zstd ORDER BY a, b, c LIMIT 5; VACUUM FULL test_zstd; SELECT pg_relation_size('test_zstd') AS size_comp_level_default \gset -- change compression level -SELECT alter_columnar_table_set('test_zstd', compression_level => 19); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE test_zstd SET (columnar.compression_level = 19); VACUUM FULL test_zstd; SELECT pg_relation_size('test_zstd') AS size_comp_level_19 \gset -- verify that higher compression level compressed better diff --git a/src/test/regress/expected/distributed_locks.out b/src/test/regress/expected/distributed_locks.out new file mode 100644 index 000000000..7d2b8e22a --- /dev/null +++ b/src/test/regress/expected/distributed_locks.out @@ -0,0 +1,155 @@ +CREATE SCHEMA distribute_lock_tests; +SET search_path TO distribute_lock_tests; +SET citus.next_shard_id TO 10000; +CREATE TABLE dist_table(a int); +SELECT create_distributed_table('dist_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_table SELECT n FROM generate_series(1, 5) n; +-- Test acquiring lock outside transaction +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +ERROR: LOCK TABLE can only be used in transaction blocks +-- Test acquiring lock inside procedure +DO $$ +BEGIN +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +END; +$$; +-- Try all valid lock options; also try omitting the optional TABLE keyword. 
+BEGIN TRANSACTION; +LOCK TABLE dist_table IN ACCESS SHARE MODE; +LOCK dist_table IN ROW SHARE MODE; +LOCK TABLE dist_table IN ROW EXCLUSIVE MODE; +LOCK TABLE dist_table IN SHARE UPDATE EXCLUSIVE MODE; +LOCK TABLE dist_table IN SHARE MODE; +LOCK dist_table IN SHARE ROW EXCLUSIVE MODE; +LOCK TABLE dist_table IN EXCLUSIVE MODE; +LOCK TABLE dist_table IN ACCESS EXCLUSIVE MODE; +ROLLBACK; +-- Test that when the user does not have the required permissions to lock +-- the locks are not forwarded to the workers +SET client_min_messages TO ERROR; +SELECT run_command_on_workers($$ + SET citus.enable_ddl_propagation TO OFF; + CREATE ROLE read_only_user WITH LOGIN; + RESET citus.enable_ddl_propagation; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,SET) + (localhost,57638,t,SET) +(2 rows) + +SET citus.enable_ddl_propagation TO OFF; +CREATE ROLE read_only_user WITH LOGIN; +GRANT ALL ON SCHEMA distribute_lock_tests TO read_only_user; +GRANT SELECT ON dist_table TO read_only_user; +RESET citus.enable_ddl_propagation; +RESET client_min_messages; +SET ROLE read_only_user; +SET citus.log_remote_commands TO ON; +BEGIN; +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +ERROR: permission denied for table dist_table +ROLLBACK; +SET citus.log_remote_commands TO OFF; +RESET ROLE; +-- test that user with view permissions can lock the tables +-- which the view is built on +CREATE VIEW myview AS SELECT * FROM dist_table; +SET client_min_messages TO ERROR; +SELECT run_command_on_workers($$ + SET citus.enable_ddl_propagation TO OFF; + CREATE ROLE user_with_view_permissions WITH LOGIN; + GRANT ALL ON SCHEMA distribute_lock_tests TO user_with_view_permissions; + GRANT ALL ON distribute_lock_tests.myview TO user_with_view_permissions; + RESET citus.enable_ddl_propagation; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,SET) + (localhost,57638,t,SET) +(2 rows) + +SET citus.enable_ddl_propagation TO OFF; +CREATE ROLE user_with_view_permissions WITH LOGIN; +GRANT ALL ON SCHEMA distribute_lock_tests TO user_with_view_permissions; +GRANT ALL ON myview TO user_with_view_permissions; +RESET citus.enable_ddl_propagation; +RESET client_min_messages; +SET ROLE TO user_with_view_permissions; +BEGIN; +LOCK myview IN ACCESS EXCLUSIVE MODE; +SELECT run_command_on_workers($$ + SELECT mode FROM pg_locks WHERE relation = 'distribute_lock_tests.dist_table'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,AccessExclusiveLock) + (localhost,57638,t,AccessExclusiveLock) +(2 rows) + +ROLLBACK; +RESET ROLE; +\c - - - :worker_1_port +SET search_path TO distribute_lock_tests; +-- Test trying to lock from a worker when the coordinator is not in the metadata +SET citus.allow_unsafe_locks_from_workers TO 'off'; +BEGIN; +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +ERROR: Cannot acquire a distributed lock from a worker node since the coordinator is not in the metadata. 
+HINT: Either run this command on the coordinator or add the coordinator in the metadata by using: SELECT citus_set_coordinator_host('', ); +Alternatively, though it is not recommended, you can allow this command by running: SET citus.allow_unsafe_locks_from_workers TO 'on'; +ROLLBACK; +-- Verify that the same restriction does not apply to worker local tables +CREATE TABLE local_table(a int); +-- Verify that no locks will be distributed for the local lock +SET citus.log_remote_commands TO ON; +BEGIN; +LOCK local_table IN ACCESS EXCLUSIVE MODE; +ROLLBACK; +RESET citus.log_remote_commands; +-- Cleanup local table +DROP TABLE local_table; +-- Test that setting the guc to 'on' will allow the lock from workers +SET citus.allow_unsafe_locks_from_workers TO 'on'; +BEGIN; +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +ROLLBACK; +-- Test locking a shard +SET citus.enable_manual_changes_to_shards TO OFF; +BEGIN; +LOCK dist_table_10000 IN ACCESS EXCLUSIVE MODE; +ERROR: cannot modify "dist_table_10000" because it is a shard of a distributed table +HINT: Use the distributed table or set citus.enable_manual_changes_to_shards to on to modify shards directly +ROLLBACK; +-- Test allowing shard locks with the citus.enable_manual_changes_to_shards guc +SET citus.enable_manual_changes_to_shards TO ON; +BEGIN; +LOCK dist_table_10000 IN ACCESS EXCLUSIVE MODE; +ROLLBACK; +RESET citus.enable_manual_changes_to_shards; +\c - - - :master_port +DROP SCHEMA distribute_lock_tests CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table distribute_lock_tests.dist_table +drop cascades to view distribute_lock_tests.myview +SET citus.enable_ddl_propagation TO OFF; +DROP ROLE read_only_user; +DROP ROLE user_with_view_permissions; +RESET citus.enable_ddl_propagation; +SELECT run_command_on_workers($$ + SET citus.enable_ddl_propagation TO OFF; + DROP USER read_only_user; + DROP USER user_with_view_permissions; + RESET citus.enable_ddl_propagation; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,SET) + (localhost,57638,t,SET) + (2 rows) + diff --git a/src/test/regress/expected/distributed_planning.out b/src/test/regress/expected/distributed_planning.out index f05e46d3a..5f8eb09e0 100644 --- a/src/test/regress/expected/distributed_planning.out +++ b/src/test/regress/expected/distributed_planning.out @@ -158,8 +158,10 @@ BEGIN; INSERT INTO test VALUES (1, 2); COMMIT; -- basic view queries +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW simple_view AS SELECT count(*) as cnt FROM test t1 JOIN test t2 USING (x); +RESET citus.enable_ddl_propagation; SELECT * FROM simple_view; cnt --------------------------------------------------------------------- diff --git a/src/test/regress/expected/distributed_triggers.out b/src/test/regress/expected/distributed_triggers.out index 4cf0cfee8..bef576fb4 100644 --- a/src/test/regress/expected/distributed_triggers.out +++ b/src/test/regress/expected/distributed_triggers.out @@ -272,6 +272,8 @@ FOR EACH ROW EXECUTE FUNCTION distributed_triggers.bad_shardkey_record_change(); -- Query-on-distributed table exception should catch this INSERT INTO data VALUES ('hello6','world6','{"hello6":"world6"}'); ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. 
+HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "INSERT INTO distributed_triggers.data_changes (shard_key_value, object_id, change_id, operation_type, new_value) VALUES ('BAD', NEW.object_id, COALESCE(last_change_id + 1, 1), TG_OP, NEW.value)" PL/pgSQL function bad_shardkey_record_change() line XX at SQL statement @@ -350,6 +352,8 @@ SELECT create_distributed_function( BEGIN; SELECT insert_document('hello7', 'world7'); ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "UPDATE distributed_triggers.data_changes SET operation_type = TG_OP" PL/pgSQL function remote_shardkey_record_change() line XX at SQL statement while executing command on localhost:xxxxx @@ -358,6 +362,8 @@ PL/pgSQL function insert_document(text,text) line XX at SQL statement END; SELECT insert_document('hello7', 'world7'); ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "UPDATE distributed_triggers.data_changes SET operation_type = TG_OP" PL/pgSQL function remote_shardkey_record_change() line XX at SQL statement SQL statement "INSERT INTO distributed_triggers.data VALUES (key, id, '{"id1":"id2"}')" @@ -501,11 +507,15 @@ AFTER INSERT OR UPDATE OR DELETE ON emptest FOR EACH STATEMENT EXECUTE FUNCTION distributed_triggers.record_emp(); INSERT INTO emptest VALUES ('test5', 1); ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "INSERT INTO distributed_triggers.record_op SELECT 'dummy', TG_OP, now()" PL/pgSQL function record_emp() line XX at SQL statement while executing command on localhost:xxxxx DELETE FROM emptest; ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. 
CONTEXT: SQL statement "INSERT INTO distributed_triggers.record_op SELECT 'dummy', TG_OP, now()" PL/pgSQL function distributed_triggers.record_emp() line XX at SQL statement while executing command on localhost:xxxxx @@ -543,6 +553,8 @@ FOR EACH ROW EXECUTE FUNCTION distributed_triggers.record_change(); TRUNCATE TABLE data_changes; INSERT INTO data_ref_table VALUES ('hello','world','{"ref":"table"}'); ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "SELECT change_id FROM distributed_triggers.data_changes WHERE shard_key_value = NEW.shard_key_value AND object_id = NEW.object_id ORDER BY change_id DESC LIMIT 1" @@ -550,6 +562,8 @@ PL/pgSQL function record_change() line XX at SQL statement while executing command on localhost:xxxxx INSERT INTO data_ref_table VALUES ('hello2','world2','{"ref":"table"}'); ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "SELECT change_id FROM distributed_triggers.data_changes WHERE shard_key_value = NEW.shard_key_value AND object_id = NEW.object_id ORDER BY change_id DESC LIMIT 1" @@ -588,6 +602,8 @@ SELECT create_reference_table('data_changes'); INSERT INTO data_ref_table VALUES ('hello','world','{"ref":"table"}'); ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. 
CONTEXT: SQL statement "INSERT INTO distributed_triggers.data_changes (shard_key_value, object_id, change_id, operation_type, new_value) VALUES (NEW.shard_key_value, NEW.object_id, COALESCE(last_change_id + 1, 1), TG_OP, NEW.value)" PL/pgSQL function record_change() line XX at SQL statement diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out index 846656aaf..2cfd6a7b7 100644 --- a/src/test/regress/expected/drop_partitioned_table.out +++ b/src/test/regress/expected/drop_partitioned_table.out @@ -46,21 +46,6 @@ FROM pg_catalog.pg_class c WHERE n.nspname IN ('drop_partitioned_table', 'schema1') AND c.relkind IN ('r','p') ORDER BY 1, 2; -\c - - - :worker_1_port -SET search_path = drop_partitioned_table; -CREATE VIEW tables_info AS -SELECT n.nspname as "Schema", - c.relname as "Name", - CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned table' END as "Type", - pg_catalog.pg_get_userbyid(c.relowner) as "Owner" -FROM pg_catalog.pg_class c - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - LEFT JOIN pg_user u ON u.usesysid = c.relowner -WHERE n.nspname IN ('drop_partitioned_table', 'schema1') - AND c.relkind IN ('r','p') -ORDER BY 1, 2; -\c - - - :master_port -SET search_path = drop_partitioned_table; SET citus.next_shard_id TO 721000; -- CASE 1 -- Dropping the parent table diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out index ca1c8f838..76952767e 100644 --- a/src/test/regress/expected/failure_add_disable_node.out +++ b/src/test/regress/expected/failure_add_disable_node.out @@ -54,7 +54,6 @@ ORDER BY placementid; (2 rows) SELECT citus_disable_node('localhost', :worker_2_proxy_port, true); -NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 9060) to activate this node back. citus_disable_node --------------------------------------------------------------------- @@ -107,10 +106,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA").kill()'); (1 row) SELECT master_activate_node('localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly +WARNING: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx -- verify node is not activated SELECT * FROM master_get_active_worker_nodes() ORDER BY 1, 2; diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out index 527255bdd..612b441c7 100644 --- a/src/test/regress/expected/failure_multi_dml.out +++ b/src/test/regress/expected/failure_multi_dml.out @@ -218,6 +218,12 @@ COMMIT; '], false ); +WARNING: server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. 
+connection not open +CONTEXT: while executing command on localhost:xxxxx +while executing command on localhost:xxxxx master_run_on_worker --------------------------------------------------------------------- (localhost,57636,t,BEGIN) diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index 7a74d91e5..7d667759d 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -13,6 +13,8 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) +\set VERBOSITY terse +SET client_min_messages TO ERROR; CREATE TABLE t1 (id int PRIMARY KEY); SELECT create_distributed_table('t1', 'id'); create_distributed_table @@ -23,7 +25,6 @@ SELECT create_distributed_table('t1', 'id'); INSERT INTO t1 SELECT x FROM generate_series(1,100) AS f(x); -- Initially turn metadata sync off because we'll ingest errors to start/stop metadata sync operations SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -NOTICE: dropping metadata on the node (localhost,9060) stop_metadata_sync_to_node --------------------------------------------------------------------- @@ -51,12 +52,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx -- Failure to drop all tables in pg_dist_partition -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -64,19 +62,16 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").can SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx -- Failure to delete pg_dist_node entries from the worker -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -84,19 +79,16 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx -- Failure to populate pg_dist_node in the worker -SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -104,17 +96,14 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -ERROR: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx -- Verify that coordinator knows worker does not have valid metadata SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; hasmetadata @@ -153,7 +142,6 @@ SELECT create_distributed_table('t2', 'id'); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
-CONTEXT: while executing command on localhost:xxxxx SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -179,7 +167,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -NOTICE: dropping metadata on the node (localhost,9060) ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); mitmproxy @@ -188,90 +175,27 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -NOTICE: dropping metadata on the node (localhost,9060) -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx stop_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -- Failure to delete pg_dist_node entries from the worker -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -NOTICE: dropping metadata on the node (localhost,9060) ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -NOTICE: dropping metadata on the node (localhost,9060) -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx stop_metadata_sync_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/forcedelegation_functions.out b/src/test/regress/expected/forcedelegation_functions.out index 1bb6b8ba7..103742c5e 100644 --- a/src/test/regress/expected/forcedelegation_functions.out +++ b/src/test/regress/expected/forcedelegation_functions.out @@ -222,19 +222,11 @@ ROLLBACK; BEGIN; -- Query gets delegated to the node of the shard xx_900001 for the key=1, -- and the function inserts value (1+17) locally on the shard xx_900031 +-- which is not allowed because this is not a regular pushdown SELECT insert_data(intcol+17) from test_forcepushdown where intcol = 1; - insert_data ---------------------------------------------------------------------- - -(1 row) - --- This will fail with duplicate error as the function already inserted --- the value(1+17) -SELECT insert_data(18); -DEBUG: pushing down function call in a multi-statement transaction -DEBUG: pushing down the function call -ERROR: duplicate key value violates unique constraint "test_forcepushdown_pkey_900031" -DETAIL: Key (intcol)=(18) already exists. +ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. 
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (a)" PL/pgSQL function forcepushdown_schema.insert_data(integer) line XX at SQL statement while executing command on localhost:xxxxx @@ -524,19 +516,13 @@ END; -- BEGIN; -- Query lands on the shard with key = 300(shard __900089) and the function inserts locally +-- which is not allowed because this is not a regular pushdown SELECT inner_force_delegation_function(id) FROM test_nested WHERE id = 300; -NOTICE: inner_force_delegation_function():301 -DETAIL: from localhost:xxxxx - inner_force_delegation_function ---------------------------------------------------------------------- - 301 -(1 row) - --- Query lands on the shard with key = 300(shard __900089) and the function inserts remotely -SELECT insert_data_non_distarg(id) FROM test_nested WHERE id = 300; ERROR: cannot execute a distributed query from a query on a shard -CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.test_forcepushdown VALUES (a+1)" -PL/pgSQL function forcepushdown_schema.insert_data_non_distarg(integer) line XX at SQL statement +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. +CONTEXT: SQL statement "SELECT max(id)::numeric+1 FROM forcepushdown_schema.test_nested WHERE id = $1" +PL/pgSQL function forcepushdown_schema.inner_force_delegation_function(integer) line XX at SQL statement while executing command on localhost:xxxxx END; -- @@ -545,6 +531,17 @@ END; -- Param(PARAM_EXEC) node e.g. SELECT fn((SELECT col from test_nested where col=val)) BEGIN; SELECT inner_force_delegation_function((SELECT id+112 FROM test_nested WHERE id=400)); +ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. +CONTEXT: SQL statement "SELECT max(id)::numeric+1 FROM forcepushdown_schema.test_nested WHERE id = $1" +PL/pgSQL function forcepushdown_schema.inner_force_delegation_function(integer) line XX at SQL statement +while executing command on localhost:xxxxx +END; +BEGIN; +SET LOCAL citus.propagate_set_commands TO 'local'; +SET LOCAL citus.allow_nested_distributed_execution TO on; +SELECT inner_force_delegation_function((SELECT id+112 FROM test_nested WHERE id=400)); NOTICE: inner_force_delegation_function():513 DETAIL: from localhost:xxxxx inner_force_delegation_function @@ -697,6 +694,8 @@ SELECT insert_select_data(20); DEBUG: pushing down function call in a multi-statement transaction DEBUG: pushing down the function call ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. 
CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.test_forcepushdown SELECT(a+1)" PL/pgSQL function forcepushdown_schema.insert_select_data(integer) line XX at SQL statement while executing command on localhost:xxxxx @@ -721,6 +720,8 @@ SELECT insert_select_data(22); DEBUG: pushing down function call in a multi-statement transaction DEBUG: pushing down the function call ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.test_forcepushdown SELECT(a+1)" PL/pgSQL function forcepushdown_schema.insert_select_data(integer) line XX at SQL statement while executing command on localhost:xxxxx @@ -776,6 +777,8 @@ SELECT insert_select_data_nonlocal(41); DEBUG: pushing down function call in a multi-statement transaction DEBUG: pushing down the function call ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.test_forcepushdown(intcol) SELECT intcol FROM forcepushdown_schema.test_forcepushdown_noncolocate" PL/pgSQL function forcepushdown_schema.insert_select_data_nonlocal(integer) line XX at SQL statement @@ -1106,6 +1109,8 @@ SELECT select_data(100); DEBUG: pushing down function call in a multi-statement transaction DEBUG: pushing down the function call ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. CONTEXT: SQL statement "SELECT result FROM forcepushdown_schema.test_subquery WHERE data = (SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a)" PL/pgSQL function forcepushdown_schema.select_data(integer) line XX at SQL statement @@ -1223,6 +1228,8 @@ SELECT 1,2,3 FROM select_data(100); DEBUG: pushing down function call in a multi-statement transaction DEBUG: pushing down the function call ERROR: cannot execute a distributed query from a query on a shard +DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results. +HINT: Avoid nesting of distributed queries or use alter user current_user set citus.allow_nested_distributed_execution to on to allow it with possible incorrectness. 
CONTEXT: SQL statement "SELECT result FROM forcepushdown_schema.test_subquery WHERE data = (SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a)" PL/pgSQL function forcepushdown_schema.select_data(integer) line XX at SQL statement diff --git a/src/test/regress/expected/function_create.out b/src/test/regress/expected/function_create.out new file mode 100644 index 000000000..692b60805 --- /dev/null +++ b/src/test/regress/expected/function_create.out @@ -0,0 +1,223 @@ +\set VERBOSITY terse +CREATE SCHEMA function_create; +SET search_path TO function_create; +-- helper function to verify the function of a coordinator is the same on all workers +CREATE OR REPLACE FUNCTION verify_function_is_same_on_workers(funcname text) + RETURNS bool + LANGUAGE plpgsql +AS $func$ +DECLARE + coordinatorSql text; + workerSql text; +BEGIN + SELECT pg_get_functiondef(funcname::regprocedure) INTO coordinatorSql; + FOR workerSql IN SELECT result FROM run_command_on_workers('SELECT pg_get_functiondef(' || quote_literal(funcname) || '::regprocedure)') LOOP + IF workerSql != coordinatorSql THEN + RAISE INFO 'functions are different, coordinator:% worker:%', coordinatorSql, workerSql; + RETURN false; + END IF; + END LOOP; + + RETURN true; +END; +$func$; +-- test delegating function calls +CREATE TABLE warnings ( + id int primary key, + message text +); +SELECT create_distributed_table('warnings', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO warnings VALUES (1, 'hello arbitrary config tests'); +CREATE FUNCTION warning(int, text) +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE WARNING '%', $2; +END; +$$; +SELECT create_distributed_function('warning(int,text)','$1'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- verify that the function definition is consistent in the cluster +SELECT verify_function_is_same_on_workers('function_create.warning(int,text)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +-- test a function that performs operation on the single shard of a reference table +CREATE TABLE monotonic_series(used_values int); +SELECT create_reference_table('monotonic_series'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO monotonic_series VALUES (1), (3), (5); +CREATE FUNCTION add_new_item_to_series() +RETURNS int +LANGUAGE SQL +AS $func$ +INSERT INTO monotonic_series SELECT max(used_values)+1 FROM monotonic_series RETURNING used_values; +$func$; +-- Create and distribute a simple function +CREATE FUNCTION eq(macaddr, macaddr) RETURNS bool + AS 'select $1 = $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; +-- testing alter statements for a distributed function +-- ROWS 5, untested because; +-- ERROR: ROWS is not applicable when function does not return a set +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +ALTER FUNCTION eq(macaddr,macaddr) CALLED ON NULL INPUT IMMUTABLE SECURITY INVOKER PARALLEL UNSAFE COST 5; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +ALTER 
FUNCTION eq(macaddr,macaddr) RETURNS NULL ON NULL INPUT STABLE SECURITY DEFINER PARALLEL RESTRICTED; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +ALTER FUNCTION eq(macaddr,macaddr) STRICT VOLATILE PARALLEL SAFE; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +-- Test SET/RESET for alter function +ALTER ROUTINE eq(macaddr,macaddr) SET client_min_messages TO debug; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +ALTER FUNCTION eq(macaddr,macaddr) RESET client_min_messages; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +ALTER FUNCTION eq(macaddr,macaddr) SET search_path TO 'sch'';ma', public; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +ALTER FUNCTION eq(macaddr,macaddr) RESET search_path; +-- rename function and make sure the new name can be used on the workers +ALTER FUNCTION eq(macaddr,macaddr) RENAME TO eq2; +SELECT verify_function_is_same_on_workers('function_create.eq2(macaddr,macaddr)'); + verify_function_is_same_on_workers +--------------------------------------------------------------------- + t +(1 row) + +-- user-defined aggregates with & without strict +create function sum2_sfunc_strict(state int, x int) +returns int immutable strict language plpgsql as $$ +begin return state + x; +end; +$$; +create function sum2_finalfunc_strict(state int) +returns int immutable strict language plpgsql as $$ +begin return state * 2; +end; +$$; +create function sum2_sfunc(state int, x int) +returns int immutable language plpgsql as $$ +begin return state + x; +end; +$$; +create function sum2_finalfunc(state int) +returns int immutable language plpgsql as $$ +begin return state * 2; +end; +$$; +create aggregate sum2 (int) ( + sfunc = sum2_sfunc, + stype = int, + finalfunc = sum2_finalfunc, + combinefunc = sum2_sfunc, + initcond = '0' +); +create aggregate sum2_strict (int) ( + sfunc = sum2_sfunc_strict, + stype = int, + finalfunc = sum2_finalfunc_strict, + combinefunc = sum2_sfunc_strict +); +-- user-defined aggregates with multiple-parameters +create function psum_sfunc(s int, x int, y int) +returns int immutable language plpgsql as $$ +begin return coalesce(s,0) + coalesce(x*y+3,1); +end; +$$; +create function psum_sfunc_strict(s int, x int, y int) +returns int immutable strict language plpgsql as $$ +begin return coalesce(s,0) + coalesce(x*y+3,1); +end; +$$; +create function psum_combinefunc(s1 int, s2 int) +returns int immutable language plpgsql as $$ +begin return coalesce(s1,0) + coalesce(s2,0); +end; +$$; +create function psum_combinefunc_strict(s1 int, s2 int) +returns int immutable strict language plpgsql as $$ +begin return coalesce(s1,0) + coalesce(s2,0); +end; +$$; +create function psum_finalfunc(x int) +returns int immutable language plpgsql as $$ +begin return x * 2; +end; +$$; +create function 
psum_finalfunc_strict(x int) +returns int immutable strict language plpgsql as $$ +begin return x * 2; +end; +$$; +create aggregate psum(int, int)( + sfunc=psum_sfunc, + combinefunc=psum_combinefunc, + finalfunc=psum_finalfunc, + stype=int +); +create aggregate psum_strict(int, int)( + sfunc=psum_sfunc_strict, + combinefunc=psum_combinefunc_strict, + finalfunc=psum_finalfunc_strict, + stype=int, + initcond=0 +); +-- generate test data +create table aggdata (id int, key int, val int, valf float8); +select create_distributed_table('aggdata', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/function_propagation.out b/src/test/regress/expected/function_propagation.out index 1ec5736f5..5c61761fb 100644 --- a/src/test/regress/expected/function_propagation.out +++ b/src/test/regress/expected/function_propagation.out @@ -177,7 +177,7 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas localhost | 57638 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table}) (2 rows) --- Views are not supported +-- Views are supported CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table; CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view) RETURNS int @@ -187,8 +187,6 @@ BEGIN return 1; END; $$; -WARNING: "function func_7(function_prop_view)" has dependency on unsupported object "view function_prop_view" -DETAIL: "function func_7(function_prop_view)" will be created only locally CREATE OR REPLACE FUNCTION func_8(param_1 int) RETURNS function_prop_view LANGUAGE plpgsql AS @@ -197,8 +195,6 @@ BEGIN return 1; END; $$; -WARNING: "function func_8(integer)" has dependency on unsupported object "view function_prop_view" -DETAIL: "function func_8(integer)" will be created only locally -- Check within transaction BEGIN; CREATE TYPE type_in_transaction AS (a int, b int); @@ -504,7 +500,7 @@ BEGIN; ALTER TABLE table_to_dist ADD COLUMN col_1 int default function_propagation_schema.non_dist_func(NULL::non_dist_table_for_function); ERROR: "table table_to_dist" has dependency to "table non_dist_table_for_function" that is not in Citus' metadata -HINT: Distribute "table non_dist_table_for_function" first to distribute "table table_to_dist" +HINT: Distribute "table non_dist_table_for_function" first to modify "table table_to_dist" on worker nodes ROLLBACK; -- Adding multiple columns with default values should propagate the function BEGIN; diff --git a/src/test/regress/expected/functions.out b/src/test/regress/expected/functions.out new file mode 100644 index 000000000..2401f81fd --- /dev/null +++ b/src/test/regress/expected/functions.out @@ -0,0 +1,139 @@ +\set VERBOSITY terse +SET search_path TO function_create; +-- test user defined function with a distribution column argument +SELECT + warning (id, message) +FROM + warnings +WHERE + id = 1; +WARNING: hello arbitrary config tests + warning +--------------------------------------------------------------------- + +(1 row) + +SELECT warning (1, 'Push down to worker that holds the partition value of 1'); +WARNING: Push down to worker that holds the partition value of 1 + warning +--------------------------------------------------------------------- + +(1 row) + +SELECT warning (2, 'Push down to worker that holds the partition value of 2'); +WARNING: Push down to worker that holds the partition value of 2 + warning 
+--------------------------------------------------------------------- + +(1 row) + +SELECT warning (3, 'Push down to worker that holds the partition value of 3'); +WARNING: Push down to worker that holds the partition value of 3 + warning +--------------------------------------------------------------------- + +(1 row) + +SELECT warning (4, 'Push down to worker that holds the partition value of 4'); +WARNING: Push down to worker that holds the partition value of 4 + warning +--------------------------------------------------------------------- + +(1 row) + +SELECT warning (5, 'Push down to worker that holds the partition value of 5'); +WARNING: Push down to worker that holds the partition value of 5 + warning +--------------------------------------------------------------------- + +(1 row) + +SELECT warning (6, 'Push down to worker that holds the partition value of 6'); +WARNING: Push down to worker that holds the partition value of 6 + warning +--------------------------------------------------------------------- + +(1 row) + +SELECT warning (7, 'Push down to worker that holds the partition value of 7'); +WARNING: Push down to worker that holds the partition value of 7 + warning +--------------------------------------------------------------------- + +(1 row) + +-- insert some data to test user defined aggregates +INSERT INTO aggdata (id, key, val, valf) + VALUES (1, 1, 2, 11.2), + (2, 1, NULL, 2.1), + (3, 2, 2, 3.22), + (4, 2, 3, 4.23), + (5, 2, 5, 5.25), + (6, 3, 4, 63.4), + (7, 5, NULL, 75), + (8, 6, NULL, NULL), + (9, 6, NULL, 96), + (10, 7, 8, 1078), + (11, 9, 0, 1.19); +-- test user defined aggregates +SELECT + key, + sum2 (val), + sum2_strict (val), + stddev(valf)::numeric(10, 5), + psum (val, valf::int), + psum_strict (val, valf::int) +FROM + aggdata +GROUP BY + key +ORDER BY + key; + key | sum2 | sum2_strict | stddev | psum | psum_strict +--------------------------------------------------------------------- + 1 | | 4 | 6.43467 | 52 | 50 + 2 | 20 | 20 | 1.01500 | 104 | 104 + 3 | 8 | 8 | | 510 | 510 + 5 | | | | 2 | 0 + 6 | | | | 4 | 0 + 7 | 16 | 16 | | 17254 | 17254 + 9 | 0 | 0 | | 6 | 6 +(7 rows) + +-- test function that writes to a reference table +SELECT add_new_item_to_series(); + add_new_item_to_series +--------------------------------------------------------------------- + 6 +(1 row) + +SELECT add_new_item_to_series(); + add_new_item_to_series +--------------------------------------------------------------------- + 7 +(1 row) + +SELECT add_new_item_to_series(); + add_new_item_to_series +--------------------------------------------------------------------- + 8 +(1 row) + +SELECT add_new_item_to_series(); + add_new_item_to_series +--------------------------------------------------------------------- + 9 +(1 row) + +SELECT add_new_item_to_series(); + add_new_item_to_series +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT add_new_item_to_series(); + add_new_item_to_series +--------------------------------------------------------------------- + 11 +(1 row) + diff --git a/src/test/regress/expected/global_cancel.out b/src/test/regress/expected/global_cancel.out index 6a206b0ad..a028df420 100644 --- a/src/test/regress/expected/global_cancel.out +++ b/src/test/regress/expected/global_cancel.out @@ -1,6 +1,7 @@ CREATE SCHEMA global_cancel; SET search_path TO global_cancel; SET citus.next_shard_id TO 56789000; +SET citus.grep_remote_commands TO '%pg_cancel_backend%'; CREATE TABLE dist_table (a INT, b INT); SELECT create_distributed_table 
('dist_table', 'a', shard_count:=4); create_distributed_table @@ -25,8 +26,6 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx ERROR: canceling statement due to user request BEGIN; SELECT pg_cancel_backend(:coordinator_gpid) FROM dist_table WHERE a = 1; -NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT pg_cancel_backend('xxxxx'::bigint) AS pg_cancel_backend FROM global_cancel.dist_table_56789000 dist_table WHERE (a OPERATOR(pg_catalog.=) 1) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx ERROR: canceling statement due to user request @@ -100,13 +99,13 @@ WHERE isactive = true AND noderole = 'primary'; SELECT citus_nodeid_for_gpid(10000000000 * 2 + 3); citus_nodeid_for_gpid --------------------------------------------------------------------- - 2 + 2 (1 row) SELECT citus_pid_for_gpid(10000000000 * 2 + 3); citus_pid_for_gpid --------------------------------------------------------------------- - 3 + 3 (1 row) DROP SCHEMA global_cancel CASCADE; diff --git a/src/test/regress/expected/isolation_acquire_distributed_locks.out b/src/test/regress/expected/isolation_acquire_distributed_locks.out new file mode 100644 index 000000000..ed51f1da6 --- /dev/null +++ b/src/test/regress/expected/isolation_acquire_distributed_locks.out @@ -0,0 +1,1191 @@ +Parsed test spec with 3 sessions + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-dist-table w1-start-session-level-connection w1-begin w1-read-dist-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-dist-table: + LOCK dist_table IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-dist-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM dist_table'); + +step coor-rollback: + ROLLBACK; + +step w1-read-dist-table: <... 
completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-dist-table w1-start-session-level-connection w1-begin w1-acquire-aggressive-lock-dist-table coor-rollback coor-read-dist-table w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-dist-table: + LOCK dist_table IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +step coor-rollback: + ROLLBACK; + +step w1-acquire-aggressive-lock-dist-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coor-read-dist-table: + SELECT COUNT(*) FROM dist_table; + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coor-read-dist-table: <... 
completed> +count +--------------------------------------------------------------------- + 5 +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: w1-start-session-level-connection w1-begin w1-acquire-aggressive-lock-dist-table coor-begin coor-acquire-aggresive-lock-on-dist-table-nowait coor-rollback w1-rollback w1-stop-connection +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-dist-table-nowait: + LOCK dist_table IN ACCESS EXCLUSIVE MODE NOWAIT; + +ERROR: could not obtain lock on relation "public.dist_table" +step coor-rollback: + ROLLBACK; + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: w1-start-session-level-connection w1-begin w2-start-session-level-connection w2-begin w1-acquire-aggressive-lock-dist-table w2-acquire-aggressive-lock-dist-table w1-rollback w1-read-dist-table w2-rollback w1-stop-connection w2-stop-connection +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w2-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +run_commands_on_session_level_connection_to_node 
+--------------------------------------------------------------------- + +(1 row) + +step w2-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w2-acquire-aggressive-lock-dist-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-dist-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM dist_table'); + +step w2-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-dist-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-weak-lock-on-dist-table w1-start-session-level-connection w1-begin w1-read-dist-table w1-acquire-aggressive-lock-dist-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-weak-lock-on-dist-table: + LOCK dist_table IN ACCESS SHARE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-dist-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM dist_table'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +step coor-rollback: + ROLLBACK; + +step w1-acquire-aggressive-lock-dist-table: <... 
completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: w1-start-session-level-connection w1-begin w1-lock-reference-table coor-begin coor-read-ref-table w1-rollback coor-rollback w1-stop-connection +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-lock-reference-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK ref_table IN ACCESS EXCLUSIVE MODE'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coor-begin: + BEGIN; + +step coor-read-ref-table: + SELECT COUNT(*) FROM ref_table; + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coor-read-ref-table: <... completed> +count +--------------------------------------------------------------------- + 5 +(1 row) + +step coor-rollback: + ROLLBACK; + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-view w1-start-session-level-connection w1-begin w1-read-dist-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-view: + LOCK main_view IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-dist-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM dist_table'); + +step coor-rollback: + ROLLBACK; + +step w1-read-dist-table: <... 
completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-view w1-start-session-level-connection w1-begin w1-acquire-aggressive-lock-dist-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-view: + LOCK main_view IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +step coor-rollback: + ROLLBACK; + +step w1-acquire-aggressive-lock-dist-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-view w1-start-session-level-connection w1-begin w1-read-ref-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-view: + LOCK main_view IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-ref-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM ref_table'); + +step coor-rollback: + ROLLBACK; + +step w1-read-ref-table: <... 
completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-only-view w1-start-session-level-connection w1-begin w1-read-ref-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-only-view: + LOCK ONLY main_view IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-ref-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM ref_table'); + +step coor-rollback: + ROLLBACK; + +step w1-read-ref-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: w1-start-session-level-connection w1-begin w1-acquire-aggressive-lock-dist-table coor-begin coor-acquire-aggresive-lock-on-view-nowait coor-rollback w1-rollback w1-stop-connection +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-view-nowait: + LOCK main_view IN ACCESS EXCLUSIVE MODE NOWAIT; + +ERROR: could not obtain lock on relation "dist_table" +step coor-rollback: + ROLLBACK; + +step w1-rollback: + SELECT 
run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-lock-all w1-start-session-level-connection w1-begin w1-read-citus-local-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-lock-all: + LOCK dist_table, citus_local_table, ref_table, main_view, sub_view, local_table IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-citus-local-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM citus_local_table'); + +step coor-rollback: + ROLLBACK; + +step w1-read-citus-local-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-partitioned-table w1-start-session-level-connection w1-begin w1-read-partitioned-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-partitioned-table: + LOCK partitioned_table IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-partitioned-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partitioned_table'); + +step coor-rollback: + ROLLBACK; + +step w1-read-partitioned-table: <... 
completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-partitioned-table w1-start-session-level-connection w1-begin w1-read-partition-of-partitioned-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-partitioned-table: + LOCK partitioned_table IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-partition-of-partitioned-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partition_1'); + +step coor-rollback: + ROLLBACK; + +step w1-read-partition-of-partitioned-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-partitioned-table-with-*-syntax w1-start-session-level-connection w1-begin w1-read-partition-of-partitioned-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-partitioned-table-with-*-syntax: + LOCK partitioned_table * IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-partition-of-partitioned-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partition_1'); + +step coor-rollback: + ROLLBACK; + +step w1-read-partition-of-partitioned-table: <... 
completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-only-partitioned-table w1-start-session-level-connection w1-begin w1-read-partitioned-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-only-partitioned-table: + LOCK ONLY partitioned_table IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-partitioned-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partitioned_table'); + +step coor-rollback: + ROLLBACK; + +step w1-read-partitioned-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-only-partitioned-table w1-start-session-level-connection w1-begin w1-read-partition-of-partitioned-table coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-only-partitioned-table: + LOCK ONLY partitioned_table IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-partition-of-partitioned-table: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partition_1'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step coor-rollback: + ROLLBACK; + +step w1-rollback: + SELECT 
run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-acquire-aggresive-lock-on-ref-table w1-start-session-level-connection w1-begin w1-read-main-view coor-rollback w1-rollback w1-stop-connection +step coor-begin: + BEGIN; + +step coor-acquire-aggresive-lock-on-ref-table: + LOCK ref_table IN ACCESS EXCLUSIVE MODE; + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-read-main-view: + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM main_view'); + +step coor-rollback: + ROLLBACK; + +step w1-read-main-view: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: coor-begin coor-read-dist-table w2-start-session-level-connection w2-begin w1-start-session-level-connection w1-begin w2-acquire-aggressive-lock-dist-table w1-acquire-aggressive-lock-dist-table coor-rollback w2-rollback w1-rollback w1-stop-connection w2-stop-connection +step coor-begin: + BEGIN; + +step coor-read-dist-table: + SELECT COUNT(*) FROM dist_table; + +count +--------------------------------------------------------------------- + 5 +(1 row) + +step w2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w2-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step 
w2-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +step w1-acquire-aggressive-lock-dist-table: + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); + +step coor-rollback: + ROLLBACK; + +step w2-acquire-aggressive-lock-dist-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w2-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-acquire-aggressive-lock-dist-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-rollback: + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step w2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out b/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out index 8e99fc100..26fb85aae 100644 --- a/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out +++ b/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out @@ -7,16 +7,16 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-load-metadata-cache: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -24,18 +24,19 @@ step s1-add-second-worker: (1 row) step s2-copy-to-reference-table: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; - -step s1-commit: + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + +step s1-commit: COMMIT; +step s2-copy-to-reference-table: <... 
completed> step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -56,35 +57,36 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-load-metadata-cache: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s2-begin: - BEGIN; + BEGIN; step s2-copy-to-reference-table: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); + +step s2-commit: + COMMIT; +step s1-add-second-worker: <... completed> ?column? --------------------------------------------------------------------- 1 (1 row) -step s2-commit: - COMMIT; - step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -105,16 +107,16 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-load-metadata-cache: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -122,18 +124,19 @@ step s1-add-second-worker: (1 row) step s2-insert-to-reference-table: - INSERT INTO test_reference_table VALUES (6); - -step s1-commit: + INSERT INTO test_reference_table VALUES (6); + +step s1-commit: COMMIT; +step s2-insert-to-reference-table: <... 
completed> step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -154,35 +157,36 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-load-metadata-cache: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s2-begin: - BEGIN; + BEGIN; step s2-insert-to-reference-table: - INSERT INTO test_reference_table VALUES (6); + INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); + +step s2-commit: + COMMIT; +step s1-add-second-worker: <... completed> ?column? --------------------------------------------------------------------- 1 (1 row) -step s2-commit: - COMMIT; - step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -203,16 +207,16 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-load-metadata-cache: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -220,19 +224,19 @@ step s1-add-second-worker: (1 row) step s2-ddl-on-reference-table: - CREATE INDEX reference_index ON test_reference_table(test_id); + CREATE INDEX reference_index ON test_reference_table(test_id); step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... 
completed> step s2-print-index-count: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -253,22 +257,22 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-load-metadata-cache: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s2-begin: - BEGIN; + BEGIN; step s2-ddl-on-reference-table: - CREATE INDEX reference_index ON test_reference_table(test_id); + CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-second-worker: <... completed> ?column? @@ -277,12 +281,12 @@ step s1-add-second-worker: <... completed> (1 row) step s2-print-index-count: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -303,16 +307,16 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-load-metadata-cache: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -320,7 +324,7 @@ step s1-add-second-worker: (1 row) step s2-create-reference-table-2: - SELECT create_reference_table('test_reference_table_2'); + SELECT create_reference_table('test_reference_table_2'); step s1-commit: COMMIT; @@ -332,12 +336,12 @@ create_reference_table (1 row) step s2-print-content-2: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table_2', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table_2', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -359,16 +363,16 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-load-metadata-cache: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s2-begin: - BEGIN; + BEGIN; step s2-create-reference-table-2: - SELECT create_reference_table('test_reference_table_2'); + SELECT create_reference_table('test_reference_table_2'); create_reference_table --------------------------------------------------------------------- @@ -376,10 +380,10 @@ create_reference_table (1 row) step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-second-worker: <... completed> ?column? @@ -388,12 +392,12 @@ step s1-add-second-worker: <... completed> (1 row) step s2-print-content-2: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table_2', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table_2', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -414,13 +418,13 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -428,18 +432,19 @@ step s1-add-second-worker: (1 row) step s2-copy-to-reference-table: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; - -step s1-commit: + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + +step s1-commit: COMMIT; +step s2-copy-to-reference-table: <... 
completed> step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -460,32 +465,33 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-begin: - BEGIN; + BEGIN; step s2-copy-to-reference-table: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); + +step s2-commit: + COMMIT; +step s1-add-second-worker: <... completed> ?column? --------------------------------------------------------------------- 1 (1 row) -step s2-commit: - COMMIT; - step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -506,13 +512,13 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -520,18 +526,19 @@ step s1-add-second-worker: (1 row) step s2-insert-to-reference-table: - INSERT INTO test_reference_table VALUES (6); - -step s1-commit: + INSERT INTO test_reference_table VALUES (6); + +step s1-commit: COMMIT; +step s2-insert-to-reference-table: <... completed> step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -552,32 +559,33 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-begin: - BEGIN; + BEGIN; step s2-insert-to-reference-table: - INSERT INTO test_reference_table VALUES (6); + INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); + +step s2-commit: + COMMIT; +step s1-add-second-worker: <... completed> ?column? 
--------------------------------------------------------------------- 1 (1 row) -step s2-commit: - COMMIT; - step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -598,13 +606,13 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -612,19 +620,19 @@ step s1-add-second-worker: (1 row) step s2-ddl-on-reference-table: - CREATE INDEX reference_index ON test_reference_table(test_id); + CREATE INDEX reference_index ON test_reference_table(test_id); step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> step s2-print-index-count: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -645,19 +653,19 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-begin: - BEGIN; + BEGIN; step s2-ddl-on-reference-table: - CREATE INDEX reference_index ON test_reference_table(test_id); + CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-second-worker: <... completed> ?column? @@ -666,12 +674,12 @@ step s1-add-second-worker: <... completed> (1 row) step s2-print-index-count: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -692,13 +700,13 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -706,7 +714,7 @@ step s1-add-second-worker: (1 row) step s2-create-reference-table-2: - SELECT create_reference_table('test_reference_table_2'); + SELECT create_reference_table('test_reference_table_2'); step s1-commit: COMMIT; @@ -718,12 +726,12 @@ create_reference_table (1 row) step s2-print-content-2: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table_2', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table_2', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -745,13 +753,13 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s2-begin: - BEGIN; + BEGIN; step s2-create-reference-table-2: - SELECT create_reference_table('test_reference_table_2'); + SELECT create_reference_table('test_reference_table_2'); create_reference_table --------------------------------------------------------------------- @@ -759,10 +767,10 @@ create_reference_table (1 row) step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-second-worker: <... completed> ?column? @@ -771,12 +779,12 @@ step s1-add-second-worker: <... completed> (1 row) step s2-print-content-2: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table_2', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table_2', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -797,13 +805,13 @@ create_distributed_table (1 row) step s1-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO on; + SET citus.replicate_reference_tables_on_activate TO on; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -811,19 +819,19 @@ step s1-add-second-worker: (1 row) step s2-copy-to-reference-table: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-commit: COMMIT; step s2-copy-to-reference-table: <... 
completed> step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -845,19 +853,19 @@ create_distributed_table (1 row) step s1-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO on; + SET citus.replicate_reference_tables_on_activate TO on; step s2-begin: - BEGIN; + BEGIN; step s2-copy-to-reference-table: - COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; + COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5'; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-second-worker: <... completed> ?column? @@ -866,12 +874,12 @@ step s1-add-second-worker: <... completed> (1 row) step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -893,13 +901,13 @@ create_distributed_table (1 row) step s1-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO on; + SET citus.replicate_reference_tables_on_activate TO on; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -907,19 +915,19 @@ step s1-add-second-worker: (1 row) step s2-insert-to-reference-table: - INSERT INTO test_reference_table VALUES (6); + INSERT INTO test_reference_table VALUES (6); step s1-commit: COMMIT; step s2-insert-to-reference-table: <... completed> step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -941,19 +949,19 @@ create_distributed_table (1 row) step s1-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO on; + SET citus.replicate_reference_tables_on_activate TO on; step s2-begin: - BEGIN; + BEGIN; step s2-insert-to-reference-table: - INSERT INTO test_reference_table VALUES (6); + INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-second-worker: <... completed> ?column? @@ -962,12 +970,12 @@ step s1-add-second-worker: <... 
completed> (1 row) step s2-print-content: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -989,13 +997,13 @@ create_distributed_table (1 row) step s1-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO on; + SET citus.replicate_reference_tables_on_activate TO on; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -1003,19 +1011,19 @@ step s1-add-second-worker: (1 row) step s2-ddl-on-reference-table: - CREATE INDEX reference_index ON test_reference_table(test_id); + CREATE INDEX reference_index ON test_reference_table(test_id); step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> step s2-print-index-count: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -1037,19 +1045,19 @@ create_distributed_table (1 row) step s1-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO on; + SET citus.replicate_reference_tables_on_activate TO on; step s2-begin: - BEGIN; + BEGIN; step s2-ddl-on-reference-table: - CREATE INDEX reference_index ON test_reference_table(test_id); + CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-second-worker: <... completed> ?column? @@ -1058,12 +1066,12 @@ step s1-add-second-worker: <... completed> (1 row) step s2-print-index-count: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -1085,13 +1093,13 @@ create_distributed_table (1 row) step s1-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO on; + SET citus.replicate_reference_tables_on_activate TO on; step s1-begin: BEGIN; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -1099,7 +1107,7 @@ step s1-add-second-worker: (1 row) step s2-create-reference-table-2: - SELECT create_reference_table('test_reference_table_2'); + SELECT create_reference_table('test_reference_table_2'); step s1-commit: COMMIT; @@ -1111,12 +1119,12 @@ create_reference_table (1 row) step s2-print-content-2: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table_2', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table_2', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -1138,13 +1146,13 @@ create_distributed_table (1 row) step s1-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO on; + SET citus.replicate_reference_tables_on_activate TO on; step s2-begin: - BEGIN; + BEGIN; step s2-create-reference-table-2: - SELECT create_reference_table('test_reference_table_2'); + SELECT create_reference_table('test_reference_table_2'); create_reference_table --------------------------------------------------------------------- @@ -1152,10 +1160,10 @@ create_reference_table (1 row) step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-second-worker: <... completed> ?column? @@ -1164,12 +1172,12 @@ step s1-add-second-worker: <... completed> (1 row) step s2-print-content-2: - SELECT - nodeport, success, result - FROM - run_command_on_placements('test_reference_table_2', 'select count(*) from %s') - ORDER BY - nodeport; + SELECT + nodeport, success, result + FROM + run_command_on_placements('test_reference_table_2', 'select count(*) from %s') + ORDER BY + nodeport; nodeport|success|result --------------------------------------------------------------------- @@ -1191,10 +1199,10 @@ create_distributed_table (1 row) step s1-do-not-replicate-on-activate: - SET citus.replicate_reference_tables_on_activate TO off; + SET citus.replicate_reference_tables_on_activate TO off; step s1-add-second-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -1202,18 +1210,18 @@ step s1-add-second-worker: (1 row) step s2-begin: - BEGIN; + BEGIN; step s1-begin: BEGIN; step s1-drop-reference-table: - DROP TABLE test_reference_table; + DROP TABLE test_reference_table; step s2-replicate-reference-tables: - SET client_min_messages TO DEBUG2; - SELECT replicate_reference_tables(); - RESET client_min_messages; + SET client_min_messages TO DEBUG2; + SELECT replicate_reference_tables(); + RESET client_min_messages; step s1-commit: COMMIT; @@ -1225,7 +1233,7 @@ replicate_reference_tables (1 row) step s2-commit: - COMMIT; + COMMIT; master_remove_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_concurrent_dml.out b/src/test/regress/expected/isolation_concurrent_dml.out index 112f8a4b8..eca23913e 100644 --- a/src/test/regress/expected/isolation_concurrent_dml.out +++ b/src/test/regress/expected/isolation_concurrent_dml.out @@ -1,7 +1,7 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-update s1-commit -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) @@ -26,7 +26,7 @@ restore_isolation_tester_func starting permutation: s1-insert s2-update -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) @@ -44,7 +44,7 @@ restore_isolation_tester_func starting permutation: s1-begin s1-multi-insert s2-update s1-commit -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) @@ -69,7 +69,7 @@ restore_isolation_tester_func starting permutation: s1-begin s1-multi-insert s2-multi-insert-overlap s1-commit -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) @@ -93,7 +93,7 @@ restore_isolation_tester_func starting permutation: s1-begin s2-begin s1-multi-insert s2-multi-insert s1-commit s2-commit -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/isolation_create_restore_point.out b/src/test/regress/expected/isolation_create_restore_point.out index 28e8e212b..bf71fa5ed 100644 --- a/src/test/regress/expected/isolation_create_restore_point.out +++ b/src/test/regress/expected/isolation_create_restore_point.out @@ -7,12 +7,12 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-create-distributed: - CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text); - SELECT create_distributed_table('test_create_distributed_table', 'test_id'); + CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text); + SELECT create_distributed_table('test_create_distributed_table', 'test_id'); create_distributed_table --------------------------------------------------------------------- @@ -20,10 +20,10 @@ create_distributed_table (1 row) step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? 
@@ -39,14 +39,14 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-insert: - INSERT INTO restore_table VALUES (1,'hello'); + INSERT INTO restore_table VALUES (1,'hello'); step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? --------------------------------------------------------------------- @@ -54,7 +54,7 @@ step s2-create-restore: (1 row) step s1-commit: - COMMIT; + COMMIT; starting permutation: s1-begin s1-modify-multiple s2-create-restore s1-commit @@ -64,14 +64,14 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-modify-multiple: - UPDATE restore_table SET data = 'world'; + UPDATE restore_table SET data = 'world'; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? --------------------------------------------------------------------- @@ -79,7 +79,7 @@ step s2-create-restore: (1 row) step s1-commit: - COMMIT; + COMMIT; starting permutation: s1-begin s1-ddl s2-create-restore s1-commit @@ -89,17 +89,17 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-ddl: - ALTER TABLE restore_table ADD COLUMN x int; + ALTER TABLE restore_table ADD COLUMN x int; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? @@ -115,14 +115,14 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-copy: - COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV; + COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? --------------------------------------------------------------------- @@ -130,7 +130,7 @@ step s2-create-restore: (1 row) step s1-commit: - COMMIT; + COMMIT; starting permutation: s1-begin s1-recover s2-create-restore s1-commit @@ -140,11 +140,11 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-recover: - SELECT recover_prepared_transactions(); + SELECT recover_prepared_transactions(); recover_prepared_transactions --------------------------------------------------------------------- @@ -152,10 +152,10 @@ recover_prepared_transactions (1 row) step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? 
@@ -171,17 +171,17 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-drop: - DROP TABLE restore_table; + DROP TABLE restore_table; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? @@ -197,11 +197,11 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-add-node: - SELECT 1 FROM master_add_inactive_node('localhost', 9999); + SELECT 1 FROM master_add_inactive_node('localhost', 9999); ?column? --------------------------------------------------------------------- @@ -209,10 +209,10 @@ step s1-add-node: (1 row) step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? @@ -228,11 +228,11 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-remove-node: - SELECT master_remove_node('localhost', 9999); + SELECT master_remove_node('localhost', 9999); master_remove_node --------------------------------------------------------------------- @@ -240,10 +240,10 @@ master_remove_node (1 row) step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? @@ -259,11 +259,11 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test-2'); + SELECT 1 FROM citus_create_restore_point('citus-test-2'); ?column? --------------------------------------------------------------------- @@ -271,10 +271,10 @@ step s1-create-restore: (1 row) step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? @@ -290,10 +290,10 @@ create_reference_table (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? --------------------------------------------------------------------- @@ -301,10 +301,10 @@ step s2-create-restore: (1 row) step s1-modify-multiple: - UPDATE restore_table SET data = 'world'; + UPDATE restore_table SET data = 'world'; step s2-commit: - COMMIT; + COMMIT; step s1-modify-multiple: <... completed> @@ -315,10 +315,10 @@ create_reference_table (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? 
--------------------------------------------------------------------- @@ -326,10 +326,10 @@ step s2-create-restore: (1 row) step s1-ddl: - ALTER TABLE restore_table ADD COLUMN x int; + ALTER TABLE restore_table ADD COLUMN x int; step s2-commit: - COMMIT; + COMMIT; step s1-ddl: <... completed> @@ -340,10 +340,10 @@ create_reference_table (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? --------------------------------------------------------------------- @@ -351,14 +351,14 @@ step s2-create-restore: (1 row) step s1-multi-statement: - SET citus.multi_shard_commit_protocol TO '2pc'; - BEGIN; - INSERT INTO restore_table VALUES (1,'hello'); - INSERT INTO restore_table VALUES (2,'hello'); - COMMIT; + SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + INSERT INTO restore_table VALUES (1,'hello'); + INSERT INTO restore_table VALUES (2,'hello'); + COMMIT; step s2-commit: - COMMIT; + COMMIT; step s1-multi-statement: <... completed> @@ -369,12 +369,12 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-create-reference: - CREATE TABLE test_create_reference_table (test_id integer NOT NULL, data text); - SELECT create_reference_table('test_create_reference_table'); + CREATE TABLE test_create_reference_table (test_id integer NOT NULL, data text); + SELECT create_reference_table('test_create_reference_table'); create_reference_table --------------------------------------------------------------------- @@ -382,10 +382,10 @@ create_reference_table (1 row) step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? @@ -401,23 +401,24 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-insert-ref: - INSERT INTO restore_ref_table VALUES (1,'hello'); + INSERT INTO restore_ref_table VALUES (1,'hello'); step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); + +step s1-commit: + COMMIT; +step s2-create-restore: <... completed> ?column? --------------------------------------------------------------------- 1 (1 row) -step s1-commit: - COMMIT; - starting permutation: s1-begin s1-modify-multiple-ref s2-create-restore s1-commit create_reference_table @@ -426,23 +427,24 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-modify-multiple-ref: - UPDATE restore_ref_table SET data = 'world'; + UPDATE restore_ref_table SET data = 'world'; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); + +step s1-commit: + COMMIT; +step s2-create-restore: <... completed> ?column? 
--------------------------------------------------------------------- 1 (1 row) -step s1-commit: - COMMIT; - starting permutation: s1-begin s1-ddl-ref s2-create-restore s1-commit create_reference_table @@ -451,17 +453,17 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-ddl-ref: - ALTER TABLE restore_ref_table ADD COLUMN x int; + ALTER TABLE restore_ref_table ADD COLUMN x int; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? @@ -477,23 +479,24 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-copy-ref: - COPY restore_ref_table FROM PROGRAM 'echo 1,hello' WITH CSV; + COPY restore_ref_table FROM PROGRAM 'echo 1,hello' WITH CSV; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); + +step s1-commit: + COMMIT; +step s2-create-restore: <... completed> ?column? --------------------------------------------------------------------- 1 (1 row) -step s1-commit: - COMMIT; - starting permutation: s1-begin s1-drop-ref s2-create-restore s1-commit create_reference_table @@ -502,17 +505,17 @@ create_reference_table (1 row) step s1-begin: - BEGIN; - SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + SET citus.multi_shard_commit_protocol TO '2pc'; step s1-drop-ref: - DROP TABLE restore_ref_table; + DROP TABLE restore_ref_table; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: - COMMIT; + COMMIT; step s2-create-restore: <... completed> ?column? @@ -528,10 +531,10 @@ create_reference_table (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? --------------------------------------------------------------------- @@ -539,10 +542,10 @@ step s2-create-restore: (1 row) step s1-modify-multiple-ref: - UPDATE restore_ref_table SET data = 'world'; + UPDATE restore_ref_table SET data = 'world'; step s2-commit: - COMMIT; + COMMIT; step s1-modify-multiple-ref: <... completed> @@ -553,10 +556,10 @@ create_reference_table (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? --------------------------------------------------------------------- @@ -564,10 +567,10 @@ step s2-create-restore: (1 row) step s1-ddl-ref: - ALTER TABLE restore_ref_table ADD COLUMN x int; + ALTER TABLE restore_ref_table ADD COLUMN x int; step s2-commit: - COMMIT; + COMMIT; step s1-ddl-ref: <... completed> @@ -578,10 +581,10 @@ create_reference_table (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-create-restore: - SELECT 1 FROM citus_create_restore_point('citus-test'); + SELECT 1 FROM citus_create_restore_point('citus-test'); ?column? 
--------------------------------------------------------------------- @@ -589,13 +592,13 @@ step s2-create-restore: (1 row) step s1-multi-statement-ref: - SET citus.multi_shard_commit_protocol TO '2pc'; - BEGIN; - INSERT INTO restore_ref_table VALUES (1,'hello'); - INSERT INTO restore_ref_table VALUES (2,'hello'); - COMMIT; + SET citus.multi_shard_commit_protocol TO '2pc'; + BEGIN; + INSERT INTO restore_ref_table VALUES (1,'hello'); + INSERT INTO restore_ref_table VALUES (2,'hello'); + COMMIT; step s2-commit: - COMMIT; + COMMIT; step s1-multi-statement-ref: <... completed> diff --git a/src/test/regress/expected/isolation_dml_vs_repair.out b/src/test/regress/expected/isolation_dml_vs_repair.out index cb1e07219..1ea19f6a5 100644 --- a/src/test/regress/expected/isolation_dml_vs_repair.out +++ b/src/test/regress/expected/isolation_dml_vs_repair.out @@ -1,7 +1,7 @@ Parsed test spec with 2 sessions starting permutation: s2-invalidate-57637 s1-begin s1-insertone s2-repair s1-commit -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) @@ -29,7 +29,7 @@ master_copy_shard_placement starting permutation: s1-insertone s2-invalidate-57637 s1-begin s1-insertall s2-repair s1-commit -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) @@ -60,7 +60,7 @@ master_copy_shard_placement starting permutation: s2-invalidate-57637 s2-begin s2-repair s1-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) @@ -113,7 +113,7 @@ test_id|data starting permutation: s2-invalidate-57637 s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) @@ -171,7 +171,7 @@ test_id|data starting permutation: s2-invalidate-57637 s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display -master_create_worker_shards +create_distributed_table --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out index 1db575034..38e185202 100644 --- a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out +++ b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out @@ -235,7 +235,7 @@ restore_isolation_tester_func (1 row) -starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection +starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-flaky-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); @@ -260,8 +260,8 @@ run_commands_on_session_level_connection_to_node (1 row) -step s2-coordinator-create-index-concurrently: - CREATE INDEX CONCURRENTLY 
dist_table_index_conc ON dist_table(id); +step s2-flaky-coordinator-create-index-concurrently: + CREATE INDEX CONCURRENTLY flaky_dist_table_index_conc ON dist_table(id); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); diff --git a/src/test/regress/expected/isolation_hash_copy_vs_all.out b/src/test/regress/expected/isolation_hash_copy_vs_all.out index 9e2393ae0..bbf9e9c6b 100644 --- a/src/test/regress/expected/isolation_hash_copy_vs_all.out +++ b/src/test/regress/expected/isolation_hash_copy_vs_all.out @@ -317,7 +317,7 @@ restore_isolation_tester_func (1 row) -starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes +starting permutation: s1-initialize s1-begin s1-copy s2-flaky-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table --------------------------------------------------------------------- @@ -326,9 +326,8 @@ create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY hash_copy_index ON hash_copy(id); +step s2-flaky-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY flaky_hash_copy_index ON hash_copy(id); step s1-commit: COMMIT; -step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_master_update_node_2.out b/src/test/regress/expected/isolation_master_update_node_2.out new file mode 100644 index 000000000..46e0d23d5 --- /dev/null +++ b/src/test/regress/expected/isolation_master_update_node_2.out @@ -0,0 +1,68 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: BEGIN; +step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); +step s2-begin: BEGIN; +step s2-update-node-1: + -- update a specific node by address + SELECT master_update_node(nodeid, 'localhost', nodeport + 10) + FROM pg_dist_node + WHERE nodename = 'localhost' + AND nodeport = 57637; + +step s1-abort: ABORT; +step s2-update-node-1: <... completed> +master_update_node +--------------------------------------------------------------------- + +(1 row) + +step s2-abort: ABORT; +master_remove_node +--------------------------------------------------------------------- + + +(2 rows) + + +starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: BEGIN; +step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); +step s2-begin: BEGIN; +step s2-update-node-1-force: + -- update a specific node by address (force) + SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100) + FROM pg_dist_node + WHERE nodename = 'localhost' + AND nodeport = 57637; + +step s2-update-node-1-force: <... 
completed> +master_update_node +--------------------------------------------------------------------- + +(1 row) + +step s2-abort: ABORT; +step s1-abort: ABORT; +FATAL: terminating connection due to administrator command +server closed the connection unexpectedly + This probably means the server terminated abnormally + before or while processing the request. + +master_remove_node +--------------------------------------------------------------------- + + +(2 rows) + diff --git a/src/test/regress/expected/isolation_reference_copy_vs_all.out b/src/test/regress/expected/isolation_reference_copy_vs_all.out index c73cc3031..b7703be8e 100644 --- a/src/test/regress/expected/isolation_reference_copy_vs_all.out +++ b/src/test/regress/expected/isolation_reference_copy_vs_all.out @@ -319,7 +319,7 @@ restore_isolation_tester_func (1 row) -starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes +starting permutation: s1-initialize s1-begin s1-copy s2-flaky-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_reference_table --------------------------------------------------------------------- @@ -328,9 +328,8 @@ create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY reference_copy_index ON reference_copy(id); +step s2-flaky-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY flaky_reference_copy_index ON reference_copy(id); step s1-commit: COMMIT; -step s2-ddl-create-index-concurrently: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index a2792d0c4..7a948d6f9 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -91,7 +91,7 @@ step s1-update-ref-table: update ref_table set a = a + 1; step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep --------------------------------------------------------------------- @@ -112,15 +112,17 @@ query |state |wait_event_type|wa (2 rows) step s2-view-worker: - SELECT query, state, wait_event_type, wait_event, usename, datname + SELECT query, state, wait_event_type, wait_event, usename, datname FROM citus_stat_activity WHERE query NOT ILIKE ALL(VALUES + ('%application_name%'), ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), ('%citus_internal_local_blocked_processes%'), ('%add_node%'), - ('%csa_from_one_node%')) + ('%csa_from_one_node%'), + ('%pg_locks%')) AND is_worker_query = true AND backend_type = 'client backend' AND query != '' @@ -128,8 +130,8 @@ step s2-view-worker: query |state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- -UPDATE public.ref_table_1500777 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|idle in transaction|Client |ClientRead|postgres|regression -UPDATE public.ref_table_1500777 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500877 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500877 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|idle in transaction|Client |ClientRead|postgres|regression (2 rows) step s2-end: @@ -168,9 +170,9 @@ step s1-update-ref-table: update ref_table set a = a + 1; step s2-active-transactions: - -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; - SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; + -- Admin should be able to see all transactions + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_select_vs_all_on_mx.out index b3e529b3b..90c296404 100644 --- a/src/test/regress/expected/isolation_select_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_select_vs_all_on_mx.out @@ -515,7 +515,7 @@ restore_isolation_tester_func (1 row) -starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-disable-binary-protocol-on-worker s1-select s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection +starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-disable-binary-protocol-on-worker s1-select s2-flaky-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection step s1-start-session-level-connection: SELECT 
start_session_level_connection_to_node('localhost', 57637); @@ -549,8 +549,8 @@ run_commands_on_session_level_connection_to_node (1 row) -step s2-coordinator-create-index-concurrently: - CREATE INDEX CONCURRENTLY select_table_index ON select_table(id); +step s2-flaky-coordinator-create-index-concurrently: + CREATE INDEX CONCURRENTLY flaky_select_table_index ON select_table(id); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); diff --git a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out index e946a0025..5a4b244f0 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out @@ -18,7 +18,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-truncate: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -42,7 +42,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-truncate: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -138,7 +138,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-truncate: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -210,7 +210,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-select: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO truncate_table SELECT * FROM truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO truncate_table SELECT * FROM data_table'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -234,7 +234,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-truncate: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -330,7 +330,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-truncate: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -402,7 +402,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy: - SELECT run_commands_on_session_level_connection_to_node('COPY truncate_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY truncate_table FROM PROGRAM ''echo 6, 60 && echo 9, 90 && echo 10, 100''WITH CSV'); run_commands_on_session_level_connection_to_node 
--------------------------------------------------------------------- @@ -426,7 +426,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-truncate: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -504,7 +504,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-truncate: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); step s1-commit: COMMIT; @@ -587,7 +587,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-truncate: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -640,3 +640,59 @@ restore_isolation_tester_func (1 row) + +starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-truncate s3-select-count-from-ref-table s1-commit-worker s1-stop-connection +step s1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-truncate: + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-select-count-from-ref-table: + SELECT COUNT(*) FROM referencing_table_2; + +step s1-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s3-select-count-from-ref-table: <... completed> +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_undistribute_table.out b/src/test/regress/expected/isolation_undistribute_table.out index d861b5af8..4048f0f52 100644 --- a/src/test/regress/expected/isolation_undistribute_table.out +++ b/src/test/regress/expected/isolation_undistribute_table.out @@ -251,13 +251,18 @@ step s2-truncate: step s1-commit: COMMIT; +s2: WARNING: relation "public.dist_table" does not exist step s2-truncate: <... 
completed> +ERROR: failure on connection marked as essential: localhost:xxxxx step s2-select: SELECT * FROM dist_table ORDER BY 1, 2; a|b --------------------------------------------------------------------- -(0 rows) +1|2 +3|4 +5|6 +(3 rows) restore_isolation_tester_func --------------------------------------------------------------------- diff --git a/src/test/regress/expected/local_dist_join_mixed.out b/src/test/regress/expected/local_dist_join_mixed.out index 5566186b5..cc709b982 100644 --- a/src/test/regress/expected/local_dist_join_mixed.out +++ b/src/test/regress/expected/local_dist_join_mixed.out @@ -358,6 +358,9 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c (1 row) CREATE VIEW local_regular_view AS SELECT * FROM local; +WARNING: "view local_regular_view" has dependency to "table local" that is not in Citus' metadata +DETAIL: "view local_regular_view" will be created only locally +HINT: Distribute "table local" first to distribute "view local_regular_view" CREATE VIEW dist_regular_view AS SELECT * FROM distributed; SELECT count(*) FROM distributed JOIN local_regular_view USING (id); DEBUG: generating subplan XXX_1 for subquery SELECT local.id, local.title FROM local_dist_join_mixed.local @@ -1601,14 +1604,5 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c 101 (1 row) +SET client_min_messages TO ERROR; DROP SCHEMA local_dist_join_mixed CASCADE; -DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed schema is run. To make sure subsequent commands see the schema correctly we need to make sure to use only one connection for all future commands -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table distributed -drop cascades to table reference -drop cascades to table local -drop cascades to table unlogged_local -drop cascades to materialized view mat_view -drop cascades to view local_regular_view -drop cascades to view dist_regular_view diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index 5d29a4e71..1f7e3e80c 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -398,7 +398,9 @@ INSERT INTO distributed_table VALUES (1, '22', 20); NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 (key, value, age) VALUES (1, '22'::text, 20) INSERT INTO second_distributed_table VALUES (1, '1'); NOTICE: executing the command locally: INSERT INTO local_shard_execution.second_distributed_table_1470005 (key, value) VALUES (1, '1'::text) +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW abcd_view AS SELECT * FROM abcd; +RESET citus.enable_ddl_propagation; SELECT * FROM abcd first join abcd second on first.b = second.b ORDER BY 1,2,3,4; b | c | d | b | c | d --------------------------------------------------------------------- @@ -1520,6 +1522,51 @@ NOTICE: executing the command locally: INSERT INTO local_shard_execution.distri EXECUTE local_multi_row_insert_prepare_params(5,11); NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (5,'55'::text,'21'::bigint) ON CONFLICT(key) WHERE ((key OPERATOR(pg_catalog.>) 3) AND (key OPERATOR(pg_catalog.<) 4)) DO UPDATE SET value = ('88'::text OPERATOR(pg_catalog.||) excluded.value) ROLLBACK; +-- make sure that we still get results if we switch off local execution 
+PREPARE ref_count_prepare AS SELECT count(*) FROM reference_table; +EXECUTE ref_count_prepare; +NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.reference_table_1470000 reference_table + count +--------------------------------------------------------------------- + 7 +(1 row) + +EXECUTE ref_count_prepare; +NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.reference_table_1470000 reference_table + count +--------------------------------------------------------------------- + 7 +(1 row) + +EXECUTE ref_count_prepare; +NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.reference_table_1470000 reference_table + count +--------------------------------------------------------------------- + 7 +(1 row) + +EXECUTE ref_count_prepare; +NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.reference_table_1470000 reference_table + count +--------------------------------------------------------------------- + 7 +(1 row) + +EXECUTE ref_count_prepare; +NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shard_execution.reference_table_1470000 reference_table + count +--------------------------------------------------------------------- + 7 +(1 row) + +SET citus.enable_local_execution TO off; +EXECUTE ref_count_prepare; + count +--------------------------------------------------------------------- + 7 +(1 row) + +RESET citus.enable_local_execution; -- failures of local execution should rollback both the -- local execution and remote executions -- fail on a local execution @@ -1656,8 +1703,10 @@ NOTICE: executing the command locally: DELETE FROM local_shard_execution.distri ROLLBACK; -- probably not a realistic case since views are not very -- well supported with MX +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution; NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table WHERE (distributed_table.key OPERATOR(pg_catalog.=) 500)) v_local_query_execution key | value | age @@ -1667,8 +1716,10 @@ NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT dist -- similar test, but this time the view itself is a non-local -- query, but the query on the view is local +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution_2 WHERE key = 500; NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution.distributed_table_1470003 distributed_table) v_local_query_execution_2 WHERE (key OPERATOR(pg_catalog.=) 500) key | value | age @@ -1983,6 +2034,17 @@ SELECT create_distributed_table('event_responses', 'event_id'); (1 row) INSERT INTO event_responses VALUES (1, 1, 'yes'), (2, 2, 'yes'), (3, 3, 'no'), (4, 4, 'no'); +CREATE TABLE event_responses_no_pkey ( + event_id int, + user_id int, + response invite_resp +); +SELECT create_distributed_table('event_responses_no_pkey', 'event_id'); + create_distributed_table 
+--------------------------------------------------------------------- + +(1 row) + CREATE OR REPLACE FUNCTION regular_func(p invite_resp) RETURNS int AS $$ DECLARE @@ -2432,6 +2494,755 @@ DEBUG: Creating router plan 17 | 777 | no (2 rows) +-- set back to sane settings +RESET citus.enable_local_execution; +RESET citus.enable_fast_path_router_planner; +-- we'll test some 2PC states +SET citus.enable_metadata_sync TO OFF; +-- coordinated_transaction_should_use_2PC prints the internal +-- state for 2PC decision on Citus. However, even if 2PC is decided, +-- we may not necessarily use 2PC over a connection unless it does +-- a modification +CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC() +RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus', +$$coordinated_transaction_should_use_2PC$$; +-- make tests consistent +SET citus.max_adaptive_executor_pool_size TO 1; +RESET citus.enable_metadata_sync; +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +SET citus.log_remote_commands TO ON; +-- we use event_id = 2 for local execution and event_id = 1 for reemote execution +--show it here, if anything changes here, all the tests below might be broken +-- we prefer this to avoid excessive logging below +SELECT * FROM event_responses_no_pkey WHERE event_id = 2; +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 2 +NOTICE: executing the command locally: SELECT event_id, user_id, response FROM public.event_responses_no_pkey_1480007 event_responses_no_pkey WHERE (event_id OPERATOR(pg_catalog.=) 2) + event_id | user_id | response +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM event_responses_no_pkey WHERE event_id = 1; +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: query has a single distribution column value: 1 +NOTICE: issuing SELECT event_id, user_id, response FROM public.event_responses_no_pkey_1480004 event_responses_no_pkey WHERE (event_id OPERATOR(pg_catalog.=) 1) + event_id | user_id | response +--------------------------------------------------------------------- +(0 rows) + +RESET citus.log_remote_commands; +RESET citus.log_local_commands; +RESET client_min_messages; +-- single shard local command without transaction block does set the +-- internal state for 2PC, but does not require any actual entries +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *) +SELECT coordinated_transaction_should_use_2PC() FROM cte_1; + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + t +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- two local commands without transaction block set the internal 2PC state +-- but does not use remotely +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + t 
+(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard local modification followed by another single shard +-- local modification sets the 2PC state, but does not use remotely +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 2 | 2 | yes +(1 row) + + INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 2 | 2 | yes +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + t +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard local modification followed by a single shard +-- remote modification uses 2PC because multiple nodes involved +-- in the modification +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 2 | 2 | yes +(1 row) + + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 1 | 2 | yes +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + t +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard local modification followed by a single shard +-- remote modification uses 2PC even if it is not in an explicit +-- tx block as multiple nodes involved in the modification +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + t +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard remote modification followed by a single shard +-- local modification uses 2PC as multiple nodes involved +-- in the modification +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 1 | 2 | yes +(1 
row) + + INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 2 | 2 | yes +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + t +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard remote modification followed by a single shard +-- local modification uses 2PC even if it is not in an explicit +-- tx block +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + t +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard local SELECT command without transaction block does not set the +-- internal state for 2PC +WITH cte_1 AS (SELECT * FROM event_responses_no_pkey WHERE event_id = 2) +SELECT coordinated_transaction_should_use_2PC() FROM cte_1; +ERROR: The transaction is not a coordinated transaction +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- two local SELECT commands without transaction block does not set the internal 2PC state +-- and does not use remotely +WITH cte_1 AS (SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2), + cte_2 AS (SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2) +SELECT count(*) FROM cte_1, cte_2; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- two local SELECT commands without transaction block does not set the internal 2PC state +-- and does not use remotely +BEGIN; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + count +--------------------------------------------------------------------- + 9 +(1 row) + + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + count +--------------------------------------------------------------------- + 9 +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + f +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + 
+SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- a local SELECT followed by a remote SELECT does not require to +-- use actual 2PC +BEGIN; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + count +--------------------------------------------------------------------- + 9 +(1 row) + + SELECT count(*) FROM event_responses_no_pkey; + count +--------------------------------------------------------------------- + 13 +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard local SELECT followed by a single shard +-- remote modification does not use 2PC, because only a single +-- machine involved in the modification +BEGIN; + SELECT * FROM event_responses_no_pkey WHERE event_id = 2; + event_id | user_id | response +--------------------------------------------------------------------- + 2 | 2 | yes + 2 | 2 | yes + 2 | 2 | yes + 2 | 2 | yes + 2 | 2 | yes + 2 | 2 | yes + 2 | 2 | yes + 2 | 2 | yes + 2 | 2 | yes +(9 rows) + + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 1 | 2 | yes +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + f +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard local SELECT followed by a single shard +-- remote modification does not use 2PC, because only a single +-- machine involved in the modification +WITH cte_1 AS (SELECT * FROM event_responses_no_pkey WHERE event_id = 2), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + f +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard remote modification followed by a single shard +-- local SELECT does not use 2PC, because only a single +-- machine involved in the modification +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 1 | 2 | yes +(1 row) + + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + count +--------------------------------------------------------------------- + 9 +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + f +(1 row) + +COMMIT; +SELECT count(*) FROM 
pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard remote modification followed by a single shard +-- local SELECT does not use 2PC, because only a single +-- machine involved in the modification +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *), + cte_2 AS (SELECT * FROM event_responses_no_pkey WHERE event_id = 2) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + f +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- multi shard local SELECT command without transaction block does not set the +-- internal state for 2PC +WITH cte_1 AS (SELECT count(*) FROM event_responses_no_pkey) +SELECT coordinated_transaction_should_use_2PC() FROM cte_1; + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + f +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- two multi-shard SELECT commands without transaction block does not set the internal 2PC state +-- and does not use remotely +WITH cte_1 AS (SELECT count(*) FROM event_responses_no_pkey), + cte_2 AS (SELECT count(*) FROM event_responses_no_pkey) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + f +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- two multi-shard SELECT commands without transaction block does not set the internal 2PC state +-- and does not use remotely +BEGIN; + SELECT count(*) FROM event_responses_no_pkey; + count +--------------------------------------------------------------------- + 17 +(1 row) + + SELECT count(*) FROM event_responses_no_pkey; + count +--------------------------------------------------------------------- + 17 +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + f +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- multi-shard shard SELECT followed by a single shard +-- remote modification does not use 2PC, because only a single +-- machine involved in the modification +BEGIN; + SELECT count(*) FROM event_responses_no_pkey; 
+ count +--------------------------------------------------------------------- + 17 +(1 row) + + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 1 | 2 | yes +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + f +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- multi shard SELECT followed by a single shard +-- remote single shard modification does not use 2PC, because only a single +-- machine involved in the modification +WITH cte_1 AS (SELECT count(*) FROM event_responses_no_pkey), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + f +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard remote modification followed by a multi shard +-- SELECT does not use 2PC, because only a single +-- machine involved in the modification +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + event_id | user_id | response +--------------------------------------------------------------------- + 1 | 2 | yes +(1 row) + + SELECT count(*) FROM event_responses_no_pkey; + count +--------------------------------------------------------------------- + 20 +(1 row) + + SELECT coordinated_transaction_should_use_2PC(); + coordinated_transaction_should_use_2pc +--------------------------------------------------------------------- + f +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard remote modification followed by a multi shard +-- SELECT does not use 2PC, because only a single +-- machine involved in the modification +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *), + cte_2 AS (SELECT count(*) FROM event_responses_no_pkey) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + f +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- single shard local modification followed by remote multi-shard +-- modification uses 2PC as multiple nodes are involved in modifications +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *), + cte_2 
AS (UPDATE event_responses_no_pkey SET user_id = 1000 RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; + bool_or +--------------------------------------------------------------------- + t +(1 row) + +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- a local SELECT followed by a remote multi-shard UPDATE requires to +-- use actual 2PC as multiple nodes are involved in modifications +BEGIN; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + count +--------------------------------------------------------------------- + 10 +(1 row) + + UPDATE event_responses_no_pkey SET user_id = 1; +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- a local SELECT followed by a remote single-shard UPDATE does not require to +-- use actual 2PC. This is because a single node is involved in modification +BEGIN; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + count +--------------------------------------------------------------------- + 10 +(1 row) + + UPDATE event_responses_no_pkey SET user_id = 1 WHERE event_id = 1; +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +-- a remote single-shard UPDATE followed by a local single shard SELECT +-- does not require to use actual 2PC. 
This is because a single node +-- is involved in modification +BEGIN; + UPDATE event_responses_no_pkey SET user_id = 1 WHERE event_id = 1; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + count +--------------------------------------------------------------------- + 10 +(1 row) + +COMMIT; +SELECT count(*) FROM pg_dist_transaction; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + \c - - - :master_port -- verify the local_hostname guc is used for local executions that should connect to the -- local host diff --git a/src/test/regress/expected/local_shard_execution_replicated.out b/src/test/regress/expected/local_shard_execution_replicated.out index c297f0a99..200b99872 100644 --- a/src/test/regress/expected/local_shard_execution_replicated.out +++ b/src/test/regress/expected/local_shard_execution_replicated.out @@ -334,7 +334,9 @@ NOTICE: executing the command locally: SELECT key, value FROM local_shard_execu -- Put row back for other tests INSERT INTO distributed_table VALUES (1, '22', 20); NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 (key, value, age) VALUES (1, '22'::text, 20) +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW abcd_view AS SELECT * FROM abcd; +RESET citus.enable_ddl_propagation; SELECT * FROM abcd first join abcd second on first.b = second.b ORDER BY 1,2,3,4; b | c | d | b | c | d --------------------------------------------------------------------- @@ -1650,8 +1652,10 @@ NOTICE: executing the command locally: DELETE FROM local_shard_execution_replic ROLLBACK; -- probably not a realistic case since views are not very -- well supported with MX +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution; NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table WHERE (distributed_table.key OPERATOR(pg_catalog.=) 500)) v_local_query_execution key | value | age @@ -1661,8 +1665,10 @@ NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT dist -- similar test, but this time the view itself is a non-local -- query, but the query on the view is local +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution_2 WHERE key = 500; NOTICE: executing the command locally: SELECT key, value, age FROM (SELECT distributed_table.key, distributed_table.value, distributed_table.age FROM local_shard_execution_replicated.distributed_table_1500003 distributed_table) v_local_query_execution_2 WHERE (key OPERATOR(pg_catalog.=) 500) key | value | age diff --git a/src/test/regress/expected/local_table_join.out b/src/test/regress/expected/local_table_join.out index effe23b0d..90737a2ed 100644 --- a/src/test/regress/expected/local_table_join.out +++ b/src/test/regress/expected/local_table_join.out @@ -991,6 +991,9 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c (1 row) CREATE view loc_view AS SELECT * FROM postgres_table 
WHERE key > 0; +WARNING: "view loc_view" has dependency to "table postgres_table" that is not in Citus' metadata +DETAIL: "view loc_view" will be created only locally +HINT: Distribute "table postgres_table" first to distribute "view loc_view" UPDATE loc_view SET key = (SELECT COUNT(*) FROM distributed_table); DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS count FROM local_table_join.distributed_table DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE local_table_join.postgres_table SET key = (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) FROM local_table_join.postgres_table WHERE (postgres_table.key OPERATOR(pg_catalog.>) 0) diff --git a/src/test/regress/expected/master_copy_shard_placement.out b/src/test/regress/expected/master_copy_shard_placement.out index 35ca01498..6d7fd4a69 100644 --- a/src/test/regress/expected/master_copy_shard_placement.out +++ b/src/test/regress/expected/master_copy_shard_placement.out @@ -112,12 +112,14 @@ SET citus.shard_replication_factor TO 1; -- metadata sync will succeed even if we have rep > 1 tables INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles_single_shard'::regclass::oid, 0); +SET client_min_messages TO warning; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) +RESET client_min_messages; CREATE TABLE mx_table(a int); SELECT create_distributed_table('mx_table', 'a'); create_distributed_table diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index 0903e0e36..fce2bcbd2 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -15,6 +15,21 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); 1 (1 row) +-- I am coordinator +SELECT citus_is_coordinator(); + citus_is_coordinator +--------------------------------------------------------------------- + t +(1 row) + +-- workers are not coordinator +SELECT result FROM run_command_on_workers('SELECT citus_is_coordinator()'); + result +--------------------------------------------------------------------- + f + f +(2 rows) + -- get the active nodes SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes @@ -114,6 +129,54 @@ SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); 1 (1 row) +-- disable node with sync/force options +SELECT citus_disable_node('localhost', :worker_1_port); +ERROR: disabling the first worker node in the metadata is not allowed +DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations. 
+HINT: You can force disabling node, SELECT citus_disable_node('localhost', 57637, synchronous:=true); +SELECT citus_disable_node('localhost', :worker_1_port, synchronous:=true); + citus_disable_node +--------------------------------------------------------------------- + +(1 row) + +SELECT run_command_on_workers($$SELECT array_agg(isactive ORDER BY nodeport) FROM pg_dist_node WHERE hasmetadata and noderole='primary'::noderole AND nodecluster='default'$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57638,t,"{f,t}") +(1 row) + +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- disable node with sync/force options +SELECT citus_disable_node('localhost', :worker_2_port, synchronous:=true); + citus_disable_node +--------------------------------------------------------------------- + +(1 row) + +SELECT run_command_on_workers($$SELECT array_agg(isactive ORDER BY nodeport) FROM pg_dist_node WHERE hasmetadata and noderole='primary'::noderole AND nodecluster='default'$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"{t,f}") +(1 row) + +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); create_distributed_table @@ -1128,6 +1191,20 @@ WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER SELECT * from master_set_node_property('localhost', :worker_2_port, 'bogusproperty', false); ERROR: only the 'shouldhaveshards' property can be set using this function DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated; +BEGIN; + SELECT start_metadata_sync_to_all_nodes(); + start_metadata_sync_to_all_nodes +--------------------------------------------------------------------- + t +(1 row) + +COMMIT; +SELECT start_metadata_sync_to_all_nodes(); + start_metadata_sync_to_all_nodes +--------------------------------------------------------------------- + t +(1 row) + -- verify that at the end of this file, all primary nodes have metadata synced SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; ?column? 
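The multi_cluster_management changes above exercise the new cluster-management UDFs (citus_is_coordinator, the synchronous option of citus_disable_node, start_metadata_sync_to_all_nodes). As a rough standalone sketch of the same calls, outside the regression harness and with placeholder host/port values rather than the test's psql variables, this could look like:
-- check whether the current node is the coordinator
SELECT citus_is_coordinator();
-- disable a worker synchronously so every node sees the change before returning
SELECT citus_disable_node('localhost', 57638, synchronous := true);
-- bring the worker back
SELECT 1 FROM citus_activate_node('localhost', 57638);
-- re-sync metadata to all primary nodes
SELECT start_metadata_sync_to_all_nodes();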
diff --git a/src/test/regress/expected/multi_drop_extension.out b/src/test/regress/expected/multi_drop_extension.out index 21d20ab47..2b2175367 100644 --- a/src/test/regress/expected/multi_drop_extension.out +++ b/src/test/regress/expected/multi_drop_extension.out @@ -74,6 +74,67 @@ DROP SCHEMA test_schema CASCADE; NOTICE: drop cascades to 2 other objects DROP EXTENSION citus CASCADE; \set VERBOSITY DEFAULT +-- Test if metadatacache is cleared after a rollback +BEGIN; +CREATE EXTENSION citus; +ROLLBACK; +CREATE EXTENSION citus; +DROP EXTENSION citus; +-- Test if metadatacache is cleared for rollback subtransactions +BEGIN; +SAVEPOINT my_savepoint; +CREATE EXTENSION citus; +ROLLBACK TO SAVEPOINT my_savepoint; +CREATE EXTENSION citus; +COMMIT; +DROP EXTENSION citus; +-- Test if metadatacache is cleared if subtransaction commits but parent rolls back +BEGIN; +SAVEPOINT my_savepoint; +CREATE EXTENSION citus; +RELEASE SAVEPOINT my_savepoint; +ROLLBACK; +CREATE EXTENSION citus; +DROP EXTENSION citus; +-- Test if metadatacache is cleared if we release a savepoint and roll back +BEGIN; +SAVEPOINT s1; +SAVEPOINT s2; +CREATE EXTENSION citus; +RELEASE SAVEPOINT s1; +ROLLBACK; +CREATE EXTENSION citus; +DROP EXTENSION citus; +-- Test if metadatacache is cleared on a rollback in a nested subtransaction +BEGIN; +SAVEPOINT s1; +SAVEPOINT s2; +CREATE EXTENSION citus; +ROLLBACK TO s1; +CREATE EXTENSION citus; +COMMIT; +DROP EXTENSION citus; +-- Test if metadatacache is cleared after columnar table is made and rollback happens +BEGIN; +SAVEPOINT s1; +CREATE EXTENSION citus; +SAVEPOINT s2; +CREATE TABLE foo1 (i int) using columnar; +SAVEPOINT s3; +ROLLBACK TO SAVEPOINT s1; +ROLLBACK; +CREATE EXTENSION citus; +DROP EXTENSION citus; +-- Test with a release and rollback in transactions +BEGIN; +SAVEPOINT s1; +SAVEPOINT s2; +CREATE EXTENSION citus; +RELEASE SAVEPOINT s1; +SAVEPOINT s3; +SAVEPOINT s4; +ROLLBACK TO SAVEPOINT s3; +ROLLBACK; CREATE EXTENSION citus; -- this function is dropped in Citus10, added here for tests CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer, diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 3c3c5b2ae..3ff6f2350 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -94,7 +94,7 @@ FROM pg_depend AS pgd, WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND - pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar') + pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal') ORDER BY 1, 2; type | identity --------------------------------------------------------------------- @@ -103,6 +103,7 @@ ORDER BY 1, 2; -- DROP EXTENSION pre-created by the regression suite DROP EXTENSION citus; +DROP EXTENSION citus_columnar; \c -- these tests switch between citus versions and call ddl's that require pg_dist_object to be created SET citus.enable_metadata_sync TO 'false'; @@ -759,9 +760,9 @@ DROP TABLE columnar_table; ERROR: loaded Citus library version differs from installed extension version CREATE INDEX ON columnar_table (a); ERROR: loaded Citus library version differs from installed extension version -SELECT alter_columnar_table_set('columnar_table', compression => 'pglz'); +ALTER TABLE columnar_table SET (columnar.compression = pglz); ERROR: loaded Citus library version differs from installed extension 
version -SELECT alter_columnar_table_reset('columnar_table'); +ALTER TABLE columnar_table RESET (columnar.compression); ERROR: loaded Citus library version differs from installed extension version INSERT INTO columnar_table SELECT * FROM columnar_table; ERROR: loaded Citus library version differs from installed extension version @@ -1031,11 +1032,36 @@ SELECT * FROM multi_extension.print_extension_changes(); | view citus_stat_activity (41 rows) +-- Snapshot of state at 11.0-2 +ALTER EXTENSION citus UPDATE TO '11.0-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- + | function citus_is_coordinator() boolean + | function run_command_on_coordinator(text,boolean) SETOF record + | function start_metadata_sync_to_all_nodes() boolean +(3 rows) + +-- Test downgrade script (result should be empty) +ALTER EXTENSION citus UPDATE TO '11.0-1'; +ALTER EXTENSION citus UPDATE TO '11.0-2'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + -- Snapshot of state at 11.1-1 ALTER EXTENSION citus UPDATE TO '11.1-1'; SELECT * FROM multi_extension.print_extension_changes(); previous_object | current_object --------------------------------------------------------------------- + access method columnar | + function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean) void | + function alter_columnar_table_set(regclass,integer,integer,name,integer) void | + function citus_internal.columnar_ensure_am_depends_catalog() void | + function citus_internal.downgrade_columnar_storage(regclass) void | + function citus_internal.upgrade_columnar_storage(regclass) void | + function columnar.columnar_handler(internal) table_am_handler | function worker_cleanup_job_schema_cache() void | function worker_create_schema(bigint,text) void | function worker_fetch_foreign_file(text,text,bigint,text[],integer[]) void | @@ -1044,7 +1070,21 @@ SELECT * FROM multi_extension.print_extension_changes(); function worker_merge_files_into_table(bigint,integer,text[],text[]) void | function worker_range_partition_table(bigint,integer,text,text,oid,anyarray) void | function worker_repartition_cleanup(bigint) void | -(8 rows) + schema columnar | + sequence columnar.storageid_seq | + table columnar.chunk | + table columnar.chunk_group | + table columnar.options | + table columnar.stripe | +(21 rows) + +-- Test downgrade script (result should be empty) +ALTER EXTENSION citus UPDATE TO '11.0-2'; +ALTER EXTENSION citus UPDATE TO '11.1-1'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version @@ -1062,7 +1102,7 @@ FROM pg_depend AS pgd, WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND - pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar') + pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal') ORDER BY 1, 2; type | identity --------------------------------------------------------------------- @@ -1073,6 +1113,7 @@ ORDER BY 1, 2; RESET citus.enable_version_checks; RESET columnar.enable_version_checks; DROP EXTENSION citus; +DROP EXTENSION citus_columnar; 
CREATE EXTENSION citus VERSION '8.0-1'; ERROR: specified version incompatible with loaded Citus library DETAIL: Loaded library requires 11.1, but 8.0-1 was specified. @@ -1149,11 +1190,13 @@ NOTICE: version "9.1-1" of extension "citus" is already installed ALTER EXTENSION citus UPDATE; -- re-create in newest version DROP EXTENSION citus; +DROP EXTENSION citus_columnar; \c CREATE EXTENSION citus; -- test cache invalidation in workers \c - - - :worker_1_port DROP EXTENSION citus; +DROP EXTENSION citus_columnar; SET citus.enable_version_checks TO 'false'; SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index 91a65dc02..090ebdd4e 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -648,7 +648,7 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names', 'CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, another_col integer, partition_col timestamp without time zone NOT NULL, name text) USING columnar') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT alter_columnar_table_set('fix_idx_names.p2_915002', chunk_group_row_limit => 10000, stripe_row_limit => 150000, compression_level => 3, compression => 'zstd'); +NOTICE: issuing ALTER TABLE fix_idx_names.p2_915002 SET (columnar.chunk_group_row_limit = 10000, columnar.stripe_row_limit = 150000, columnar.compression_level = 3, columnar.compression = 'zstd'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names', 'ALTER TABLE fix_idx_names.p2 OWNER TO postgres') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -662,9 +662,9 @@ NOTICE: issuing CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, anoth DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, another_col integer, partition_col timestamp without time zone NOT NULL, name text) USING columnar DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT alter_columnar_table_set('fix_idx_names.p2', chunk_group_row_limit => 10000, stripe_row_limit => 150000, compression_level => 3, compression => 'zstd'); +NOTICE: issuing ALTER TABLE fix_idx_names.p2 SET (columnar.chunk_group_row_limit = 10000, columnar.stripe_row_limit = 150000, columnar.compression_level = 3, columnar.compression = 'zstd'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT alter_columnar_table_set('fix_idx_names.p2', chunk_group_row_limit => 10000, stripe_row_limit => 150000, compression_level => 3, compression => 'zstd'); +NOTICE: issuing ALTER TABLE fix_idx_names.p2 SET (columnar.chunk_group_row_limit = 10000, columnar.stripe_row_limit = 150000, columnar.compression_level = 3, columnar.compression = 'zstd'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing ALTER TABLE fix_idx_names.p2 OWNER TO postgres DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx diff --git a/src/test/regress/expected/multi_follower_dml.out 
b/src/test/regress/expected/multi_follower_dml.out index d1c714647..d6a5acd65 100644 --- a/src/test/regress/expected/multi_follower_dml.out +++ b/src/test/regress/expected/multi_follower_dml.out @@ -282,14 +282,11 @@ DELETE FROM the_table; ERROR: cannot assign TransactionIds during recovery -- DDL is not possible TRUNCATE the_table; -ERROR: cannot acquire lock mode AccessExclusiveLock on database objects while recovery is in progress -HINT: Only RowExclusiveLock or less can be acquired on database objects during recovery. +ERROR: cannot execute LOCK TABLE during recovery TRUNCATE reference_table; -ERROR: cannot acquire lock mode AccessExclusiveLock on database objects while recovery is in progress -HINT: Only RowExclusiveLock or less can be acquired on database objects during recovery. +ERROR: cannot execute LOCK TABLE during recovery TRUNCATE citus_local_table; -ERROR: cannot acquire lock mode AccessExclusiveLock on database objects while recovery is in progress -HINT: Only RowExclusiveLock or less can be acquired on database objects during recovery. +ERROR: cannot execute LOCK TABLE during recovery ALTER TABLE the_table ADD COLUMN c int; ERROR: cannot acquire lock mode AccessExclusiveLock on database objects while recovery is in progress HINT: Only RowExclusiveLock or less can be acquired on database objects during recovery. diff --git a/src/test/regress/expected/multi_generate_ddl_commands.out b/src/test/regress/expected/multi_generate_ddl_commands.out index 6aae20f9b..db211eb49 100644 --- a/src/test/regress/expected/multi_generate_ddl_commands.out +++ b/src/test/regress/expected/multi_generate_ddl_commands.out @@ -166,8 +166,11 @@ SELECT master_get_table_ddl_events('fiddly_table'); ALTER TABLE public.fiddly_table OWNER TO postgres (3 rows) --- propagating views is not supported +-- propagating views is not supported if local table dependency exists CREATE VIEW local_view AS SELECT * FROM simple_table; +WARNING: "view local_view" has dependency to "table simple_table" that is not in Citus' metadata +DETAIL: "view local_view" will be created only locally +HINT: Distribute "table simple_table" first to distribute "view local_view" SELECT master_get_table_ddl_events('local_view'); ERROR: local_view is not a regular, foreign or partitioned table -- clean up diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index e10dabf2b..b2e110de9 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -31,7 +31,7 @@ SELECT master_create_empty_shard('index_test_range'); SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 2; -CREATE TABLE index_test_hash(a int, b int, c int); +CREATE TABLE index_test_hash(a int, b int, c int, a_text text, b_text text); SELECT create_distributed_table('index_test_hash', 'a', 'hash'); create_distributed_table --------------------------------------------------------------------- @@ -105,9 +105,26 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] (1 row) +CREATE FUNCTION predicate_stable() RETURNS bool IMMUTABLE +LANGUAGE plpgsql AS $$ +BEGIN + EXECUTE 'SELECT txid_current()'; + RETURN true; +END; $$; CREATE INDEX ON index_test_hash ((value_plus_one(b))); +CREATE INDEX ON index_test_hash ((value_plus_one(b) + value_plus_one(c))) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash (a) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash (abs(a)) WHERE value_plus_one(c) > 
10; +CREATE INDEX ON index_test_hash (value_plus_one(a)) WHERE c > 10; CREATE INDEX ON index_test_hash ((multi_index_statements.value_plus_one(b))); CREATE INDEX ON index_test_hash ((multi_index_statements_2.value_plus_one(b))); +CREATE INDEX ON index_test_hash (a) INCLUDE (b) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash (c, (c+0)) INCLUDE (a); +CREATE INDEX ON index_test_hash (value_plus_one(a)) INCLUDE (c,b) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash ((a_text || b_text)); +CREATE INDEX ON index_test_hash ((a_text || b_text)) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash ((a_text || b_text)) WHERE (a_text || b_text) = 'ttt'; +CREATE INDEX CONCURRENTLY ON index_test_hash (a) WHERE predicate_stable(); -- Verify that we handle if not exists statements correctly CREATE INDEX lineitem_orderkey_index on public.lineitem(l_orderkey); ERROR: relation "lineitem_orderkey_index" already exists @@ -133,56 +150,6 @@ WARNING: not propagating CLUSTER command to worker nodes CREATE INDEX CONCURRENTLY local_table_index ON local_table(id); CLUSTER local_table USING local_table_index; DROP TABLE local_table; --- Verify that all indexes got created on the master node and one of the workers -SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef ---------------------------------------------------------------------- - multi_index_statements | index_test_hash | index_test_hash_expr_idx | | CREATE INDEX index_test_hash_expr_idx ON multi_index_statements.index_test_hash USING btree (value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_expr_idx1 | | CREATE INDEX index_test_hash_expr_idx1 ON multi_index_statements.index_test_hash USING btree (value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_expr_idx2 | | CREATE INDEX index_test_hash_expr_idx2 ON multi_index_statements.index_test_hash USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON multi_index_statements.index_test_hash USING btree (a) - multi_index_statements | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON multi_index_statements.index_test_hash USING btree (a, b) - multi_index_statements | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON multi_index_statements.index_test_hash USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON multi_index_statements.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL) - multi_index_statements | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON multi_index_statements.index_test_range USING btree (a) - multi_index_statements | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON multi_index_statements.index_test_range USING btree (a, b) - multi_index_statements | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON multi_index_statements.index_test_range USING btree (a, b) WHERE (c IS NOT NULL) - public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree 
(record_ne(lineitem.*, NULL::record)) - public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey) - public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date) - public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC) - public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber) - public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) -(19 rows) - -\c - - - :worker_1_port -SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_%' ORDER BY relname LIMIT 1); - count ---------------------------------------------------------------------- - 9 -(1 row) - -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash_%'; - count ---------------------------------------------------------------------- - 56 -(1 row) - -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range_%'; - count ---------------------------------------------------------------------- - 6 -(1 row) - -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append_%'; - count ---------------------------------------------------------------------- - 0 -(1 row) - \c - - - :master_port SET search_path TO multi_index_statements, public; -- Verify that we error out on unsupported statement types @@ -215,34 +182,6 @@ ERROR: column "non_existent_column" does not exist CREATE INDEX ON lineitem (l_orderkey); CREATE UNIQUE INDEX ON index_test_hash(a); CREATE INDEX CONCURRENTLY ON lineitem USING hash (l_shipdate); --- Verify that none of failed indexes got created on the master node -SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef ---------------------------------------------------------------------- - multi_index_statements | index_test_hash | index_test_hash_a_idx | | CREATE UNIQUE INDEX index_test_hash_a_idx ON multi_index_statements.index_test_hash USING btree (a) - multi_index_statements | index_test_hash | index_test_hash_expr_idx | | CREATE INDEX index_test_hash_expr_idx ON multi_index_statements.index_test_hash USING btree (value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_expr_idx1 | | CREATE INDEX index_test_hash_expr_idx1 ON multi_index_statements.index_test_hash USING btree (value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_expr_idx2 | | CREATE INDEX index_test_hash_expr_idx2 ON multi_index_statements.index_test_hash USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON multi_index_statements.index_test_hash USING btree (a) - multi_index_statements | 
index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON multi_index_statements.index_test_hash USING btree (a, b) - multi_index_statements | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON multi_index_statements.index_test_hash USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON multi_index_statements.index_test_hash USING btree (a, b) WHERE (c IS NOT NULL) - multi_index_statements | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON multi_index_statements.index_test_range USING btree (a) - multi_index_statements | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON multi_index_statements.index_test_range USING btree (a, b) - multi_index_statements | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON multi_index_statements.index_test_range USING btree (a, b) WHERE (c IS NOT NULL) - public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON public.lineitem USING btree (record_ne(lineitem.*, NULL::record)) - public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_l_orderkey_idx | | CREATE INDEX lineitem_l_orderkey_idx ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_l_shipdate_idx | | CREATE INDEX lineitem_l_shipdate_idx ON public.lineitem USING hash (l_shipdate) - public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey) - public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON public.lineitem USING btree (l_orderkey) - public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON public.lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date) - public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON public.lineitem USING btree (l_partkey DESC) - public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON public.lineitem USING btree (l_orderkey, l_linenumber) - public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) -(22 rows) - -- -- REINDEX -- @@ -280,78 +219,71 @@ DROP INDEX index_test_hash_index_a_b; DROP INDEX index_test_hash_index_a_b_partial; -- Verify that we can drop indexes concurrently DROP INDEX CONCURRENTLY lineitem_concurrently_index; +-- Verify that all indexes got created on the coordinator node and on the workers +-- by dropping the indexes. 
We do this because in different PG versions, +-- the expression indexes are named differently +-- and, being able to drop the index ensures that the index names are +-- proper +CREATE OR REPLACE FUNCTION drop_all_indexes(table_name regclass) RETURNS INTEGER AS $$ +DECLARE + i RECORD; +BEGIN + FOR i IN + (SELECT indexrelid::regclass::text as relname FROM pg_index + WHERE indrelid = table_name and indexrelid::regclass::text not ilike '%pkey%') + LOOP + EXECUTE 'DROP INDEX ' || i.relname; + END LOOP; +RETURN 1; +END; +$$ LANGUAGE plpgsql; +SELECT drop_all_indexes('public.lineitem'); + drop_all_indexes +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT drop_all_indexes('index_test_range'); + drop_all_indexes +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT drop_all_indexes('index_test_hash'); + drop_all_indexes +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT drop_all_indexes('index_test_append'); + drop_all_indexes +--------------------------------------------------------------------- + 1 +(1 row) + -- Verify that all the indexes are dropped from the master and one worker node. -- As there's a primary key, so exclude those from this check. SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%' ORDER BY 1,2; - indrelid | indexrelid + indrelid | indexrelid --------------------------------------------------------------------- - lineitem | lineitem_l_orderkey_idx - lineitem | lineitem_l_shipdate_idx -(2 rows) +(0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef + schemaname | tablename | indexname | tablespace | indexdef --------------------------------------------------------------------- - multi_index_statements | index_test_hash | index_test_hash_a_idx | | CREATE UNIQUE INDEX index_test_hash_a_idx ON multi_index_statements.index_test_hash USING btree (a) - multi_index_statements | index_test_hash | index_test_hash_expr_idx | | CREATE INDEX index_test_hash_expr_idx ON multi_index_statements.index_test_hash USING btree (value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_expr_idx1 | | CREATE INDEX index_test_hash_expr_idx1 ON multi_index_statements.index_test_hash USING btree (value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_expr_idx2 | | CREATE INDEX index_test_hash_expr_idx2 ON multi_index_statements.index_test_hash USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash | index_test_hash_index_a_b_c | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c ON multi_index_statements.index_test_hash USING btree (a) INCLUDE (b, c) -(5 rows) +(0 rows) \c - - - :worker_1_port SET citus.override_table_visibility TO FALSE; SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname SIMILAR TO 'lineitem%\d' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%' ORDER BY 1,2; - indrelid | indexrelid + indrelid | indexrelid --------------------------------------------------------------------- - lineitem_360000 | lineitem_l_orderkey_idx_360000 - 
lineitem_360000 | lineitem_l_shipdate_idx_360000 -(2 rows) +(0 rows) SELECT * FROM pg_indexes WHERE tablename SIMILAR TO 'index_test_%\d' ORDER BY indexname; - schemaname | tablename | indexname | tablespace | indexdef + schemaname | tablename | indexname | tablespace | indexdef --------------------------------------------------------------------- - multi_index_statements | index_test_hash_102082 | index_test_hash_a_idx_102082 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102082 ON multi_index_statements.index_test_hash_102082 USING btree (a) - multi_index_statements | index_test_hash_102083 | index_test_hash_a_idx_102083 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102083 ON multi_index_statements.index_test_hash_102083 USING btree (a) - multi_index_statements | index_test_hash_102084 | index_test_hash_a_idx_102084 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102084 ON multi_index_statements.index_test_hash_102084 USING btree (a) - multi_index_statements | index_test_hash_102085 | index_test_hash_a_idx_102085 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102085 ON multi_index_statements.index_test_hash_102085 USING btree (a) - multi_index_statements | index_test_hash_102086 | index_test_hash_a_idx_102086 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102086 ON multi_index_statements.index_test_hash_102086 USING btree (a) - multi_index_statements | index_test_hash_102087 | index_test_hash_a_idx_102087 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102087 ON multi_index_statements.index_test_hash_102087 USING btree (a) - multi_index_statements | index_test_hash_102088 | index_test_hash_a_idx_102088 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102088 ON multi_index_statements.index_test_hash_102088 USING btree (a) - multi_index_statements | index_test_hash_102089 | index_test_hash_a_idx_102089 | | CREATE UNIQUE INDEX index_test_hash_a_idx_102089 ON multi_index_statements.index_test_hash_102089 USING btree (a) - multi_index_statements | index_test_hash_102082 | index_test_hash_expr_idx1_102082 | | CREATE INDEX index_test_hash_expr_idx1_102082 ON multi_index_statements.index_test_hash_102082 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102083 | index_test_hash_expr_idx1_102083 | | CREATE INDEX index_test_hash_expr_idx1_102083 ON multi_index_statements.index_test_hash_102083 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102084 | index_test_hash_expr_idx1_102084 | | CREATE INDEX index_test_hash_expr_idx1_102084 ON multi_index_statements.index_test_hash_102084 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102085 | index_test_hash_expr_idx1_102085 | | CREATE INDEX index_test_hash_expr_idx1_102085 ON multi_index_statements.index_test_hash_102085 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102086 | index_test_hash_expr_idx1_102086 | | CREATE INDEX index_test_hash_expr_idx1_102086 ON multi_index_statements.index_test_hash_102086 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102087 | index_test_hash_expr_idx1_102087 | | CREATE INDEX index_test_hash_expr_idx1_102087 ON multi_index_statements.index_test_hash_102087 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102088 | index_test_hash_expr_idx1_102088 | | CREATE INDEX index_test_hash_expr_idx1_102088 ON 
multi_index_statements.index_test_hash_102088 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102089 | index_test_hash_expr_idx1_102089 | | CREATE INDEX index_test_hash_expr_idx1_102089 ON multi_index_statements.index_test_hash_102089 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102082 | index_test_hash_expr_idx2_102082 | | CREATE INDEX index_test_hash_expr_idx2_102082 ON multi_index_statements.index_test_hash_102082 USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash_102083 | index_test_hash_expr_idx2_102083 | | CREATE INDEX index_test_hash_expr_idx2_102083 ON multi_index_statements.index_test_hash_102083 USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash_102084 | index_test_hash_expr_idx2_102084 | | CREATE INDEX index_test_hash_expr_idx2_102084 ON multi_index_statements.index_test_hash_102084 USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash_102085 | index_test_hash_expr_idx2_102085 | | CREATE INDEX index_test_hash_expr_idx2_102085 ON multi_index_statements.index_test_hash_102085 USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash_102086 | index_test_hash_expr_idx2_102086 | | CREATE INDEX index_test_hash_expr_idx2_102086 ON multi_index_statements.index_test_hash_102086 USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash_102087 | index_test_hash_expr_idx2_102087 | | CREATE INDEX index_test_hash_expr_idx2_102087 ON multi_index_statements.index_test_hash_102087 USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash_102088 | index_test_hash_expr_idx2_102088 | | CREATE INDEX index_test_hash_expr_idx2_102088 ON multi_index_statements.index_test_hash_102088 USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash_102089 | index_test_hash_expr_idx2_102089 | | CREATE INDEX index_test_hash_expr_idx2_102089 ON multi_index_statements.index_test_hash_102089 USING btree (multi_index_statements_2.value_plus_one(b)) - multi_index_statements | index_test_hash_102082 | index_test_hash_expr_idx_102082 | | CREATE INDEX index_test_hash_expr_idx_102082 ON multi_index_statements.index_test_hash_102082 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102083 | index_test_hash_expr_idx_102083 | | CREATE INDEX index_test_hash_expr_idx_102083 ON multi_index_statements.index_test_hash_102083 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102084 | index_test_hash_expr_idx_102084 | | CREATE INDEX index_test_hash_expr_idx_102084 ON multi_index_statements.index_test_hash_102084 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102085 | index_test_hash_expr_idx_102085 | | CREATE INDEX index_test_hash_expr_idx_102085 ON multi_index_statements.index_test_hash_102085 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102086 | index_test_hash_expr_idx_102086 | | CREATE INDEX index_test_hash_expr_idx_102086 ON multi_index_statements.index_test_hash_102086 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102087 | index_test_hash_expr_idx_102087 | | CREATE INDEX 
index_test_hash_expr_idx_102087 ON multi_index_statements.index_test_hash_102087 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102088 | index_test_hash_expr_idx_102088 | | CREATE INDEX index_test_hash_expr_idx_102088 ON multi_index_statements.index_test_hash_102088 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102089 | index_test_hash_expr_idx_102089 | | CREATE INDEX index_test_hash_expr_idx_102089 ON multi_index_statements.index_test_hash_102089 USING btree (multi_index_statements.value_plus_one(b)) - multi_index_statements | index_test_hash_102082 | index_test_hash_index_a_b_c_102082 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102082 ON multi_index_statements.index_test_hash_102082 USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash_102083 | index_test_hash_index_a_b_c_102083 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102083 ON multi_index_statements.index_test_hash_102083 USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash_102084 | index_test_hash_index_a_b_c_102084 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102084 ON multi_index_statements.index_test_hash_102084 USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash_102085 | index_test_hash_index_a_b_c_102085 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102085 ON multi_index_statements.index_test_hash_102085 USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash_102086 | index_test_hash_index_a_b_c_102086 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102086 ON multi_index_statements.index_test_hash_102086 USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash_102087 | index_test_hash_index_a_b_c_102087 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102087 ON multi_index_statements.index_test_hash_102087 USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash_102088 | index_test_hash_index_a_b_c_102088 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102088 ON multi_index_statements.index_test_hash_102088 USING btree (a) INCLUDE (b, c) - multi_index_statements | index_test_hash_102089 | index_test_hash_index_a_b_c_102089 | | CREATE UNIQUE INDEX index_test_hash_index_a_b_c_102089 ON multi_index_statements.index_test_hash_102089 USING btree (a) INCLUDE (b, c) -(40 rows) +(0 rows) -- create index that will conflict with master operations CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON multi_index_statements.index_test_hash_102089(b); diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 749254292..8fbd78f65 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -1780,7 +1780,8 @@ ERROR: localhost:xxxxx is a metadata node, but is out of sync HINT: If the node is up, wait until metadata gets synced to it and try again. SELECT citus_disable_node_and_wait('localhost', :worker_1_port); ERROR: disabling the first worker node in the metadata is not allowed -HINT: You can force disabling node, but this operation might cause replicated shards to diverge: SELECT citus_disable_node('localhost', 57637, force:=true); +DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. 
Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations. +HINT: You can force disabling node, SELECT citus_disable_node('localhost', 57637, synchronous:=true); CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)" PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM SELECT citus_disable_node_and_wait('localhost', :worker_2_port); diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 95a99034f..925cf7f5e 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -231,7 +231,7 @@ ABORT; -- all below 5 commands should throw no permission errors -- read columnar metadata table SELECT * FROM columnar.stripe; - storage_id | stripe_num | file_offset | data_length | column_count | chunk_row_count | row_count | chunk_group_count | first_row_number + relation | storage_id | stripe_num | file_offset | data_length | column_count | chunk_row_count | row_count | chunk_group_count | first_row_number --------------------------------------------------------------------- (0 rows) @@ -240,12 +240,7 @@ SET columnar.chunk_group_row_limit = 1050; -- create columnar table CREATE TABLE columnar_table (a int) USING columnar; -- alter a columnar table that is created by that unprivileged user -SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000); - alter_columnar_table_set ---------------------------------------------------------------------- - -(1 row) - +ALTER TABLE columnar_table SET (columnar.chunk_group_row_limit = 2000); -- insert some data and read INSERT INTO columnar_table VALUES (1), (1); SELECT * FROM columnar_table; @@ -257,25 +252,27 @@ SELECT * FROM columnar_table; -- Fail to alter a columnar table that is created by a different user SET ROLE full_access; -SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000); +ALTER TABLE columnar_table SET (columnar.chunk_group_row_limit = 2000); ERROR: must be owner of table columnar_table -- Fail to reset a columnar table value created by a different user -SELECT alter_columnar_table_reset('columnar_table', chunk_group_row_limit => true); +ALTER TABLE columnar_table RESET (columnar.chunk_group_row_limit); ERROR: must be owner of table columnar_table SET ROLE read_access; -- and drop it DROP TABLE columnar_table; -- cannot modify columnar metadata table as unprivileged user -INSERT INTO columnar.stripe VALUES(99); -ERROR: permission denied for table stripe +INSERT INTO columnar_internal.stripe VALUES(99); +ERROR: permission denied for schema columnar_internal -- Cannot drop columnar metadata table as unprivileged user. -- Privileged user also cannot drop but with a different error message. 
-- (since citus extension has a dependency to it) -DROP TABLE columnar.chunk; -ERROR: must be owner of table chunk --- cannot read columnar.chunk since it could expose chunk min/max values +DROP TABLE columnar_internal.chunk; +ERROR: permission denied for schema columnar_internal SELECT * FROM columnar.chunk; -ERROR: permission denied for table chunk + relation | storage_id | stripe_num | attr_num | chunk_group_num | minimum_value | maximum_value | value_stream_offset | value_stream_length | exists_stream_offset | exists_stream_length | value_compression_type | value_compression_level | value_decompressed_length | value_count +--------------------------------------------------------------------- +(0 rows) + -- test whether a read-only user can read from citus_tables view SELECT distribution_column FROM citus_tables WHERE table_name = 'test'::regclass; distribution_column diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index 5ba1566dc..ffb35b08e 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -1,6 +1,7 @@ -- Test passing off function call to mx workers CREATE SCHEMA multi_mx_function_call_delegation; SET search_path TO multi_mx_function_call_delegation, public; +\set VERBOSITY terse SET citus.shard_replication_factor TO 2; -- This table requires specific settings, create before getting into things create table mx_call_dist_table_replica(id int, val int); @@ -112,8 +113,7 @@ select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); (1 row) select multi_mx_function_call_delegation.mx_call_copy(2); -ERROR: function multi_mx_function_call_delegation.mx_call_copy(integer) does not exist -HINT: No function matches the given name and argument types. You might need to add explicit type casts. +ERROR: function multi_mx_function_call_delegation.mx_call_copy(integer) does not exist at character 8 select squares(4); squares --------------------------------------------------------------------- @@ -133,7 +133,6 @@ select mx_call_func(2, 0); -- Mark both functions as distributed ... 
select create_distributed_function('mx_call_func(int,int)'); NOTICE: procedure multi_mx_function_call_delegation.mx_call_func is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -141,7 +140,6 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] select create_distributed_function('mx_call_func_bigint(bigint,bigint)'); NOTICE: procedure multi_mx_function_call_delegation.mx_call_func_bigint is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -149,7 +147,6 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] select create_distributed_function('mx_call_func_custom_types(mx_call_enum,mx_call_enum)'); NOTICE: procedure multi_mx_function_call_delegation.mx_call_func_custom_types is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -157,7 +154,6 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] select create_distributed_function('mx_call_func_copy(int)'); NOTICE: procedure multi_mx_function_call_delegation.mx_call_func_copy is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -165,7 +161,6 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] select create_distributed_function('squares(int)'); NOTICE: procedure multi_mx_function_call_delegation.squares is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -177,11 +172,7 @@ SET client_min_messages TO DEBUG1; select mx_call_func(2, 0); DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 29 @@ -223,7 +214,6 @@ select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); select 
create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x', colocate_with := 'mx_call_dist_table_bigint'); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -234,7 +224,6 @@ select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', ' colocate_with := 'mx_call_dist_table_2', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -299,11 +288,7 @@ begin; select mx_call_func(2, 0); DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 29 @@ -333,11 +318,7 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass select mx_call_func(2, 0); DEBUG: cannot push down invalid distribution_argument_index DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func 
--------------------------------------------------------------------- 29 @@ -352,11 +333,7 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass select mx_call_func(2, 0); DEBUG: cannot push down invalid distribution_argument_index DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 29 @@ -386,11 +363,7 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_replica'::re select mx_call_func(2, 0); DEBUG: cannot push down function call for replicated distributed tables DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 29 @@ -420,7 +393,6 @@ BEGIN ORDER BY 1, 2; END;$$; DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- before distribution ... select mx_call_func_tbl(10); DEBUG: function does not have co-located tables @@ -433,7 +405,6 @@ DEBUG: function does not have co-located tables -- after distribution ... select create_distributed_function('mx_call_func_tbl(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -455,10 +426,8 @@ BEGIN RAISE EXCEPTION 'error'; END;$$; DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -600,6 +569,7 @@ select start_metadata_sync_to_node('localhost', :worker_2_port); -- worker backend caches inconsistent. Reconnect to coordinator to use -- new worker connections, hence new backends. \c - - - :master_port +\set VERBOSITY terse SET search_path to multi_mx_function_call_delegation, public; SET client_min_messages TO DEBUG1; SET citus.shard_replication_factor = 1; @@ -609,10 +579,8 @@ SET citus.shard_replication_factor = 1; CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -622,11 +590,7 @@ DETAIL: A command for a distributed function is run. 
To make sure subsequent co select mx_call_func((select x + 1 from mx_call_add(3, 4) x), 2); DEBUG: arguments in a distributed function must not contain subqueries DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((9 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 35 @@ -636,11 +600,7 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment select mx_call_func(floor(random())::int, 2); DEBUG: arguments in a distributed function must be constant expressions DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 27 @@ -649,28 +609,16 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -- test forms we don't distribute select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((1 OPERATOR(pg_catalog.+) 
(SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- (0 rows) select mx_call_func(2, 0), mx_call_func(0, 2); DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func | mx_call_func --------------------------------------------------------------------- 29 | 27 @@ -732,24 +680,12 @@ DEBUG: pushing down the function call -- that result in remote execution from workers select mx_call_func(id, 0) from mx_call_dist_table_1; ERROR: cannot execute a distributed query from a query on a shard -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function multi_mx_function_call_delegation.mx_call_func(integer,integer) line XX at assignment -while executing command on localhost:xxxxx select mx_call_func(2, 0) from mx_call_dist_table_1 where id = 3; ERROR: cannot execute a distributed query 
from a query on a shard -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function multi_mx_function_call_delegation.mx_call_func(integer,integer) line XX at assignment -while executing command on localhost:xxxxx select mx_call_func_copy(2) from mx_call_dist_table_1 where id = 3; ERROR: cannot execute a distributed query from a query on a shard -CONTEXT: SQL statement "INSERT INTO multi_mx_function_call_delegation.mx_call_dist_table_1 - SELECT s,s FROM generate_series(100, 110) s" -PL/pgSQL function multi_mx_function_call_delegation.mx_call_func_copy(integer) line XX at SQL statement -while executing command on localhost:xxxxx DO $$ BEGIN perform mx_call_func_tbl(40); END; $$; DEBUG: not pushing down function calls in a multi-statement transaction -CONTEXT: SQL statement "SELECT mx_call_func_tbl(40)" -PL/pgSQL function inline_code_block line XX at PERFORM SELECT * FROM mx_call_dist_table_1 WHERE id >= 40 ORDER BY id, val; id | val --------------------------------------------------------------------- @@ -816,11 +752,11 @@ DEBUG: pushing down the function call (1 row) \c - - - :worker_1_port +\set VERBOSITY terse SET search_path TO multi_mx_function_call_delegation, public; -- create_distributed_function is disallowed from worker nodes select create_distributed_function('mx_call_func(int,int)'); ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -- show that functions can be delegated from worker nodes SET client_min_messages TO DEBUG1; SELECT mx_call_func(2, 0); @@ -835,11 +771,7 @@ BEGIN; SELECT mx_call_func(2, 0); DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((2 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 28 @@ -853,18 +785,8 @@ BEGIN END; $$ LANGUAGE plpgsql; DEBUG: not pushing down function calls in a multi-statement transaction -CONTEXT: SQL statement "SELECT mx_call_func(2, 0)" -PL/pgSQL function inline_code_block line XX at PERFORM DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + 
(select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -SQL statement "SELECT mx_call_func(2, 0)" -PL/pgSQL function inline_code_block line XX at PERFORM DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((2 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -SQL statement "SELECT mx_call_func(2, 0)" -PL/pgSQL function inline_code_block line XX at PERFORM -- forced calls are delegated in a transaction block BEGIN; SELECT mx_call_func_bigint_force(4, 2); @@ -883,12 +805,9 @@ BEGIN END; $$ LANGUAGE plpgsql; DEBUG: pushing down function call in a multi-statement transaction -CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)" -PL/pgSQL function inline_code_block line XX at PERFORM DEBUG: pushing down the function call -CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)" -PL/pgSQL function inline_code_block line XX at PERFORM \c - - - :master_port +\set VERBOSITY terse SET search_path TO multi_mx_function_call_delegation, public; RESET client_min_messages; \set VERBOSITY terse diff --git a/src/test/regress/expected/multi_mx_function_call_delegation_0.out b/src/test/regress/expected/multi_mx_function_call_delegation_0.out index 2d317b34e..6706ec6f8 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation_0.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation_0.out @@ -1,6 +1,7 @@ -- Test passing off function call to mx workers CREATE SCHEMA multi_mx_function_call_delegation; SET search_path TO multi_mx_function_call_delegation, public; +\set VERBOSITY terse SET citus.shard_replication_factor TO 2; -- This table requires specific settings, create before getting into things create table mx_call_dist_table_replica(id int, val int); @@ -112,8 +113,7 @@ select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); (1 row) select multi_mx_function_call_delegation.mx_call_copy(2); -ERROR: function multi_mx_function_call_delegation.mx_call_copy(integer) does not exist -HINT: No function matches the given name and argument types. You might need to add explicit type casts. +ERROR: function multi_mx_function_call_delegation.mx_call_copy(integer) does not exist at character 8 select squares(4); squares --------------------------------------------------------------------- @@ -133,7 +133,6 @@ select mx_call_func(2, 0); -- Mark both functions as distributed ... 
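[Editor's note] Much of the expected-output churn in these delegation files comes from the tests adding `\set VERBOSITY terse`, a psql setting that strips DETAIL, HINT, and CONTEXT from reported errors and messages, leaving only the primary line. A minimal sketch of the effect; the `divide_by_zero` function is ours for illustration and is not part of the regression suite:

```sql
CREATE FUNCTION divide_by_zero() RETURNS int
LANGUAGE plpgsql AS $$ BEGIN RETURN 1 / 0; END; $$;

\set VERBOSITY default
SELECT divide_by_zero();   -- ERROR: division by zero, plus a CONTEXT: PL/pgSQL function ... line
\set VERBOSITY terse
SELECT divide_by_zero();   -- only the primary ERROR line is printed
```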
select create_distributed_function('mx_call_func(int,int)'); NOTICE: procedure multi_mx_function_call_delegation.mx_call_func is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -141,7 +140,6 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] select create_distributed_function('mx_call_func_bigint(bigint,bigint)'); NOTICE: procedure multi_mx_function_call_delegation.mx_call_func_bigint is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -149,7 +147,6 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] select create_distributed_function('mx_call_func_custom_types(mx_call_enum,mx_call_enum)'); NOTICE: procedure multi_mx_function_call_delegation.mx_call_func_custom_types is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -157,7 +154,6 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] select create_distributed_function('mx_call_func_copy(int)'); NOTICE: procedure multi_mx_function_call_delegation.mx_call_func_copy is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -165,7 +161,6 @@ DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] select create_distributed_function('squares(int)'); NOTICE: procedure multi_mx_function_call_delegation.squares is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands create_distributed_function --------------------------------------------------------------------- @@ -177,11 +172,7 @@ SET client_min_messages TO DEBUG1; select mx_call_func(2, 0); DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 29 @@ -223,7 +214,6 @@ select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); select 
create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x', colocate_with := 'mx_call_dist_table_bigint'); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -234,7 +224,6 @@ select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', ' colocate_with := 'mx_call_dist_table_2', force_delegation := true); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -299,11 +288,7 @@ begin; select mx_call_func(2, 0); DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 29 @@ -333,11 +318,7 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass select mx_call_func(2, 0); DEBUG: cannot push down invalid distribution_argument_index DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func 
--------------------------------------------------------------------- 29 @@ -352,11 +333,7 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass select mx_call_func(2, 0); DEBUG: cannot push down invalid distribution_argument_index DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 29 @@ -386,11 +363,7 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_replica'::re select mx_call_func(2, 0); DEBUG: cannot push down function call for replicated distributed tables DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 29 @@ -420,7 +393,6 @@ BEGIN ORDER BY 1, 2; END;$$; DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- before distribution ... select mx_call_func_tbl(10); DEBUG: function does not have co-located tables @@ -433,7 +405,6 @@ DEBUG: function does not have co-located tables -- after distribution ... select create_distributed_function('mx_call_func_tbl(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -455,10 +426,8 @@ BEGIN RAISE EXCEPTION 'error'; END;$$; DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -600,6 +569,7 @@ select start_metadata_sync_to_node('localhost', :worker_2_port); -- worker backend caches inconsistent. Reconnect to coordinator to use -- new worker connections, hence new backends. \c - - - :master_port +\set VERBOSITY terse SET search_path to multi_mx_function_call_delegation, public; SET client_min_messages TO DEBUG1; SET citus.shard_replication_factor = 1; @@ -609,10 +579,8 @@ SET citus.shard_replication_factor = 1; CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode -DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands create_distributed_function --------------------------------------------------------------------- @@ -622,11 +590,7 @@ DETAIL: A command for a distributed function is run. 
To make sure subsequent co select mx_call_func((select x + 1 from mx_call_add(3, 4) x), 2); DEBUG: arguments in a distributed function must not contain subqueries DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (9 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 35 @@ -636,11 +600,7 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment select mx_call_func(floor(random())::int, 2); DEBUG: arguments in a distributed function must be constant expressions DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 27 @@ -649,28 +609,16 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -- test forms we don't distribute select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- (0 rows) select mx_call_func(2, 0), mx_call_func(0, 2); DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (1 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func | mx_call_func --------------------------------------------------------------------- 29 | 27 @@ -732,24 +680,12 @@ DEBUG: pushing down the function call -- that result in remote execution from workers select mx_call_func(id, 0) from mx_call_dist_table_1; ERROR: cannot execute a distributed query from a query on a shard -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function multi_mx_function_call_delegation.mx_call_func(integer,integer) line XX at assignment -while executing command on localhost:xxxxx select mx_call_func(2, 0) from mx_call_dist_table_1 where id = 3; ERROR: cannot execute a distributed query from a query on a shard -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from 
multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function multi_mx_function_call_delegation.mx_call_func(integer,integer) line XX at assignment -while executing command on localhost:xxxxx select mx_call_func_copy(2) from mx_call_dist_table_1 where id = 3; ERROR: cannot execute a distributed query from a query on a shard -CONTEXT: SQL statement "INSERT INTO multi_mx_function_call_delegation.mx_call_dist_table_1 - SELECT s,s FROM generate_series(100, 110) s" -PL/pgSQL function multi_mx_function_call_delegation.mx_call_func_copy(integer) line XX at SQL statement -while executing command on localhost:xxxxx DO $$ BEGIN perform mx_call_func_tbl(40); END; $$; DEBUG: not pushing down function calls in a multi-statement transaction -CONTEXT: SQL statement "SELECT mx_call_func_tbl(40)" -PL/pgSQL function inline_code_block line XX at PERFORM SELECT * FROM mx_call_dist_table_1 WHERE id >= 40 ORDER BY id, val; id | val --------------------------------------------------------------------- @@ -816,11 +752,11 @@ DEBUG: pushing down the function call (1 row) \c - - - :worker_1_port +\set VERBOSITY terse SET search_path TO multi_mx_function_call_delegation, public; -- create_distributed_function is disallowed from worker nodes select create_distributed_function('mx_call_func(int,int)'); ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -- show that functions can be delegated from worker nodes SET client_min_messages TO DEBUG1; SELECT mx_call_func(2, 0); @@ -835,11 +771,7 @@ BEGIN; SELECT mx_call_func(2, 0); DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (2 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment mx_call_func --------------------------------------------------------------------- 28 @@ -853,18 +785,8 @@ BEGIN END; $$ LANGUAGE plpgsql; DEBUG: not pushing down function calls in a multi-statement transaction -CONTEXT: SQL statement "SELECT mx_call_func(2, 0)" -PL/pgSQL function inline_code_block line XX at PERFORM DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join 
multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -SQL statement "SELECT mx_call_func(2, 0)" -PL/pgSQL function inline_code_block line XX at PERFORM DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (2 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -SQL statement "SELECT mx_call_func(2, 0)" -PL/pgSQL function inline_code_block line XX at PERFORM -- forced calls are delegated in a transaction block BEGIN; SELECT mx_call_func_bigint_force(4, 2); @@ -883,12 +805,9 @@ BEGIN END; $$ LANGUAGE plpgsql; DEBUG: pushing down function call in a multi-statement transaction -CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)" -PL/pgSQL function inline_code_block line XX at PERFORM DEBUG: pushing down the function call -CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)" -PL/pgSQL function inline_code_block line XX at PERFORM \c - - - :master_port +\set VERBOSITY terse SET search_path TO multi_mx_function_call_delegation, public; RESET client_min_messages; \set VERBOSITY terse diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index c0265b282..c3dbe3bdb 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -73,6 +73,7 @@ SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_name -- make sure that pg_class queries do not get blocked on table locks begin; +SET LOCAL citus.enable_ddl_propagation TO OFF; lock table test_table in access exclusive mode; prepare transaction 'take-aggressive-lock'; -- shards are hidden when using psql as application_name @@ -114,7 +115,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name (2 rows) -- changing application_name reveals the shards -SET application_name TO ''; +SET application_name TO 'pg_regress'; SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; relname --------------------------------------------------------------------- @@ -137,7 +138,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name -- changing application_name in transaction reveals the shards BEGIN; -SET LOCAL application_name TO ''; +SET LOCAL application_name TO 'pg_regress'; SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; relname --------------------------------------------------------------------- @@ -160,7 +161,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name -- now with session-level GUC, but ROLLBACK; BEGIN; -SET application_name TO ''; +SET application_name TO 'pg_regress'; ROLLBACK; -- shards are hidden again after GUCs are reset SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; @@ -173,7 +174,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name -- we should 
hide correctly based on application_name with savepoints BEGIN; SAVEPOINT s1; -SET application_name TO ''; +SET application_name TO 'pg_regress'; -- changing application_name reveals the shards SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; relname @@ -196,9 +197,9 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name (2 rows) ROLLBACK; --- changing citus.hide_shards_from_app_name_prefixes reveals the shards +-- changing citus.show_shards_for_app_name_prefix reveals the shards BEGIN; -SET LOCAL citus.hide_shards_from_app_name_prefixes TO 'notpsql'; +SET LOCAL citus.show_shards_for_app_name_prefixes TO 'psql'; SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; relname --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index 933407024..9671ffefd 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -642,7 +642,6 @@ SELECT verify_metadata('localhost', :worker_1_port), -- Don't drop the reference table so it has shards on the nodes being disabled DROP TABLE dist_table_1, dist_table_2; SELECT pg_catalog.citus_disable_node('localhost', :worker_2_port); -NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back. citus_disable_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_truncate_from_worker.out b/src/test/regress/expected/multi_mx_truncate_from_worker.out index 7972e251a..bfef48c4d 100644 --- a/src/test/regress/expected/multi_mx_truncate_from_worker.out +++ b/src/test/regress/expected/multi_mx_truncate_from_worker.out @@ -215,9 +215,13 @@ RESET client_min_messages; \c - - - :master_port -- also test the infrastructure that is used for supporting -- TRUNCATE from worker nodes --- should fail since it is not in transaction block -SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); -ERROR: lock_relation_if_exists can only be used in transaction blocks +-- should pass since we don't check for xact block in lock_relation_if_exists +SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ACCESS SHARE'); + lock_relation_if_exists +--------------------------------------------------------------------- + t +(1 row) + BEGIN; -- should fail since the schema is not provided SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index a2c03ef7a..618c563de 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -4301,12 +4301,14 @@ WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_% (2 rows) -- should work properly - no names clashes +SET client_min_messages TO WARNING; SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); ?column? 
--------------------------------------------------------------------- - 1 + 1 (1 row) +RESET client_min_messages; \c - - - :worker_1_port -- check that indexes are named properly SELECT tablename, indexname FROM pg_indexes diff --git a/src/test/regress/expected/multi_remove_node_reference_table.out b/src/test/regress/expected/multi_remove_node_reference_table.out index b2d38a196..c39b20735 100644 --- a/src/test/regress/expected/multi_remove_node_reference_table.out +++ b/src/test/regress/expected/multi_remove_node_reference_table.out @@ -978,6 +978,12 @@ ORDER BY shardid ASC; (0 rows) \c - - - :master_port +SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + SELECT citus_disable_node('localhost', :worker_2_port); citus_disable_node --------------------------------------------------------------------- @@ -997,6 +1003,19 @@ SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; 1 (1 row) +-- never mark coordinator metadatasynced = false +SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport = :master_port; + hasmetadata | metadatasynced +--------------------------------------------------------------------- + t | t +(1 row) + +SELECT 1 FROM citus_remove_node('localhost', :master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + SELECT shardid, shardstate, shardlength, nodename, nodeport FROM diff --git a/src/test/regress/expected/multi_truncate.out b/src/test/regress/expected/multi_truncate.out index bf661ccb8..b010a2c5d 100644 --- a/src/test/regress/expected/multi_truncate.out +++ b/src/test/regress/expected/multi_truncate.out @@ -161,51 +161,8 @@ DROP TABLE test_truncate_range; -- expect shard to be present, data to be truncated -- CREATE TABLE test_truncate_hash(a int); -SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- verify no error is thrown when no shards are present -TRUNCATE TABLE test_truncate_hash; -SELECT count(*) FROM test_truncate_hash; - count ---------------------------------------------------------------------- - 0 -(1 row) - -INSERT INTO test_truncate_hash values (1); -ERROR: could not find any shards -DETAIL: No shards exist for distributed table "test_truncate_hash". -HINT: Run master_create_worker_shards to create shards and try again. -INSERT INTO test_truncate_hash values (1001); -ERROR: could not find any shards -DETAIL: No shards exist for distributed table "test_truncate_hash". -HINT: Run master_create_worker_shards to create shards and try again. -INSERT INTO test_truncate_hash values (2000); -ERROR: could not find any shards -DETAIL: No shards exist for distributed table "test_truncate_hash". -HINT: Run master_create_worker_shards to create shards and try again. -INSERT INTO test_truncate_hash values (100); -ERROR: could not find any shards -DETAIL: No shards exist for distributed table "test_truncate_hash". -HINT: Run master_create_worker_shards to create shards and try again. 
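[Editor's note] The rewritten multi_truncate expectations follow from switching the test off the deprecated two-step `master_create_distributed_table` / `master_create_worker_shards` pair onto `create_distributed_table`, which creates shards as part of the same call, so the old "no shards exist yet" error cases disappear. A condensed sketch of the two flows, using the table name from the test:

```sql
-- deprecated flow exercised by the removed expected output:
-- the table has no shards until master_create_worker_shards() runs
SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash');
SELECT master_create_worker_shards('test_truncate_hash', 4, 1);

-- flow the updated test uses: shards exist as soon as the call returns
SELECT create_distributed_table('test_truncate_hash', 'a', 'hash');
```

The final count in the rolled-back TRUNCATE check changes from 1 to 5 because the intermediate TRUNCATE statements were dropped from the test, so the four earlier inserts survive alongside the last one.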
-SELECT count(*) FROM test_truncate_hash; - count ---------------------------------------------------------------------- - 0 -(1 row) - --- verify 4 shards are present -SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; - shardid ---------------------------------------------------------------------- -(0 rows) - -TRUNCATE TABLE test_truncate_hash; -SELECT master_create_worker_shards('test_truncate_hash', 4, 1); - master_create_worker_shards +SELECT create_distributed_table('test_truncate_hash', 'a', 'hash'); + create_distributed_table --------------------------------------------------------------------- (1 row) @@ -214,37 +171,13 @@ INSERT INTO test_truncate_hash values (1); INSERT INTO test_truncate_hash values (1001); INSERT INTO test_truncate_hash values (2000); INSERT INTO test_truncate_hash values (100); -SELECT count(*) FROM test_truncate_hash; - count ---------------------------------------------------------------------- - 4 -(1 row) - -TRUNCATE TABLE test_truncate_hash; --- verify data is truncated from the table -SELECT count(*) FROM test_truncate_hash; - count ---------------------------------------------------------------------- - 0 -(1 row) - --- verify 4 shards are still presents -SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; - shardid ---------------------------------------------------------------------- - 1210006 - 1210007 - 1210008 - 1210009 -(4 rows) - -- verify that truncate can be aborted INSERT INTO test_truncate_hash VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_hash; ROLLBACK; SELECT count(*) FROM test_truncate_hash; count --------------------------------------------------------------------- - 1 + 5 (1 row) DROP TABLE test_truncate_hash; diff --git a/src/test/regress/expected/mx_regular_user.out b/src/test/regress/expected/mx_regular_user.out index 9b60132e0..24af36179 100644 --- a/src/test/regress/expected/mx_regular_user.out +++ b/src/test/regress/expected/mx_regular_user.out @@ -89,7 +89,6 @@ INSERT INTO super_user_owned_regular_user_granted VALUES (1, 1), (2, 1) ON CONFL ERROR: permission denied for table super_user_owned_regular_user_granted TRUNCATE super_user_owned_regular_user_granted; ERROR: permission denied for table super_user_owned_regular_user_granted -CONTEXT: while executing command on localhost:xxxxx DELETE FROM super_user_owned_regular_user_granted; ERROR: permission denied for table super_user_owned_regular_user_granted UPDATE super_user_owned_regular_user_granted SET a = 1; diff --git a/src/test/regress/expected/nested_execution.out b/src/test/regress/expected/nested_execution.out new file mode 100644 index 000000000..6ac7fe640 --- /dev/null +++ b/src/test/regress/expected/nested_execution.out @@ -0,0 +1,111 @@ +SET search_path TO nested_execution; +SET citus.enable_local_execution TO on; +\set VERBOSITY terse +-- nested execution from queries on distributed tables is generally disallowed +SELECT dist_query_single_shard(key) FROM distributed WHERE key = 1; +ERROR: cannot execute a distributed query from a query on a shard +SELECT dist_query_multi_shard() FROM distributed WHERE key = 1; +ERROR: cannot execute a distributed query from a query on a shard +SELECT ref_query() FROM distributed WHERE key = 1; +ERROR: cannot execute a distributed query from a query on a shard +SELECT dist_query_single_shard(key) FROM distributed LIMIT 1; +ERROR: cannot execute a distributed query from a query on a shard +SELECT dist_query_multi_shard() FROM 
distributed LIMIT 1; +ERROR: cannot execute a distributed query from a query on a shard +SELECT ref_query() FROM distributed LIMIT 1; +ERROR: cannot execute a distributed query from a query on a shard +-- nested execution is allowed outside of an aggregate +-- note that this behaviour is different if distributed has only 1 shard +-- however, this test always uses 4 shards +SELECT dist_query_single_shard(count(*)::int) FROM distributed; + dist_query_single_shard +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT dist_query_multi_shard()+count(*) FROM distributed; + ?column? +--------------------------------------------------------------------- + 20 +(1 row) + +SELECT ref_query()+count(*) FROM distributed; + ?column? +--------------------------------------------------------------------- + 20 +(1 row) + +-- nested execution is allowed in a query that only has intermediate results +SELECT dist_query_single_shard(key) FROM (SELECT key FROM distributed LIMIT 1) s; + dist_query_single_shard +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT dist_query_multi_shard() FROM (SELECT key FROM distributed LIMIT 1) s; + dist_query_multi_shard +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT ref_query() FROM (SELECT key FROM distributed LIMIT 1) s; + ref_query +--------------------------------------------------------------------- + 10 +(1 row) + +-- nested execution from queries on reference tables is generally allowed +SELECT dist_query_single_shard(id::int) FROM reference WHERE id = 1; + dist_query_single_shard +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT dist_query_multi_shard() FROM reference WHERE id = 1; + dist_query_multi_shard +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT ref_query() FROM reference WHERE id = 1; + ref_query +--------------------------------------------------------------------- + 10 +(1 row) + +-- repeat checks in insert..select (somewhat different code path) +INSERT INTO distributed SELECT dist_query_single_shard(key) FROM distributed WHERE key = 1; +ERROR: cannot execute a distributed query from a query on a shard +INSERT INTO distributed SELECT dist_query_multi_shard() FROM distributed WHERE key = 1; +ERROR: cannot execute a distributed query from a query on a shard +INSERT INTO distributed SELECT ref_query() FROM distributed WHERE key = 1; +ERROR: cannot execute a distributed query from a query on a shard +INSERT INTO distributed SELECT dist_query_single_shard(key) FROM distributed LIMIT 1; +ERROR: cannot execute a distributed query from a query on a shard +INSERT INTO distributed SELECT dist_query_multi_shard() FROM distributed LIMIT 1; +ERROR: cannot execute a distributed query from a query on a shard +INSERT INTO distributed SELECT ref_query() FROM distributed LIMIT 1; +ERROR: cannot execute a distributed query from a query on a shard +BEGIN; +INSERT INTO distributed SELECT dist_query_single_shard(count(*)::int) FROM distributed; +INSERT INTO distributed SELECT dist_query_multi_shard()+count(*) FROM distributed; +INSERT INTO distributed SELECT ref_query()+count(*) FROM distributed; +ROLLBACK; +BEGIN; +INSERT INTO distributed SELECT dist_query_single_shard(key) FROM (SELECT key FROM distributed LIMIT 1) s; +INSERT INTO distributed SELECT dist_query_multi_shard() FROM (SELECT key FROM distributed LIMIT 1) s; +INSERT INTO distributed SELECT ref_query() 
FROM (SELECT key FROM distributed LIMIT 1) s; +ROLLBACK; +BEGIN; +INSERT INTO distributed SELECT dist_query_single_shard(id::int) FROM reference WHERE id = 1; +INSERT INTO distributed SELECT dist_query_multi_shard() FROM reference WHERE id = 1; +INSERT INTO distributed SELECT ref_query() FROM reference WHERE id = 1; +ROLLBACK; +-- nested execution without local execution is disallowed (not distinguishable from queries on shard) +SET citus.enable_local_execution TO off; +SELECT dist_query_single_shard(id::int) FROM reference WHERE id = 1; +ERROR: cannot execute a distributed query from a query on a shard +SELECT dist_query_multi_shard() FROM reference WHERE id = 1; +ERROR: cannot execute a distributed query from a query on a shard +SELECT ref_query() FROM reference WHERE id = 1; +ERROR: cannot execute a distributed query from a query on a shard diff --git a/src/test/regress/expected/nested_execution_create.out b/src/test/regress/expected/nested_execution_create.out new file mode 100644 index 000000000..889f08872 --- /dev/null +++ b/src/test/regress/expected/nested_execution_create.out @@ -0,0 +1,51 @@ +CREATE SCHEMA nested_execution; +SET search_path TO nested_execution; +-- some of the next_execution tests change for single shard +SET citus.shard_count TO 4; +CREATE TABLE distributed (key int, name text, + created_at timestamptz DEFAULT now()); +CREATE TABLE reference (id bigint PRIMARY KEY, title text); +SELECT create_distributed_table('distributed', 'key'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_reference_table('reference'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO distributed SELECT i, i::text, now() FROM generate_series(1,10)i; +INSERT INTO reference SELECT i, i::text FROM generate_series(1,10)i; +CREATE FUNCTION dist_query_single_shard(p_key int) +RETURNS bigint +LANGUAGE plpgsql AS $$ +DECLARE + result bigint; +BEGIN + SELECT count(*) INTO result FROM nested_execution.distributed WHERE key = p_key; + RETURN result; +END; +$$; +CREATE FUNCTION dist_query_multi_shard() +RETURNS bigint +LANGUAGE plpgsql AS $$ +DECLARE + result bigint; +BEGIN + SELECT count(*) INTO result FROM nested_execution.distributed; + RETURN result; +END; +$$; +CREATE FUNCTION ref_query() +RETURNS bigint +LANGUAGE plpgsql AS $$ +DECLARE + result bigint; +BEGIN + SELECT count(*) INTO result FROM nested_execution.reference; + RETURN result; +END; +$$; diff --git a/src/test/regress/expected/non_colocated_subquery_joins.out b/src/test/regress/expected/non_colocated_subquery_joins.out index c14502569..1b7b1b1e7 100644 --- a/src/test/regress/expected/non_colocated_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_subquery_joins.out @@ -1076,7 +1076,6 @@ ERROR: cannot pushdown the subquery -- make sure that non-colocated subquery joins work fine in -- modifications CREATE TABLE table1 (id int, tenant_id int); -CREATE VIEW table1_view AS SELECT * from table1 where id < 100; CREATE TABLE table2 (id int, tenant_id int) partition by range(tenant_id); CREATE TABLE table2_p1 PARTITION OF table2 FOR VALUES FROM (1) TO (10); -- modifications on the partitons are only allowed with rep=1 @@ -1093,6 +1092,7 @@ SELECT create_distributed_table('table1','tenant_id'); (1 row) +CREATE VIEW table1_view AS SELECT * from table1 where id < 100; -- all of the above queries are non-colocated subquery joins -- because the views are replaced with subqueries UPDATE 
table2 SET id=20 FROM table1_view WHERE table1_view.id=table2.id; diff --git a/src/test/regress/expected/object_propagation_debug.out b/src/test/regress/expected/object_propagation_debug.out index 020fa2629..8cecb1c85 100644 --- a/src/test/regress/expected/object_propagation_debug.out +++ b/src/test/regress/expected/object_propagation_debug.out @@ -50,8 +50,9 @@ ON TRUE --------------------------------------------------------------------- ("composite type","""object prop""",t1,"""object prop"".t1") (schema,,"""object prop""","""object prop""") + (table,"""object prop""",test,"""object prop"".test") (type,"""object prop""",t1,"""object prop"".t1") -(3 rows) +(4 rows) -- find all the dependencies of type t1 SELECT diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out index 3c2298a4e..acfa60474 100644 --- a/src/test/regress/expected/pg12.out +++ b/src/test/regress/expected/pg12.out @@ -656,12 +656,12 @@ CREATE USER read_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SET ROLE read_access; --- user shouldn't be able to execute alter_columnar_table_set --- or alter_columnar_table_reset for a columnar table that it +-- user shouldn't be able to execute ALTER TABLE ... SET +-- or ALTER TABLE ... RESET for a columnar table that it -- doesn't own -SELECT alter_columnar_table_set('test_pg12.superuser_columnar_table', chunk_group_row_limit => 100); +ALTER TABLE test_pg12.superuser_columnar_table SET(columnar.chunk_group_row_limit = 100); ERROR: permission denied for schema test_pg12 -SELECT alter_columnar_table_reset('test_pg12.superuser_columnar_table'); +ALTER TABLE test_pg12.superuser_columnar_table RESET (columnar.chunk_group_row_limit); ERROR: permission denied for schema test_pg12 RESET ROLE; DROP USER read_access; diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index f5fb0f62e..33ad7dbc0 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -1319,6 +1319,49 @@ SELECT count(*) FROM foreign_table; (1 row) TRUNCATE foreign_table; +-- test truncating foreign tables in the same statement with +-- other distributed tables +CREATE TABLE foreign_table_test_2 (id integer NOT NULL, data text, a bigserial); +CREATE FOREIGN TABLE foreign_table_2 +( + id integer NOT NULL, + data text, + a bigserial +) + SERVER foreign_server + OPTIONS (schema_name 'pg14', table_name 'foreign_table_test_2'); +SELECT citus_add_local_table_to_metadata('foreign_table_2'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE dist_table_1(a int); +CREATE TABLE dist_table_2(a int); +CREATE TABLE dist_table_3(a int); +SELECT create_distributed_table('dist_table_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('dist_table_2', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_reference_table('dist_table_3'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +TRUNCATE foreign_table, foreign_table_2; +TRUNCATE dist_table_1, foreign_table, dist_table_2, foreign_table_2, dist_table_3; +TRUNCATE dist_table_1, dist_table_2, foreign_table, dist_table_3; +TRUNCATE dist_table_1, foreign_table, 
foreign_table_2, dist_table_3; +TRUNCATE dist_table_1, foreign_table, foreign_table_2, dist_table_3, dist_table_2; \c - - - :worker_1_port set search_path to pg14; -- verify the foreign table is truncated diff --git a/src/test/regress/expected/postgres.out b/src/test/regress/expected/postgres.out index 2fdb1714e..3c146525c 100644 --- a/src/test/regress/expected/postgres.out +++ b/src/test/regress/expected/postgres.out @@ -24,3 +24,16 @@ AS $function$ BEGIN END; $function$; +CREATE OR REPLACE FUNCTION pg_catalog.create_distributed_function ( + function_name regprocedure, + distribution_arg_name text DEFAULT NULL, + colocate_with text DEFAULT 'default', + force_delegation bool DEFAULT NULL +) + RETURNS void + LANGUAGE plpgsql + CALLED ON NULL INPUT + AS $function$ + BEGIN + END; + $function$; diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index 41c012641..ec900db1e 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -218,8 +218,9 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam -- adding the second node will fail as the text search template needs to be created manually SELECT 1 from master_add_node('localhost', :worker_2_port); -ERROR: text search template "public.intdict_template" does not exist +WARNING: text search template "public.intdict_template" does not exist CONTEXT: while executing command on localhost:xxxxx +ERROR: failure on connection marked as essential: localhost:xxxxx -- create the text search template manually on the worker \c - - - :worker_2_port SET citus.enable_metadata_sync TO false; @@ -305,6 +306,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname -- propagated to the workers. 
the user should run it manually on the workers CREATE TABLE t1 (A int); CREATE VIEW v1 AS select * from t1; +WARNING: "view v1" has dependency to "table t1" that is not in Citus' metadata +DETAIL: "view v1" will be created only locally +HINT: Distribute "table t1" first to distribute "view v1" ALTER EXTENSION seg ADD VIEW v1; ALTER EXTENSION seg DROP VIEW v1; DROP VIEW v1; diff --git a/src/test/regress/expected/propagate_foreign_servers.out b/src/test/regress/expected/propagate_foreign_servers.out index c0dbfcdb9..551d1dde7 100644 --- a/src/test/regress/expected/propagate_foreign_servers.out +++ b/src/test/regress/expected/propagate_foreign_servers.out @@ -92,11 +92,8 @@ SELECT run_command_on_workers($$select r.rolname from pg_roles r join pg_class c CREATE SERVER foreign_server_to_drop FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'test'); ---should error -DROP SERVER foreign_server_dependent_schema, foreign_server_to_drop; -ERROR: cannot drop distributed server with other servers -HINT: Try dropping each object in a separate DROP command DROP FOREIGN TABLE foreign_table; +DROP SERVER foreign_server_dependent_schema, foreign_server_to_drop; SELECT citus_remove_node('localhost', :master_port); citus_remove_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/propagate_statistics.out b/src/test/regress/expected/propagate_statistics.out index 8a1255406..80e944bc8 100644 --- a/src/test/regress/expected/propagate_statistics.out +++ b/src/test/regress/expected/propagate_statistics.out @@ -70,12 +70,12 @@ CREATE STATISTICS s5 ON a,b FROM test_stats4; -- s6 doesn't exist DROP STATISTICS IF EXISTS s3, sc2.s4, s6; DROP STATISTICS s5,s6; -ERROR: statistics object "statistics'Test.s6" does not exist +ERROR: statistics object "s6" does not exist DROP STATISTICS IF EXISTS s5,s5,s6,s6; -- test renaming statistics CREATE STATISTICS s6 ON a,b FROM test_stats4; DROP STATISTICS s7; -ERROR: statistics object "statistics'Test.s7" does not exist +ERROR: statistics object "s7" does not exist ALTER STATISTICS s6 RENAME TO s7; ALTER STATISTICS sc1.st1 RENAME TO st1_new; -- test altering stats schema diff --git a/src/test/regress/expected/recursive_dml_queries_mx.out b/src/test/regress/expected/recursive_dml_queries_mx.out index fed88e1f2..b1e29ffb3 100644 --- a/src/test/regress/expected/recursive_dml_queries_mx.out +++ b/src/test/regress/expected/recursive_dml_queries_mx.out @@ -110,6 +110,7 @@ WHERE SET search_path TO recursive_dml_queries_mx, public; CREATE TABLE recursive_dml_queries_mx.local_table (id text, name text); INSERT INTO local_table SELECT i::text, 'user_' || i FROM generate_series (0, 100) i; +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW tenant_ids AS SELECT tenant_id, name @@ -118,6 +119,7 @@ CREATE VIEW tenant_ids AS WHERE distributed_table.dept::text = reference_table.id ORDER BY 2 DESC, 1 DESC; +RESET citus.enable_ddl_propagation; -- we currently do not allow local tables in modification queries UPDATE distributed_table diff --git a/src/test/regress/expected/recursive_view_local_table.out b/src/test/regress/expected/recursive_view_local_table.out index a2306a2e9..b4ef802b4 100644 --- a/src/test/regress/expected/recursive_view_local_table.out +++ b/src/test/regress/expected/recursive_view_local_table.out @@ -12,6 +12,9 @@ CREATE RECURSIVE VIEW recursive_view(val_1, val_2) AS WHERE val_2 < 50 ); CREATE RECURSIVE VIEW recursive_defined_non_recursive_view(c) AS (SELECT 1 FROM local_table); +WARNING: "view 
recursive_defined_non_recursive_view" has dependency to "table local_table" that is not in Citus' metadata +DETAIL: "view recursive_defined_non_recursive_view" will be created only locally +HINT: Distribute "table local_table" first to distribute "view recursive_defined_non_recursive_view" CREATE TABLE ref_table(a int, b INT); SELECT create_reference_table('ref_table'); create_reference_table diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index 948adb050..a23b44ffa 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -382,6 +382,9 @@ $Q$); (2 rows) CREATE VIEW local_table_v AS SELECT * FROM local_table WHERE a BETWEEN 1 AND 10; +WARNING: "view local_table_v" has dependency to "table local_table" that is not in Citus' metadata +DETAIL: "view local_table_v" will be created only locally +HINT: Distribute "table local_table" first to distribute "view local_table_v" SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT * FROM squares JOIN local_table_v ON squares.a = local_table_v.a; diff --git a/src/test/regress/expected/replicated_table_disable_node.out b/src/test/regress/expected/replicated_table_disable_node.out index aa9de483f..60de41f08 100644 --- a/src/test/regress/expected/replicated_table_disable_node.out +++ b/src/test/regress/expected/replicated_table_disable_node.out @@ -20,7 +20,6 @@ INSERT INTO replicated SELECT i,i FROM generate_series(0,10)i; INSERT INTO ref SELECT i,i FROM generate_series(0,10)i; -- should be successfully disable node SELECT citus_disable_node('localhost', :worker_2_port, true); -NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back. citus_disable_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/resync_metadata_with_sequences.out b/src/test/regress/expected/resync_metadata_with_sequences.out index f96651a14..930cf33d4 100644 --- a/src/test/regress/expected/resync_metadata_with_sequences.out +++ b/src/test/regress/expected/resync_metadata_with_sequences.out @@ -140,6 +140,7 @@ INSERT INTO sensors_news VALUES (DEFAULT, DEFAULT, '2021-01-01') RETURNING *; (1 row) \c - - - :master_port +SET client_min_messages TO ERROR; SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); ?column? 
--------------------------------------------------------------------- diff --git a/src/test/regress/expected/run_command_on_all_nodes.out b/src/test/regress/expected/run_command_on_all_nodes.out index 9cf838c83..76c42ad23 100644 --- a/src/test/regress/expected/run_command_on_all_nodes.out +++ b/src/test/regress/expected/run_command_on_all_nodes.out @@ -54,6 +54,7 @@ SELECT tablename FROM pg_tables WHERE schemaname = 'run_command_on_all_nodes'; (1 row) \c - - - :master_port +SET search_path TO run_command_on_all_nodes; SELECT result FROM run_command_on_all_nodes('SELECT tablename FROM pg_tables WHERE schemaname = ''run_command_on_all_nodes'';'); result --------------------------------------------------------------------- @@ -62,7 +63,15 @@ SELECT result FROM run_command_on_all_nodes('SELECT tablename FROM pg_tables WHE tbl (3 rows) +CREATE TABLE test (x int, y int); +SELECT create_distributed_table('test','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + -- break a node and check messages +BEGIN; SELECT nodeid AS worker_1_nodeid FROM pg_dist_node WHERE nodeport = :worker_1_port \gset UPDATE pg_dist_node SET nodeport = 0 WHERE nodeid = :worker_1_nodeid; SELECT nodeid = :worker_1_nodeid AS "Is Worker 1", success, result FROM run_command_on_all_nodes('SELECT 1') ORDER BY 1; @@ -83,6 +92,116 @@ CONTEXT: PL/pgSQL function run_command_on_all_nodes(text,boolean,boolean) line t | f | failed to connect to localhost:xxxxx (3 rows) -UPDATE pg_dist_node SET nodeport = :worker_1_port WHERE nodeid = :worker_1_nodeid; +ROLLBACK; +-- break connection to localhost +BEGIN; +UPDATE pg_dist_node SET nodeport = 0 WHERE groupid = 0; +SELECT success, result +FROM run_command_on_coordinator('SELECT inet_server_port()') ORDER BY 1; + success | result +--------------------------------------------------------------------- + t | 57636 +(1 row) + +SELECT success, result +FROM run_command_on_coordinator('SELECT inet_server_port()', give_warning_for_connection_errors:=true) ORDER BY 1; + success | result +--------------------------------------------------------------------- + t | 57636 +(1 row) + +ROLLBACK; +-- we cannot use run_command_on_coordinator from workers if coordinator is not in the metadata +SELECT success, result FROM run_command_on_all_nodes($$select result from run_command_on_coordinator('select inet_server_port()')$$); + success | result +--------------------------------------------------------------------- + f | ERROR: the coordinator is not added to the metadata + f | ERROR: the coordinator is not added to the metadata + t | 57636 +(3 rows) + +-- we can use run_command_on_coordinator from any node if the coordinator is in the metadata +SELECT citus_set_coordinator_host('localhost'); + citus_set_coordinator_host +--------------------------------------------------------------------- + +(1 row) + +SELECT success, result FROM run_command_on_all_nodes($$select result from run_command_on_coordinator('select inet_server_port()')$$); + success | result +--------------------------------------------------------------------- + t | 57636 + t | 57636 + t | 57636 +(3 rows) + +SELECT success, result FROM run_command_on_all_nodes($$select result from run_command_on_coordinator('select count(*) from run_command_on_all_nodes.test')$$); + success | result +--------------------------------------------------------------------- + t | 0 + t | 0 + t | 0 +(3 rows) + +\c - - - :worker_1_port +-- poor man's DDL from worker +select result from 
run_command_on_coordinator($$create index on run_command_on_all_nodes.test (x)$$); + result +--------------------------------------------------------------------- + CREATE INDEX +(1 row) + +\c - - - :master_port +-- remove coordinator from metadata to restore pre-test situation +SELECT citus_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0; + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +-- check that we fail when pg_dist_node is empty +BEGIN; +DELETE FROM pg_dist_node; +SELECT success, result FROM run_command_on_coordinator('select inet_server_port()'); +ERROR: the coordinator is not added to the metadata +HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('') +CONTEXT: PL/pgSQL function run_command_on_coordinator(text,boolean) line XX at RAISE +ROLLBACK; +-- check that we can do distributed queries from worker nodes +SELECT success, result FROM run_command_on_all_nodes($$insert into run_command_on_all_nodes.test values (1,2)$$, true); + success | result +--------------------------------------------------------------------- + t | INSERT 0 1 + t | INSERT 0 1 + t | INSERT 0 1 +(3 rows) + +SELECT success, result FROM run_command_on_all_nodes($$insert into run_command_on_all_nodes.test values (1,2)$$, false); + success | result +--------------------------------------------------------------------- + t | INSERT 0 1 + t | INSERT 0 1 + t | INSERT 0 1 +(3 rows) + +SELECT success, result FROM run_command_on_all_nodes($$select count(*) from run_command_on_all_nodes.test$$); + success | result +--------------------------------------------------------------------- + t | 6 + t | 6 + t | 6 +(3 rows) + +-- ddl commands are only allowed from the coordinator +SELECT success, result FROM run_command_on_all_nodes($$create index on run_command_on_all_nodes.test (x)$$); + success | result +--------------------------------------------------------------------- + f | ERROR: operation is not allowed on this node + f | ERROR: operation is not allowed on this node + t | CREATE INDEX +(3 rows) + DROP SCHEMA run_command_on_all_nodes CASCADE; -NOTICE: drop cascades to table run_command_on_all_nodes.tbl +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table run_command_on_all_nodes.tbl +drop cascades to table run_command_on_all_nodes.test diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index b443d99e9..918f65147 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -438,6 +438,9 @@ CREATE VIEW replication_test_table_placements_per_node AS AND shardstate != 4 GROUP BY nodename, nodeport ORDER BY nodename, nodeport; +WARNING: "view replication_test_table_placements_per_node" has dependency to "table replication_test_table" that is not in Citus' metadata +DETAIL: "view replication_test_table_placements_per_node" will be created only locally +HINT: Distribute "table replication_test_table" first to distribute "view replication_test_table_placements_per_node" -- Create four shards with replication factor 2, and delete the placements -- with smaller port number to simulate under-replicated shards. 
SELECT count(master_create_empty_shard('replication_test_table')) diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index c854ec48a..6177e215e 100644 --- a/src/test/regress/expected/single_node.out +++ b/src/test/regress/expected/single_node.out @@ -10,6 +10,13 @@ ALTER SYSTEM SET citus.max_cached_conns_per_worker TO 0; -- adding the coordinator as inactive is disallowed SELECT 1 FROM master_add_inactive_node('localhost', :master_port, groupid => 0); ERROR: coordinator node cannot be added as inactive node +-- before adding a node we are not officially a coordinator +SELECT citus_is_coordinator(); + citus_is_coordinator +--------------------------------------------------------------------- + f +(1 row) + -- idempotently add node to allow this test to run without add_coordinator SET client_min_messages TO WARNING; SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); @@ -18,6 +25,13 @@ SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); 1 (1 row) +-- after adding a node we are officially a coordinator +SELECT citus_is_coordinator(); + citus_is_coordinator +--------------------------------------------------------------------- + t +(1 row) + -- coordinator cannot be disabled SELECT 1 FROM citus_disable_node('localhost', :master_port); ERROR: cannot change "isactive" field of the coordinator node @@ -2231,15 +2245,6 @@ NOTICE: executing the command locally: UPDATE single_node.another_schema_table_ (1 row) ROLLBACK; --- same without transaction block -WITH cte_1 AS (UPDATE another_schema_table SET b = b + 1 WHERE a = 1 RETURNING *) -SELECT coordinated_transaction_should_use_2PC() FROM cte_1; -NOTICE: executing the command locally: WITH cte_1 AS (UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (another_schema_table.b OPERATOR(pg_catalog.+) 1) WHERE (another_schema_table.a OPERATOR(pg_catalog.=) 1) RETURNING another_schema_table.a, another_schema_table.b) SELECT single_node.coordinated_transaction_should_use_2pc() AS coordinated_transaction_should_use_2pc FROM cte_1 - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - -- if the local execution is disabled, we cannot failover to -- local execution and the queries would fail SET citus.enable_local_execution TO false; diff --git a/src/test/regress/expected/sqlsmith_failures.out b/src/test/regress/expected/sqlsmith_failures.out index d276f04a9..6440cf75f 100644 --- a/src/test/regress/expected/sqlsmith_failures.out +++ b/src/test/regress/expected/sqlsmith_failures.out @@ -142,8 +142,9 @@ where (select pg_catalog.array_agg(id) from sqlsmith_failures.countries) -- cleanup DROP SCHEMA sqlsmith_failures CASCADE; -NOTICE: drop cascades to 6 other objects +NOTICE: drop cascades to 7 other objects DETAIL: drop cascades to table countries +drop cascades to table countries_1280000 drop cascades to table orgs drop cascades to table users drop cascades to table orders diff --git a/src/test/regress/expected/start_stop_metadata_sync.out b/src/test/regress/expected/start_stop_metadata_sync.out index 1f82c60cb..d3f961124 100644 --- a/src/test/regress/expected/start_stop_metadata_sync.out +++ b/src/test/regress/expected/start_stop_metadata_sync.out @@ -141,7 +141,6 @@ SELECT * FROM distributed_table_1; --------------------------------------------------------------------- (0 rows) -CREATE VIEW test_view AS SELECT COUNT(*) FROM distributed_table_3; CREATE MATERIALIZED VIEW test_matview 
AS SELECT COUNT(*) FROM distributed_table_3; SELECT * FROM test_view; count diff --git a/src/test/regress/expected/subquery_in_targetlist.out b/src/test/regress/expected/subquery_in_targetlist.out index a32b480f1..79989b409 100644 --- a/src/test/regress/expected/subquery_in_targetlist.out +++ b/src/test/regress/expected/subquery_in_targetlist.out @@ -278,6 +278,8 @@ ORDER BY 1 LIMIT 3; ERROR: correlated subqueries are not supported when the FROM clause contains a subquery without FROM -- sublink on view CREATE TEMP VIEW view_1 AS (SELECT user_id, value_2 FROM users_table WHERE user_id = 1 AND value_1 = 1 ORDER BY 1,2); +WARNING: "view view_1" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view view_1" will be created only locally -- with distribution column group by SELECT (SELECT value_2 FROM view_1 WHERE user_id = e.user_id GROUP BY user_id, value_2) FROM events_table e @@ -323,6 +325,8 @@ ORDER BY 1 LIMIT 3; -- sublink on reference table view CREATE TEMP VIEW view_2 AS (SELECT user_id, value_2 FROM users_reference_table WHERE user_id = 1 AND value_1 = 1); +WARNING: "view view_2" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view view_2" will be created only locally SELECT (SELECT value_2 FROM view_2 WHERE user_id = e.user_id GROUP BY user_id, value_2) FROM events_table e GROUP BY 1 diff --git a/src/test/regress/expected/subquery_in_where.out b/src/test/regress/expected/subquery_in_where.out index c5aedd31d..c5ffc8d93 100644 --- a/src/test/regress/expected/subquery_in_where.out +++ b/src/test/regress/expected/subquery_in_where.out @@ -1096,6 +1096,8 @@ CREATE TEMPORARY VIEW correlated_subquery_view AS FROM events_table e1 WHERE e1.user_id = u1.user_id ) > 0; +WARNING: "view correlated_subquery_view" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view correlated_subquery_view" will be created only locally SELECT sum(user_id) FROM correlated_subquery_view; sum --------------------------------------------------------------------- diff --git a/src/test/regress/expected/subquery_partitioning.out b/src/test/regress/expected/subquery_partitioning.out index 80ea4478a..57a589600 100644 --- a/src/test/regress/expected/subquery_partitioning.out +++ b/src/test/regress/expected/subquery_partitioning.out @@ -209,6 +209,9 @@ FROM ) as foo WHERE foo.user_id = cte.user_id ) as foo, users_table WHERE foo.cnt > users_table.value_2; +WARNING: "view subquery_and_ctes" has dependency to "table users_table_local" that is not in Citus' metadata +DETAIL: "view subquery_and_ctes" will be created only locally +HINT: Distribute "table users_table_local" first to distribute "view subquery_and_ctes" SELECT * FROM subquery_and_ctes ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC LIMIT 5; diff --git a/src/test/regress/expected/subquery_view.out b/src/test/regress/expected/subquery_view.out index 5f50e6e6d..535e356d5 100644 --- a/src/test/regress/expected/subquery_view.out +++ b/src/test/regress/expected/subquery_view.out @@ -281,6 +281,9 @@ FROM ) as baz WHERE baz.user_id = users_table.user_id ) as sub1; +WARNING: "view subquery_from_from_where_local_table" has dependency to "table events_table_local" that is not in Citus' metadata +DETAIL: "view subquery_from_from_where_local_table" will be created only locally +HINT: Distribute "table events_table_local" first to distribute "view subquery_from_from_where_local_table" SELECT * FROM @@ -337,6 +340,9 @@ FROM SELECT user_id FROM users_table_local WHERE user_id = 2 ) baw WHERE foo.value_2 = bar.user_id AND 
baz.value_2 = bar.user_id AND bar.user_id = baw.user_id; +WARNING: "view all_executors_view" has dependency to "table users_table_local" that is not in Citus' metadata +DETAIL: "view all_executors_view" will be created only locally +HINT: Distribute "table users_table_local" first to distribute "view all_executors_view" SELECT * FROM @@ -390,6 +396,9 @@ FROM ) as foo WHERE foo.user_id = cte.user_id ) as foo, users_table WHERE foo.cnt > users_table.value_2; +WARNING: "view subquery_and_ctes" has dependency to "table users_table_local" that is not in Citus' metadata +DETAIL: "view subquery_and_ctes" will be created only locally +HINT: Distribute "table users_table_local" first to distribute "view subquery_and_ctes" SELECT * FROM subquery_and_ctes ORDER BY 3 DESC, 1 DESC, 2 DESC, 4 DESC LIMIT 5; @@ -437,6 +446,9 @@ SELECT time, event_type, value_2, value_3 FROM events_table WHERE foo.user_id = events_table.value_2; +WARNING: "view subquery_and_ctes_second" has dependency to "table users_table_local" that is not in Citus' metadata +DETAIL: "view subquery_and_ctes_second" will be created only locally +HINT: Distribute "table users_table_local" first to distribute "view subquery_and_ctes_second" SELECT * FROM subquery_and_ctes_second ORDER BY 3 DESC, 2 DESC, 1 DESC LIMIT 5; diff --git a/src/test/regress/expected/union_pushdown.out b/src/test/regress/expected/union_pushdown.out index 3d68bd8a5..cbee11f8e 100644 --- a/src/test/regress/expected/union_pushdown.out +++ b/src/test/regress/expected/union_pushdown.out @@ -899,6 +899,9 @@ INSERT INTO range_dist_table_2 VALUES ((10, 91)); INSERT INTO range_dist_table_2 VALUES ((20, 100)); -- the following can be pushed down CREATE OR REPLACE VIEW v2 AS SELECT * from range_dist_table_2 UNION ALL SELECT * from range_dist_table_2; +WARNING: "view v2" has dependency to "table range_dist_table_2" that is not in Citus' metadata +DETAIL: "view v2" will be created only locally +HINT: Distribute "table range_dist_table_2" first to distribute "view v2" SELECT public.explain_has_distributed_subplan($$ EXPLAIN SELECT COUNT(dist_col) FROM v2; diff --git a/src/test/regress/expected/upgrade_columnar_after.out b/src/test/regress/expected/upgrade_columnar_after.out index 196b3c3c7..518cc1590 100644 --- a/src/test/regress/expected/upgrade_columnar_after.out +++ b/src/test/regress/expected/upgrade_columnar_after.out @@ -101,10 +101,10 @@ SELECT * FROM matview ORDER BY a; (2 rows) -- test we retained options -SELECT * FROM columnar.options WHERE regclass = 'test_options_1'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +SELECT * FROM columnar.options WHERE relation = 'test_options_1'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - test_options_1 | 1000 | 5000 | 3 | pglz + test_options_1 | 1000 | 5000 | pglz | 3 (1 row) VACUUM VERBOSE test_options_1; @@ -121,10 +121,10 @@ SELECT count(*), sum(a), sum(b) FROM test_options_1; 10000 | 50005000 | 45010 (1 row) -SELECT * FROM columnar.options WHERE regclass = 'test_options_2'::regclass; - regclass | chunk_group_row_limit | stripe_row_limit | compression_level | compression +SELECT * FROM columnar.options WHERE relation = 'test_options_2'::regclass; + relation | chunk_group_row_limit | stripe_row_limit | compression | compression_level --------------------------------------------------------------------- - test_options_2 | 2000 | 6000 | 13 | none + 
test_options_2 | 2000 | 6000 | none | 13 (1 row) VACUUM VERBOSE test_options_2; @@ -272,7 +272,7 @@ ROLLBACK; SELECT pg_class.oid INTO columnar_schema_members FROM pg_class, pg_namespace WHERE pg_namespace.oid=pg_class.relnamespace AND - pg_namespace.nspname='columnar'; + pg_namespace.nspname='columnar_internal'; SELECT refobjid INTO columnar_schema_members_pg_depend FROM pg_depend WHERE classid = 'pg_am'::regclass::oid AND @@ -304,7 +304,7 @@ $$ SELECT pg_class.oid INTO columnar_schema_members FROM pg_class, pg_namespace WHERE pg_namespace.oid=pg_class.relnamespace AND - pg_namespace.nspname='columnar'; + pg_namespace.nspname='columnar_internal'; SELECT refobjid INTO columnar_schema_members_pg_depend FROM pg_depend WHERE classid = 'pg_am'::regclass::oid AND diff --git a/src/test/regress/expected/upgrade_columnar_metapage_after.out b/src/test/regress/expected/upgrade_columnar_metapage_after.out index 7268167dd..87e2424cb 100644 --- a/src/test/regress/expected/upgrade_columnar_metapage_after.out +++ b/src/test/regress/expected/upgrade_columnar_metapage_after.out @@ -94,7 +94,7 @@ SELECT version_major, version_minor, reserved_stripe_id, reserved_row_number (1 row) -- table is already upgraded, make sure that upgrade_columnar_metapage is no-op -SELECT citus_internal.upgrade_columnar_storage(c.oid) +SELECT columnar_internal.upgrade_columnar_storage(c.oid) FROM pg_class c, pg_am a WHERE c.relam = a.oid AND amname = 'columnar' and relname = 'columnar_table_2'; upgrade_columnar_storage diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index ea7fe01a4..c4db77f8d 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -16,10 +16,7 @@ WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass ORDER BY 1; description --------------------------------------------------------------------- - access method columnar event trigger citus_cascade_to_partition - function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean) - function alter_columnar_table_set(regclass,integer,integer,name,integer) function alter_distributed_table(regclass,text,integer,text,boolean) function alter_old_partitions_set_access_method(regclass,timestamp with time zone,name) function alter_role_if_exists(text,text) @@ -62,8 +59,6 @@ ORDER BY 1; function citus_finalize_upgrade_to_citus11(boolean) function citus_finish_pg_upgrade() function citus_get_active_worker_nodes() - function citus_internal.columnar_ensure_am_depends_catalog() - function citus_internal.downgrade_columnar_storage(regclass) function citus_internal.find_groupid_for_node(text,integer) function citus_internal.pg_dist_node_trigger_func() function citus_internal.pg_dist_rebalance_strategy_trigger_func() @@ -71,7 +66,6 @@ ORDER BY 1; function citus_internal.refresh_isolation_tester_prepared_statement() function citus_internal.replace_isolation_tester_func() function citus_internal.restore_isolation_tester_func() - function citus_internal.upgrade_columnar_storage(regclass) function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") @@ -83,6 +77,7 @@ ORDER BY 1; function citus_internal_local_blocked_processes() function citus_internal_update_placement_metadata(bigint,integer,integer) function 
citus_internal_update_relation_colocation(oid,integer) + function citus_is_coordinator() function citus_isolation_test_session_is_blocked(integer,integer[]) function citus_json_concatenate(json,json) function citus_json_concatenate_final(json) @@ -127,7 +122,6 @@ ORDER BY 1; function citus_version() function column_name_to_column(regclass,text) function column_to_column_name(regclass,text) - function columnar.columnar_handler(internal) function coord_combine_agg(oid,cstring,anyelement) function coord_combine_agg_ffunc(internal,oid,cstring,anyelement) function coord_combine_agg_sfunc(internal,oid,cstring,anyelement) @@ -198,10 +192,12 @@ ORDER BY 1; function role_exists(name) function run_command_on_all_nodes(text,boolean,boolean) function run_command_on_colocated_placements(regclass,regclass,text,boolean) + function run_command_on_coordinator(text,boolean) function run_command_on_placements(regclass,text,boolean) function run_command_on_shards(regclass,text,boolean) function run_command_on_workers(text,boolean) function shard_name(regclass,bigint) + function start_metadata_sync_to_all_nodes() function start_metadata_sync_to_node(text,integer) function stop_metadata_sync_to_node(text,integer,boolean) function time_partition_range(regclass) @@ -238,17 +234,11 @@ ORDER BY 1; function worker_save_query_explain_analyze(text,jsonb) schema citus schema citus_internal - schema columnar - sequence columnar.storageid_seq sequence pg_dist_colocationid_seq sequence pg_dist_groupid_seq sequence pg_dist_node_nodeid_seq sequence pg_dist_placement_placementid_seq sequence pg_dist_shardid_seq - table columnar.chunk - table columnar.chunk_group - table columnar.options - table columnar.stripe table pg_dist_authinfo table pg_dist_colocation table pg_dist_local_group @@ -275,5 +265,5 @@ ORDER BY 1; view citus_stat_statements view pg_dist_shard_placement view time_partitions -(259 rows) +(249 rows) diff --git a/src/test/regress/expected/upgrade_post_11_after.out b/src/test/regress/expected/upgrade_post_11_after.out index b38be95c7..a52c5a2e0 100644 --- a/src/test/regress/expected/upgrade_post_11_after.out +++ b/src/test/regress/expected/upgrade_post_11_after.out @@ -9,24 +9,37 @@ NOTICE: Preparing to sync the metadata to all nodes t (1 row) --- tables are objects with Citus 11+ -SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype) ORDER BY 1; +-- tables, views and their dependencies become objects with Citus 11+ +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.employees'::regclass, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.my_type_for_view'::regtype, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_table_for_view'::regclass, 'post_11_upgrade.non_dist_upgrade_test_view'::regclass, 'post_11_upgrade.non_dist_upgrade_test_view_local_join'::regclass, 'post_11_upgrade.non_dist_upgrade_multiple_dist_view'::regclass, 
'post_11_upgrade.non_dist_upgrade_ref_view'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 'post_11_upgrade.reporting_line'::regclass, 'post_11_upgrade.v_test_1'::regclass, 'post_11_upgrade.v_test_2'::regclass, 'post_11_upgrade.owned_by_extension_table'::regclass, 'post_11_upgrade.materialized_view'::regclass, 'post_11_upgrade.owned_by_extension_view'::regclass, 'post_11_upgrade.local_type'::regtype, 'post_11_upgrade.non_dist_dist_table_for_view'::regclass, 'post_11_upgrade.depends_on_nothing_1'::regclass, 'post_11_upgrade.depends_on_nothing_2'::regclass, 'post_11_upgrade.depends_on_pg'::regclass, 'post_11_upgrade.depends_on_citus'::regclass, 'post_11_upgrade.depends_on_seq'::regclass, 'post_11_upgrade.depends_on_seq_and_no_support'::regclass) ORDER BY 1; pg_identify_object_as_address --------------------------------------------------------------------- (function,"{post_11_upgrade,func_in_transaction_def}",{}) (schema,{post_11_upgrade},{}) + (table,"{post_11_upgrade,employees}",{}) (table,"{post_11_upgrade,part_table}",{}) (table,"{post_11_upgrade,sensors}",{}) ("text search configuration","{post_11_upgrade,partial_index_test_config}",{}) (type,{post_11_upgrade.my_type},{}) -(6 rows) + (type,{post_11_upgrade.my_type_for_view},{}) + (view,"{post_11_upgrade,depends_on_citus}",{}) + (view,"{post_11_upgrade,depends_on_nothing_1}",{}) + (view,"{post_11_upgrade,depends_on_nothing_2}",{}) + (view,"{post_11_upgrade,depends_on_pg}",{}) + (view,"{post_11_upgrade,depends_on_seq}",{}) + (view,"{post_11_upgrade,non_dist_upgrade_multiple_dist_view}",{}) + (view,"{post_11_upgrade,non_dist_upgrade_ref_view}",{}) + (view,"{post_11_upgrade,non_dist_upgrade_ref_view_2}",{}) + (view,"{post_11_upgrade,reporting_line}",{}) + (view,"{post_11_upgrade,view_for_upgrade_test}",{}) + (view,"{post_11_upgrade,view_for_upgrade_test_my_type}",{}) +(19 rows) -- on all nodes -SELECT run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype) ORDER BY 1;$$) ORDER BY 1; - run_command_on_workers +SELECT run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 'post_11_upgrade.reporting_line'::regclass) ORDER BY 1;$$) ORDER BY 1; + run_command_on_workers --------------------------------------------------------------------- - (localhost,57636,t,"{""(type,{post_11_upgrade.my_type},{})"",""(function,\\""{post_11_upgrade,func_in_transaction_def}\\"",{})"",""(table,\\""{post_11_upgrade,part_table}\\"",{})"",""(table,\\""{post_11_upgrade,sensors}\\"",{})"",""(schema,{post_11_upgrade},{})"",""(\\""text search configuration\\"",\\""{post_11_upgrade,partial_index_test_config}\\"",{})""}") - 
(localhost,57637,t,"{""(type,{post_11_upgrade.my_type},{})"",""(function,\\""{post_11_upgrade,func_in_transaction_def}\\"",{})"",""(table,\\""{post_11_upgrade,part_table}\\"",{})"",""(table,\\""{post_11_upgrade,sensors}\\"",{})"",""(schema,{post_11_upgrade},{})"",""(\\""text search configuration\\"",\\""{post_11_upgrade,partial_index_test_config}\\"",{})""}") + (localhost,57636,t,"{""(type,{post_11_upgrade.my_type},{})"",""(function,\\""{post_11_upgrade,func_in_transaction_def}\\"",{})"",""(table,\\""{post_11_upgrade,part_table}\\"",{})"",""(table,\\""{post_11_upgrade,sensors}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test_my_type}\\"",{})"",""(view,\\""{post_11_upgrade,non_dist_upgrade_ref_view_2}\\"",{})"",""(view,\\""{post_11_upgrade,reporting_line}\\"",{})"",""(schema,{post_11_upgrade},{})"",""(\\""text search configuration\\"",\\""{post_11_upgrade,partial_index_test_config}\\"",{})""}") + (localhost,57637,t,"{""(type,{post_11_upgrade.my_type},{})"",""(function,\\""{post_11_upgrade,func_in_transaction_def}\\"",{})"",""(table,\\""{post_11_upgrade,part_table}\\"",{})"",""(table,\\""{post_11_upgrade,sensors}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test}\\"",{})"",""(view,\\""{post_11_upgrade,view_for_upgrade_test_my_type}\\"",{})"",""(view,\\""{post_11_upgrade,non_dist_upgrade_ref_view_2}\\"",{})"",""(view,\\""{post_11_upgrade,reporting_line}\\"",{})"",""(schema,{post_11_upgrade},{})"",""(\\""text search configuration\\"",\\""{post_11_upgrade,partial_index_test_config}\\"",{})""}") (2 rows) -- Create the necessary test utility function @@ -68,7 +81,9 @@ UNION EXCEPT SELECT unnest(activate_node_snapshot()) as command ) -) AS foo WHERE command NOT ILIKE '%distributed_object_data%'; +) AS foo WHERE command NOT ILIKE '%distributed_object_data%' and +-- sequences differ per node, so exclude +command NOT ILIKE '%sequence%'; same_metadata_in_workers --------------------------------------------------------------------- t diff --git a/src/test/regress/expected/upgrade_post_11_before.out b/src/test/regress/expected/upgrade_post_11_before.out index 37bbab11b..3ae8f3d8a 100644 --- a/src/test/regress/expected/upgrade_post_11_before.out +++ b/src/test/regress/expected/upgrade_post_11_before.out @@ -114,6 +114,42 @@ INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; +-- table for recursive view +CREATE TABLE employees (employee_id int, manager_id int, full_name text); +SELECT create_distributed_table('employees', 'employee_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- table for owned_by_extension +-- note that tables owned by extension are +-- not added to the pg_dist_object, and assumed +-- to exists on all nodes via the extension +CREATE TABLE owned_by_extension_table (employee_id int, manager_id int, full_name text); +ALTER EXTENSION plpgsql ADD TABLE post_11_upgrade.owned_by_extension_table; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. 
+SELECT create_distributed_table('owned_by_extension_table', 'employee_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT run_command_on_workers($$CREATE TABLE post_11_upgrade.owned_by_extension_table (employee_id int, manager_id int, full_name text);$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57636,t,"CREATE TABLE") + (localhost,57637,t,"CREATE TABLE") +(2 rows) + +SELECT run_command_on_workers($$ALTER EXTENSION plpgsql ADD TABLE post_11_upgrade.owned_by_extension_table;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57636,t,"ALTER EXTENSION") + (localhost,57637,t,"ALTER EXTENSION") +(2 rows) + SET citus.enable_ddl_propagation TO off; CREATE TEXT SEARCH CONFIGURATION post_11_upgrade.partial_index_test_config ( parser = default ); SELECT 1 FROM run_command_on_workers($$CREATE TEXT SEARCH CONFIGURATION post_11_upgrade.partial_index_test_config ( parser = default );$$); @@ -147,6 +183,72 @@ $$;'); (2 rows) CREATE TYPE post_11_upgrade.my_type AS (a int); +CREATE VIEW post_11_upgrade.view_for_upgrade_test AS SELECT * FROM sensors; +-- one normally would not need views on the workers pre-11, but still +-- nice test to have +SELECT run_command_on_workers('SET citus.enable_ddl_propagation TO off; +CREATE VIEW post_11_upgrade.view_for_upgrade_test AS SELECT * FROM sensors;'); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57636,t,SET) + (localhost,57637,t,SET) +(2 rows) + +-- a non-distributed type dependency to a view +-- both the view and the type should be distributed after the upgrade +CREATE TYPE post_11_upgrade.my_type_for_view AS (a int); +CREATE VIEW post_11_upgrade.view_for_upgrade_test_my_type (casted) AS SELECT row(measureid)::post_11_upgrade.my_type_for_view FROM sensors; +-- a local type, table and view, should not be distributed +-- after the upgrade +CREATE TYPE post_11_upgrade.local_type AS (a int); +CREATE TABLE post_11_upgrade.non_dist_table_for_view(a int, b post_11_upgrade.local_type); +CREATE VIEW post_11_upgrade.non_dist_upgrade_test_view AS SELECT * FROM non_dist_table_for_view; +-- a local table joined with a distributed table. 
In other words, the view has a local table dependency +-- and should not be distributed after the upgrade +CREATE TABLE post_11_upgrade.non_dist_dist_table_for_view(a int); +CREATE VIEW post_11_upgrade.non_dist_upgrade_test_view_local_join AS SELECT * FROM non_dist_table_for_view JOIN sensors ON (true); +-- a view selecting from multiple +-- distributed/reference tables should be marked as distributed +CREATE VIEW post_11_upgrade.non_dist_upgrade_multiple_dist_view AS SELECT colocated_dist_table.* FROM colocated_dist_table JOIN sensors ON (true) JOIN reference_table ON (true); +-- a view selecting from reference table should be fine +CREATE VIEW post_11_upgrade.non_dist_upgrade_ref_view AS SELECT * FROM reference_table; +-- a view selecting from another (distributed) view should also be distributed +CREATE VIEW post_11_upgrade.non_dist_upgrade_ref_view_2 AS SELECT * FROM non_dist_upgrade_ref_view; +-- materialized views never becomes distributed +CREATE MATERIALIZED VIEW post_11_upgrade.materialized_view AS SELECT * FROM reference_table; +CREATE VIEW post_11_upgrade.owned_by_extension_view AS SELECT * FROM reference_table; +ALTER EXTENSION plpgsql ADD VIEW post_11_upgrade.owned_by_extension_view; +-- temporary views should not be marked as distributed +CREATE VIEW pg_temp.temp_view_1 AS SELECT * FROM reference_table; +CREATE temporary VIEW temp_view_2 AS SELECT * FROM reference_table; +-- we should be able to distribute recursive views +CREATE OR REPLACE RECURSIVE VIEW reporting_line (employee_id, subordinates) AS +SELECT employee_id, + full_name AS subordinates +FROM employees +WHERE manager_id IS NULL +UNION ALL +SELECT e.employee_id, + (rl.subordinates || ' > ' || e.full_name) AS subordinates +FROM employees e +INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id; +-- v_test_1 and v_test_2 becomes circularly dependend views +-- so we should not try to distribute any of the views +CREATE VIEW post_11_upgrade.v_test_1 AS SELECT * FROM sensors; +CREATE VIEW post_11_upgrade.v_test_2 AS SELECT * FROM sensors; +CREATE OR REPLACE VIEW post_11_upgrade.v_test_1 AS SELECT sensors.* FROM sensors JOIN v_test_2 USING (measureid); +CREATE OR REPLACE VIEW post_11_upgrade.v_test_2 AS SELECT sensors.* FROM sensors JOIN v_test_1 USING (measureid); +-- views that do not depeend on anything should be distributed +CREATE VIEW post_11_upgrade.depends_on_nothing_1 AS SELECT * FROM (VALUES (1)) as values; +CREATE VIEW post_11_upgrade.depends_on_nothing_2 AS SELECT 1; +-- views depends pg/citus objects should be distributed +CREATE VIEW post_11_upgrade.depends_on_pg AS SELECT * FROM pg_class; +CREATE VIEW post_11_upgrade.depends_on_citus AS SELECT * FROM pg_dist_partition; +-- views depend on sequences only should be distributed +CREATE SEQUENCE post_11_upgrade.seq_bigint AS bigint INCREMENT BY 3 CACHE 10 CYCLE; +CREATE VIEW post_11_upgrade.depends_on_seq AS SELECT nextval('post_11_upgrade.seq_bigint'); +-- views depend on a sequence and a local table should not be distributed +CREATE VIEW post_11_upgrade.depends_on_seq_and_no_support AS SELECT nextval('post_11_upgrade.seq_bigint') FROM post_11_upgrade.non_dist_table_for_view; RESET citus.enable_ddl_propagation; CREATE TABLE sensors_parser( measureid integer, diff --git a/src/test/regress/expected/view_propagation.out b/src/test/regress/expected/view_propagation.out new file mode 100644 index 000000000..821cfb401 --- /dev/null +++ b/src/test/regress/expected/view_propagation.out @@ -0,0 +1,598 @@ +-- Tests to check propagation of all view commands 
+CREATE SCHEMA view_prop_schema; +SET search_path to view_prop_schema; +-- Check creating views depending on different types of tables +-- and from multiple schemas +-- Check the most basic one +CREATE VIEW prop_view_basic AS SELECT 1; +-- Try to create view depending local table, then try to recreate it after distributing the table +CREATE TABLE view_table_1(id int, val_1 text); +CREATE VIEW prop_view_1 AS + SELECT * FROM view_table_1; +WARNING: "view prop_view_1" has dependency to "table view_table_1" that is not in Citus' metadata +DETAIL: "view prop_view_1" will be created only locally +HINT: Distribute "table view_table_1" first to distribute "view prop_view_1" +SELECT create_distributed_table('view_table_1', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW prop_view_1 AS + SELECT * FROM view_table_1; +-- Try to create view depending local table, then try to recreate it after making the table reference table +CREATE TABLE view_table_2(id int PRIMARY KEY, val_1 text); +CREATE VIEW prop_view_2 AS + SELECT view_table_1.id, view_table_2.val_1 FROM view_table_1 INNER JOIN view_table_2 + ON view_table_1.id = view_table_2.id; +WARNING: "view prop_view_2" has dependency to "table view_table_2" that is not in Citus' metadata +DETAIL: "view prop_view_2" will be created only locally +HINT: Distribute "table view_table_2" first to distribute "view prop_view_2" +SELECT create_reference_table('view_table_2'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW prop_view_2 AS + SELECT view_table_1.id, view_table_2.val_1 FROM view_table_1 INNER JOIN view_table_2 + ON view_table_1.id = view_table_2.id; +-- Try to create view depending local table, then try to recreate it after making the table citus local table +CREATE TABLE view_table_3(id int, val_1 text); +CREATE VIEW prop_view_3 AS + SELECT * FROM view_table_1 WHERE id IN + (SELECT view_table_2.id FROM view_table_2 INNER JOIN view_table_3 ON view_table_2.id = view_table_3.id); +WARNING: "view prop_view_3" has dependency to "table view_table_3" that is not in Citus' metadata +DETAIL: "view prop_view_3" will be created only locally +HINT: Distribute "table view_table_3" first to distribute "view prop_view_3" +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid=>0); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +ALTER TABLE view_table_3 +ADD CONSTRAINT f_key_for_local_table +FOREIGN KEY(id) +REFERENCES view_table_2(id); +CREATE OR REPLACE VIEW prop_view_3 AS + SELECT * FROM view_table_1 WHERE id IN + (SELECT view_table_2.id FROM view_table_2 INNER JOIN view_table_3 ON view_table_2.id = view_table_3.id); +-- Try to create view depending on PG metadata table +CREATE VIEW prop_view_4 AS + SELECT * FROM pg_stat_activity; +-- Try to create view depending on Citus metadata table +CREATE VIEW prop_view_5 AS + SELECT * FROM citus_dist_stat_activity; +-- Try to create table depending on a local table from another schema, then try to create it again after distributing the table +CREATE SCHEMA view_prop_schema_inner; +SET search_path TO view_prop_schema_inner; +-- Create local table for tests below +CREATE TABLE view_table_4(id int, val_1 text); +-- Create a distributed table and view to test drop view below +CREATE TABLE inner_view_table(id int); +SELECT create_distributed_table('inner_view_table','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW inner_view_prop AS SELECT * FROM inner_view_table; +SET search_path to view_prop_schema; +CREATE VIEW prop_view_6 AS + SELECT vt1.id, vt4.val_1 FROM view_table_1 AS vt1 + INNER JOIN view_prop_schema_inner.view_table_4 AS vt4 ON vt1.id = vt4.id; +WARNING: "view prop_view_6" has dependency to "table view_prop_schema_inner.view_table_4" that is not in Citus' metadata +DETAIL: "view prop_view_6" will be created only locally +HINT: Distribute "table view_prop_schema_inner.view_table_4" first to distribute "view prop_view_6" +SELECT create_distributed_table('view_prop_schema_inner.view_table_4','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW prop_view_6 AS + SELECT vt1.id, vt4.val_1 FROM view_table_1 AS vt1 + INNER JOIN view_prop_schema_inner.view_table_4 AS vt4 ON vt1.id = vt4.id; +-- Show that all views are propagated as distributed object +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_%' ORDER BY 1; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_1}",{}) + (view,"{view_prop_schema,prop_view_2}",{}) + (view,"{view_prop_schema,prop_view_3}",{}) + (view,"{view_prop_schema,prop_view_4}",{}) + (view,"{view_prop_schema,prop_view_5}",{}) + (view,"{view_prop_schema,prop_view_6}",{}) + (view,"{view_prop_schema,prop_view_basic}",{}) +(7 rows) + +-- Check creating views depending various kind of objects +-- Tests will also check propagating dependent objects +-- Depending on function +SET citus.enable_ddl_propagation TO OFF; +CREATE OR REPLACE FUNCTION func_1_for_view(param_1 int) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return param_1; +END; +$$; +RESET citus.enable_ddl_propagation; +-- Show that function will be propagated together with the view +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%func_1_for_view%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +CREATE VIEW prop_view_7 AS 
SELECT func_1_for_view(id) FROM view_table_1; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%func_1_for_view%'; + obj_identifier +--------------------------------------------------------------------- + (function,"{view_prop_schema,func_1_for_view}",{integer}) +(1 row) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_7%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_7}",{}) +(1 row) + +-- Depending on type +SET citus.enable_ddl_propagation TO OFF; +CREATE TYPE type_for_view_prop AS ENUM ('a','b','c'); +RESET citus.enable_ddl_propagation; +-- Show that type will be propagated together with the view +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%type_for_view_prop%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +CREATE VIEW prop_view_8 AS SELECT val_1::type_for_view_prop FROM view_table_1; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%type_for_view_prop%'; + obj_identifier +--------------------------------------------------------------------- + (type,{view_prop_schema.type_for_view_prop},{}) +(1 row) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_8%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_8}",{}) +(1 row) + +-- Depending on another view +CREATE TABLE view_table_5(id int); +CREATE VIEW prop_view_9 AS SELECT * FROM view_table_5; +WARNING: "view prop_view_9" has dependency to "table view_table_5" that is not in Citus' metadata +DETAIL: "view prop_view_9" will be created only locally +HINT: Distribute "table view_table_5" first to distribute "view prop_view_9" +CREATE VIEW prop_view_10 AS SELECT * FROM prop_view_9; +WARNING: "view prop_view_10" has dependency to "table view_table_5" that is not in Citus' metadata +DETAIL: "view prop_view_10" will be created only locally +HINT: Distribute "table view_table_5" first to distribute "view prop_view_10" +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_9%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_10%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +SELECT create_distributed_table('view_table_5', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW prop_view_10 AS SELECT * FROM prop_view_9; +SELECT * FROM (SELECT 
pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_9%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_9}",{}) +(1 row) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_10%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,prop_view_10}",{}) +(1 row) + +-- Check views owned by non-superuser +SET client_min_messages TO ERROR; +CREATE USER view_creation_user; +SELECT 1 FROM run_command_on_workers($$CREATE USER view_creation_user;$$); + ?column? +--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +GRANT ALL PRIVILEGES ON SCHEMA view_prop_schema to view_creation_user; +SET ROLE view_creation_user; +CREATE TABLE user_owned_table_for_view(id int); +SELECT create_distributed_table('user_owned_table_for_view','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW view_owned_by_user AS SELECT * FROM user_owned_table_for_view; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%view_owned_by_user%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,view_owned_by_user}",{}) +(1 row) + +DROP VIEW view_owned_by_user; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%view_owned_by_user%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +DROP TABLE user_owned_table_for_view; +RESET ROLE; +RESET client_min_messages; +-- Create view with different options +CREATE TABLE view_table_6(id int, val_1 text); +SELECT create_distributed_table('view_table_6','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- TEMP VIEW is not supported. View will be created locally. 
+CREATE TEMP VIEW temp_prop_view AS SELECT * FROM view_table_6; +WARNING: "view temp_prop_view" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view temp_prop_view" will be created only locally +-- Recursive views are supported +CREATE RECURSIVE VIEW nums_1_100_prop_view (n) AS + VALUES (1) +UNION ALL + SELECT n+1 FROM nums_1_100_prop_view WHERE n < 100; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%nums_1_100_prop_view%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,nums_1_100_prop_view}",{}) +(1 row) + +-- Sequences are supported as dependency +CREATE SEQUENCE sequence_to_prop; +CREATE VIEW seq_view_prop AS SELECT sequence_to_prop.is_called FROM sequence_to_prop; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%sequence_to_prop%'; + obj_identifier +--------------------------------------------------------------------- + (sequence,"{view_prop_schema,sequence_to_prop}",{}) +(1 row) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%seq_view_prop%'; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,seq_view_prop}",{}) +(1 row) + +-- Views depend on temp sequences will be created locally +CREATE TEMPORARY SEQUENCE temp_sequence_to_drop; +CREATE VIEW temp_seq_view_prop AS SELECT temp_sequence_to_drop.is_called FROM temp_sequence_to_drop; +NOTICE: view "temp_seq_view_prop" will be a temporary view +WARNING: "view temp_seq_view_prop" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view temp_seq_view_prop" will be created only locally +-- Check circular dependencies are detected +CREATE VIEW circular_view_1 AS SELECT * FROM view_table_6; +CREATE VIEW circular_view_2 AS SELECT * FROM view_table_6; +CREATE OR REPLACE VIEW circular_view_1 AS SELECT view_table_6.* FROM view_table_6 JOIN circular_view_2 USING (id); +CREATE OR REPLACE VIEW circular_view_2 AS SELECT view_table_6.* FROM view_table_6 JOIN circular_view_1 USING (id); +ERROR: Citus can not handle circular dependencies between distributed objects +DETAIL: "view circular_view_2" circularly depends itself, resolve circular dependency first +-- Recursive views with distributed tables included +CREATE TABLE employees (employee_id int, manager_id int, full_name text); +SELECT create_distributed_table('employees', 'employee_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE RECURSIVE VIEW reporting_line (employee_id, subordinates) AS +SELECT + employee_id, + full_name AS subordinates +FROM + employees +WHERE + manager_id IS NULL +UNION ALL + SELECT + e.employee_id, + ( + rl.subordinates || ' > ' || e.full_name + ) AS subordinates + FROM + employees e + INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id; +-- Aliases are supported +CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6; +-- View options are supported +CREATE VIEW opt_prop_view + WITH(check_option=CASCADED, security_barrier=true) + AS SELECT * FROM view_table_6; +CREATE VIEW sep_opt_prop_view + AS 
SELECT * FROM view_table_6 + WITH LOCAL CHECK OPTION; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%' ORDER BY 1; + obj_identifier +--------------------------------------------------------------------- + (view,"{view_prop_schema,aliased_opt_prop_view}",{}) + (view,"{view_prop_schema,opt_prop_view}",{}) + (view,"{view_prop_schema,sep_opt_prop_view}",{}) +(3 rows) + +-- Check definitions and reloptions of views are correct on workers +\c - - - :worker_1_port +SELECT definition FROM pg_views WHERE viewname = 'aliased_opt_prop_view'; + definition +--------------------------------------------------------------------- + SELECT view_table_6.id AS alias_1, + + view_table_6.val_1 AS alias_2 + + FROM view_prop_schema.view_table_6; +(1 row) + +SELECT definition FROM pg_views WHERE viewname = 'opt_prop_view'; + definition +--------------------------------------------------------------------- + SELECT view_table_6.id, + + view_table_6.val_1 + + FROM view_prop_schema.view_table_6; +(1 row) + +SELECT definition FROM pg_views WHERE viewname = 'sep_opt_prop_view'; + definition +--------------------------------------------------------------------- + SELECT view_table_6.id, + + view_table_6.val_1 + + FROM view_prop_schema.view_table_6; +(1 row) + +SELECT relname, reloptions +FROM pg_class +WHERE + oid = 'view_prop_schema.aliased_opt_prop_view'::regclass::oid OR + oid = 'view_prop_schema.opt_prop_view'::regclass::oid OR + oid = 'view_prop_schema.sep_opt_prop_view'::regclass::oid +ORDER BY 1; + relname | reloptions +--------------------------------------------------------------------- + aliased_opt_prop_view | + opt_prop_view | {check_option=cascaded,security_barrier=true} + sep_opt_prop_view | {check_option=local} +(3 rows) + +\c - - - :master_port +SET search_path to view_prop_schema; +-- Sync metadata to check it works properly after adding a view +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- Drop views and check metadata afterwards +DROP VIEW prop_view_9 CASCADE; +NOTICE: drop cascades to view prop_view_10 +DROP VIEW opt_prop_view, aliased_opt_prop_view, view_prop_schema_inner.inner_view_prop, sep_opt_prop_view; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%inner_view_prop%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +-- Drop a column that view depends on +ALTER TABLE view_table_1 DROP COLUMN val_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view prop_view_1 +drop cascades to view prop_view_3 +drop cascades to view prop_view_8 +-- Since prop_view_3 depends on the view_table_1's val_1 column, it should be dropped +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_3%'; + 
obj_identifier +--------------------------------------------------------------------- +(0 rows) + +-- Drop a table that view depends on +DROP TABLE view_table_2 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view prop_view_2 +drop cascades to constraint f_key_for_local_table on table view_table_3 +NOTICE: drop cascades to constraint f_key_for_local_table_1410200 on table view_prop_schema.view_table_3_1410200 +CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)" +PL/pgSQL function citus_drop_trigger() line XX at PERFORM +NOTICE: removing table view_prop_schema.view_table_3 from metadata as it is not connected to any reference tables via foreign keys +-- Since prop_view_2 depends on the view_table_2, it should be dropped +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_2%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +-- Show that unsupported CREATE OR REPLACE VIEW commands are caught by PG on the coordinator +CREATE TABLE table_to_test_unsup_view(id int, val1 text); +SELECT create_distributed_table('table_to_test_unsup_view', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW view_for_unsup_commands AS SELECT * FROM table_to_test_unsup_view; +CREATE OR REPLACE VIEW view_for_unsup_commands(a,b) AS SELECT * FROM table_to_test_unsup_view; +ERROR: cannot change name of view column "id" to "a" +HINT: Use ALTER VIEW ... RENAME COLUMN ... to change name of view column instead. 
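Per the HINT above, the supported path is ALTER VIEW ... RENAME COLUMN, which Citus propagates (see the ALTER VIEW tests that follow). A minimal sketch against the view_for_unsup_commands view created above, shown for illustration only and not part of the recorded test output:

```sql
-- Rename a view column through ALTER VIEW instead of CREATE OR REPLACE VIEW;
-- Citus propagates ALTER VIEW ... RENAME COLUMN to the workers.
ALTER VIEW view_for_unsup_commands RENAME COLUMN id TO a;
ALTER VIEW view_for_unsup_commands RENAME COLUMN a TO id;
```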
+CREATE OR REPLACE VIEW view_for_unsup_commands AS SELECT id FROM table_to_test_unsup_view; +ERROR: cannot drop columns from view +-- ALTER VIEW PROPAGATION +CREATE TABLE alter_view_table(id int, val1 text); +SELECT create_distributed_table('alter_view_table','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE VIEW alter_view_1 AS SELECT * FROM alter_view_table; +-- Set/drop default value is not supported by Citus +ALTER VIEW alter_view_1 ALTER COLUMN val1 SET DEFAULT random()::text; +ERROR: Citus doesn't support setting or resetting default values for a column of view +ALTER TABLE alter_view_1 ALTER COLUMN val1 SET DEFAULT random()::text; +ERROR: Citus doesn't support setting or resetting default values for a column of view +ALTER VIEW alter_view_1 ALTER COLUMN val1 DROP DEFAULT; +ERROR: Citus doesn't support setting or resetting default values for a column of view +ALTER TABLE alter_view_1 ALTER COLUMN val1 DROP DEFAULT; +ERROR: Citus doesn't support setting or resetting default values for a column of view +-- Set/reset options via alter view/alter table commands +ALTER VIEW alter_view_1 SET (check_option=cascaded); +ALTER VIEW alter_view_1 SET (security_barrier); +ALTER VIEW alter_view_1 SET (check_option=cascaded, security_barrier); +ALTER VIEW alter_view_1 SET (check_option=cascaded, security_barrier = true); +ALTER TABLE alter_view_1 SET (check_option=cascaded); +ALTER TABLE alter_view_1 SET (security_barrier); +ALTER TABLE alter_view_1 SET (check_option=cascaded, security_barrier); +ALTER TABLE alter_view_1 SET (check_option=cascaded, security_barrier = true); +-- Check the definition on both coordinator and worker node +SELECT definition FROM pg_views WHERE viewname = 'alter_view_1'; + definition +--------------------------------------------------------------------- + SELECT alter_view_table.id,+ + alter_view_table.val1 + + FROM alter_view_table; +(1 row) + +SELECT relname, reloptions +FROM pg_class +WHERE oid = 'view_prop_schema.alter_view_1'::regclass::oid; + relname | reloptions +--------------------------------------------------------------------- + alter_view_1 | {check_option=cascaded,security_barrier=true} +(1 row) + +\c - - - :worker_1_port +SELECT definition FROM pg_views WHERE viewname = 'alter_view_1'; + definition +--------------------------------------------------------------------- + SELECT alter_view_table.id,+ + alter_view_table.val1 + + FROM view_prop_schema.alter_view_table; +(1 row) + +SELECT relname, reloptions +FROM pg_class +WHERE oid = 'view_prop_schema.alter_view_1'::regclass::oid; + relname | reloptions +--------------------------------------------------------------------- + alter_view_1 | {check_option=cascaded,security_barrier=true} +(1 row) + +\c - - - :master_port +SET search_path to view_prop_schema; +ALTER TABLE alter_view_1 RESET (check_option, security_barrier); +ALTER VIEW alter_view_1 RESET (check_option, security_barrier); +-- Change the schema of the view +ALTER TABLE alter_view_1 SET SCHEMA view_prop_schema_inner; +ALTER VIEW view_prop_schema_inner.alter_view_1 SET SCHEMA view_prop_schema; +-- Rename view and view's column name +ALTER VIEW alter_view_1 RENAME COLUMN val1 TO val2; +ALTER VIEW alter_view_1 RENAME val2 TO val1; +ALTER VIEW alter_view_1 RENAME TO alter_view_2; +ALTER TABLE alter_view_2 RENAME COLUMN val1 TO val2; +ALTER TABLE alter_view_2 RENAME val2 TO val1; +ALTER TABLE alter_view_2 RENAME TO alter_view_1; +-- Alter owner with alter view/alter table +SET 
client_min_messages TO ERROR; +CREATE USER alter_view_user; +SELECT 1 FROM run_command_on_workers($$CREATE USER alter_view_user;$$); + ?column? +--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +RESET client_min_messages; +ALTER VIEW alter_view_1 OWNER TO alter_view_user; +ALTER TABLE alter_view_1 OWNER TO alter_view_user; +-- Alter view owned by extension +CREATE TABLE table_for_ext_owned_view(id int); +CREATE VIEW extension_owned_view AS SELECT * FROM table_for_ext_owned_view; +WARNING: "view extension_owned_view" has dependency to "table table_for_ext_owned_view" that is not in Citus' metadata +DETAIL: "view extension_owned_view" will be created only locally +HINT: Distribute "table table_for_ext_owned_view" first to distribute "view extension_owned_view" +CREATE EXTENSION seg; +ALTER EXTENSION seg ADD VIEW extension_owned_view; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. +SELECT create_distributed_table('table_for_ext_owned_view','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE VIEW extension_owned_view AS SELECT * FROM table_for_ext_owned_view; +-- Since the view is owned by extension Citus shouldn't propagate it +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%extension_owned_view%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +-- Try syncing metadata after running ALTER VIEW commands +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +-- Alter non-existing view +ALTER VIEW IF EXISTS non_existing_view ALTER COLUMN val1 SET DEFAULT random()::text; +NOTICE: relation "non_existing_view" does not exist, skipping +ALTER VIEW IF EXISTS non_existing_view SET (check_option=cascaded); +NOTICE: relation "non_existing_view" does not exist, skipping +ALTER VIEW IF EXISTS non_existing_view RENAME COLUMN val1 TO val2; +NOTICE: relation "non_existing_view" does not exist, skipping +ALTER VIEW IF EXISTS non_existing_view RENAME val2 TO val1; +NOTICE: relation "non_existing_view" does not exist, skipping +ALTER VIEW IF EXISTS non_existing_view SET SCHEMA view_prop_schema; +NOTICE: relation "non_existing_view" does not exist, skipping +-- Show that create view and alter view commands can be run from same transaction +-- but not the drop view. 
Since we can not use metadata connection for drop view commands +BEGIN; + SET LOCAL citus.force_max_query_parallelization TO ON; + CREATE TABLE table_1_to_view_in_transaction(a int); + SELECT create_distributed_table('table_1_to_view_in_transaction', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + + CREATE TABLE table_2_to_view_in_transaction(a int); + SELECT create_distributed_table('table_2_to_view_in_transaction', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + + -- we can create/alter/drop views even in parallel mode + CREATE VIEW view_in_transaction AS SELECT table_1_to_view_in_transaction.* FROM table_2_to_view_in_transaction JOIN table_1_to_view_in_transaction USING (a); + ALTER TABLE view_in_transaction SET (security_barrier); + ALTER VIEW view_in_transaction SET SCHEMA public; + ALTER VIEW public.view_in_transaction SET SCHEMA view_prop_schema_inner; + ALTER TABLE view_prop_schema_inner.view_in_transaction RENAME COLUMN a TO b; + DROP VIEW view_prop_schema_inner.view_in_transaction; +ERROR: cannot run view command because there was a parallel operation on a distributed table in the transaction +DETAIL: When running command on/for a distributed view, Citus needs to perform all operations over a single connection per node to ensure consistency. +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ROLLBACK; +SET client_min_messages TO ERROR; +DROP SCHEMA view_prop_schema_inner CASCADE; +DROP SCHEMA view_prop_schema CASCADE; diff --git a/src/test/regress/expected/views_create.out b/src/test/regress/expected/views_create.out index acc8f002f..ddd787c7e 100644 --- a/src/test/regress/expected/views_create.out +++ b/src/test/regress/expected/views_create.out @@ -1,6 +1,15 @@ CREATE SCHEMA views_create; SET search_path TO views_create; CREATE TABLE view_test_table(a INT NOT NULL PRIMARY KEY, b BIGINT, c text); +SELECT create_distributed_table('view_test_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Since creating view distributed or locally depends on the arbitrary config +-- set client_min_messages to ERROR to get consistent result. 
+SET client_min_messages TO ERROR; CREATE OR REPLACE VIEW select_filtered_view AS SELECT * FROM view_test_table WHERE c = 'testing' WITH CASCADED CHECK OPTION; @@ -9,12 +18,7 @@ CREATE OR REPLACE VIEW select_all_view AS WITH LOCAL CHECK OPTION; CREATE OR REPLACE VIEW count_view AS SELECT COUNT(*) FROM view_test_table; -SELECT create_distributed_table('view_test_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - +RESET client_min_messages; INSERT INTO view_test_table VALUES (1,1,'testing'), (2,1,'views'); SELECT * FROM count_view; count @@ -42,6 +46,8 @@ SELECT * FROM select_filtered_view; -- dummy temp recursive view CREATE TEMP RECURSIVE VIEW recursive_defined_non_recursive_view(c) AS (SELECT 1); +WARNING: "view recursive_defined_non_recursive_view" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "view recursive_defined_non_recursive_view" will be created only locally CREATE MATERIALIZED VIEW select_all_matview AS SELECT * FROM view_test_table WITH DATA; diff --git a/src/test/regress/expected/with_prepare.out b/src/test/regress/expected/with_prepare.out index 993f905b8..6b0bf7d7f 100644 --- a/src/test/regress/expected/with_prepare.out +++ b/src/test/regress/expected/with_prepare.out @@ -211,6 +211,24 @@ ORDER BY user_id, time LIMIT 10; +-- +-- Test a prepared statement with unused argument +-- +CREATE TYPE foo as (x int, y int); +CREATE TABLE footest (x int, y int, z foo); +SELECT create_distributed_table('footest','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO footest VALUES(1, 2, (3,4)); +-- Add a redundant parameter +PREPARE prepared_test_9(foo,foo) AS +WITH a AS ( + SELECT * FROM footest WHERE z = $1 AND x = 1 OFFSET 0 +) +SELECT * FROM a; EXECUTE prepared_test_1; user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- @@ -883,6 +901,42 @@ EXECUTE prepared_test_8; (10 rows) ROLLBACK; +EXECUTE prepared_test_9('(3,4)','(2,3)'); + x | y | z +--------------------------------------------------------------------- + 1 | 2 | (3,4) +(1 row) + +EXECUTE prepared_test_9('(3,4)','(2,3)'); + x | y | z +--------------------------------------------------------------------- + 1 | 2 | (3,4) +(1 row) + +EXECUTE prepared_test_9('(3,4)','(2,3)'); + x | y | z +--------------------------------------------------------------------- + 1 | 2 | (3,4) +(1 row) + +EXECUTE prepared_test_9('(3,4)','(2,3)'); + x | y | z +--------------------------------------------------------------------- + 1 | 2 | (3,4) +(1 row) + +EXECUTE prepared_test_9('(3,4)','(2,3)'); + x | y | z +--------------------------------------------------------------------- + 1 | 2 | (3,4) +(1 row) + +EXECUTE prepared_test_9('(3,4)','(2,3)'); + x | y | z +--------------------------------------------------------------------- + 1 | 2 | (3,4) +(1 row) + EXECUTE prepared_partition_column_insert(1); user_id | time | value_1 | value_2 | value_3 | value_4 --------------------------------------------------------------------- diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source index a017fe342..ba6ad675f 100644 --- a/src/test/regress/input/multi_copy.source +++ b/src/test/regress/input/multi_copy.source @@ -508,6 +508,7 @@ INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) 
values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'second_dustbunnies'::regclass::oid, 0); +SET client_min_messages TO ERROR; SELECT 1 FROM master_activate_node('localhost', :worker_1_port); RESET client_min_messages; RESET citus.shard_replication_factor; diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index 9eb45fe49..ac60c7c49 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -94,5 +94,6 @@ test: isolation_metadata_sync_deadlock test: isolation_replicated_dist_on_mx test: isolation_replicate_reference_tables_to_coordinator test: isolation_multiuser_locking +test: isolation_acquire_distributed_locks test: isolation_check_mx diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index e95676e2c..1897f7d6c 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -324,6 +324,7 @@ test: distributed_collations test: distributed_procedure test: distributed_collations_conflict test: function_propagation +test: view_propagation test: check_mx # --------- diff --git a/src/test/regress/multi_mx_schedule b/src/test/regress/multi_mx_schedule index ff5cccf11..546c9de44 100644 --- a/src/test/regress/multi_mx_schedule +++ b/src/test/regress/multi_mx_schedule @@ -43,7 +43,9 @@ test: coordinator_evaluation_modify test: coordinator_evaluation_select test: multi_mx_call test: multi_mx_function_call_delegation -test: multi_mx_modifications local_shard_execution local_shard_execution_replicated +test: multi_mx_modifications local_shard_execution_replicated +# the following test has to be run sequentially +test: local_shard_execution test: multi_mx_repartition_udt_w1 multi_mx_repartition_udt_w2 test: local_shard_copy test: undistribute_table_cascade_mx @@ -59,6 +61,7 @@ test: locally_execute_intermediate_results test: multi_mx_alter_distributed_table test: update_colocation_mx test: resync_metadata_with_sequences +test: distributed_locks # should be executed sequentially because it modifies metadata test: local_shard_execution_dropped_column diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index f709263fd..5692ed347 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -634,9 +634,8 @@ INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'super_packed_numbers_hash'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'second_dustbunnies'::regclass::oid, 0); +SET client_min_messages TO ERROR; SELECT 1 FROM master_activate_node('localhost', :worker_1_port); -NOTICE: Replicating postgres objects to node localhost:57637 -DETAIL: There are 115 objects to replicate, depending on your environment this might take a while ?column? 
--------------------------------------------------------------------- 1 diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 9b03b88d8..ff67ce433 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -464,9 +464,10 @@ push(@pgOptions, "citus.shard_replication_factor=2"); push(@pgOptions, "citus.node_connection_timeout=${connectionTimeout}"); push(@pgOptions, "citus.explain_analyze_sort_method='taskId'"); push(@pgOptions, "citus.enable_manual_changes_to_shards=on"); +push(@pgOptions, "citus.allow_unsafe_locks_from_workers=on"); # Some tests look at shards in pg_class, make sure we can usually see them: -push(@pgOptions, "citus.hide_shards_from_app_name_prefixes='psql,pg_dump'"); +push(@pgOptions, "citus.show_shards_for_app_name_prefixes='pg_regress'"); # we disable slow start by default to encourage parallelism within tests push(@pgOptions, "citus.executor_slow_start_interval=0ms"); diff --git a/src/test/regress/spec/isolation_acquire_distributed_locks.spec b/src/test/regress/spec/isolation_acquire_distributed_locks.spec new file mode 100644 index 000000000..eb6d51e68 --- /dev/null +++ b/src/test/regress/spec/isolation_acquire_distributed_locks.spec @@ -0,0 +1,243 @@ +#include "isolation_mx_common.include.spec" + +setup +{ + SELECT citus_set_coordinator_host('localhost', 57636); + + CREATE TABLE dist_table(a int); + CREATE TABLE citus_local_table(a int); + CREATE TABLE local_table(a int); + CREATE TABLE ref_table(a int); + + CREATE TABLE partitioned_table(a int) + PARTITION BY RANGE(a); + + CREATE TABLE partition_1 PARTITION OF partitioned_table + FOR VALUES FROM (1) TO (11); + + CREATE TABLE partition_2 PARTITION OF partitioned_table + FOR VALUES FROM (11) TO (21); + + SELECT create_distributed_table('dist_table', 'a'); + SELECT create_reference_table('ref_table'); + SELECT citus_add_local_table_to_metadata('citus_local_table'); + SELECT create_distributed_table('partitioned_table', 'a'); + + CREATE VIEW sub_view(a) AS + SELECT 2 * a AS a + FROM ref_table; + + CREATE VIEW main_view AS + SELECT t1.a a1, t2.a a2, t3.a a3 + FROM dist_table t1 + JOIN citus_local_table t2 ON t1.a = t2.a + JOIN sub_view t3 ON t2.a = t3.a; + + INSERT INTO dist_table SELECT n FROM generate_series(1, 5) n; + INSERT INTO citus_local_table SELECT n FROM generate_series(1, 5) n; + INSERT INTO local_table SELECT n FROM generate_series(1, 5) n; + INSERT INTO ref_table SELECT n FROM generate_series(1, 5) n; + INSERT INTO partitioned_table SELECT n FROM generate_series(8, 12) n; +} + +teardown +{ + DROP VIEW main_view; + DROP VIEW sub_view; + DROP TABLE dist_table; + DROP TABLE citus_local_table; + DROP TABLE local_table; + DROP TABLE ref_table; + DROP TABLE partitioned_table; + + SELECT citus_remove_node('localhost', 57636); + + SELECT citus_internal.restore_isolation_tester_func(); +} + +// coordinator session +session "coor" + +step "coor-begin" +{ + BEGIN; +} + +step "coor-acquire-aggresive-lock-on-dist-table" +{ + LOCK dist_table IN ACCESS EXCLUSIVE MODE; +} + +step "coor-acquire-aggresive-lock-on-dist-table-nowait" +{ + LOCK dist_table IN ACCESS EXCLUSIVE MODE NOWAIT; +} + +step "coor-acquire-weak-lock-on-dist-table" +{ + LOCK dist_table IN ACCESS SHARE MODE; +} + +step "coor-acquire-aggresive-lock-on-view" +{ + LOCK main_view IN ACCESS EXCLUSIVE MODE; +} + +step "coor-acquire-aggresive-lock-on-only-view" +{ + LOCK ONLY main_view IN ACCESS EXCLUSIVE MODE; +} + +step "coor-acquire-aggresive-lock-on-view-nowait" +{ + LOCK main_view IN 
ACCESS EXCLUSIVE MODE NOWAIT; +} + +step "coor-lock-all" +{ + LOCK dist_table, citus_local_table, ref_table, main_view, sub_view, local_table IN ACCESS EXCLUSIVE MODE; +} + +step "coor-read-dist-table" +{ + SELECT COUNT(*) FROM dist_table; +} + +step "coor-read-ref-table" +{ + SELECT COUNT(*) FROM ref_table; +} + +step "coor-acquire-aggresive-lock-on-partitioned-table" +{ + LOCK partitioned_table IN ACCESS EXCLUSIVE MODE; +} + +step "coor-acquire-aggresive-lock-on-partitioned-table-with-*-syntax" +{ + LOCK partitioned_table * IN ACCESS EXCLUSIVE MODE; +} + +step "coor-acquire-aggresive-lock-on-only-partitioned-table" +{ + LOCK ONLY partitioned_table IN ACCESS EXCLUSIVE MODE; +} + +step "coor-acquire-aggresive-lock-on-ref-table" +{ + LOCK ref_table IN ACCESS EXCLUSIVE MODE; +} + +step "coor-rollback" +{ + ROLLBACK; +} + +// worker 1 xact session +session "w1" + +step "w1-start-session-level-connection" +{ + SELECT start_session_level_connection_to_node('localhost', 57637); +} + +step "w1-begin" +{ + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +} + +step "w1-read-dist-table" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM dist_table'); +} + +step "w1-read-ref-table" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM ref_table'); +} + +step "w1-read-citus-local-table" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM citus_local_table'); +} + +step "w1-acquire-aggressive-lock-dist-table" { + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); +} + +step "w1-lock-reference-table" +{ + SELECT run_commands_on_session_level_connection_to_node('LOCK ref_table IN ACCESS EXCLUSIVE MODE'); +} + +step "w1-read-partitioned-table" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partitioned_table'); +} + +step "w1-read-partition-of-partitioned-table" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM partition_1'); +} + +step "w1-read-main-view" +{ + SELECT run_commands_on_session_level_connection_to_node('SELECT COUNT(*) FROM main_view'); +} + +step "w1-rollback" +{ + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); +} + +step "w1-stop-connection" +{ + SELECT stop_session_level_connection_to_node(); +} + +// worker 2 xact session +session "w2" + +step "w2-start-session-level-connection" +{ + SELECT start_session_level_connection_to_node('localhost', 57638); +} + +step "w2-begin" +{ + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +} + +step "w2-acquire-aggressive-lock-dist-table" { + SELECT run_commands_on_session_level_connection_to_node('LOCK dist_table IN ACCESS EXCLUSIVE MODE'); +} + +step "w2-rollback" +{ + SELECT run_commands_on_session_level_connection_to_node('ROLLBACK'); +} + +step "w2-stop-connection" +{ + SELECT stop_session_level_connection_to_node(); +} + +permutation "coor-begin" "coor-acquire-aggresive-lock-on-dist-table" "w1-start-session-level-connection" "w1-begin" "w1-read-dist-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-dist-table" "w1-start-session-level-connection" "w1-begin" "w1-acquire-aggressive-lock-dist-table" "coor-rollback" "coor-read-dist-table" "w1-rollback" "w1-stop-connection" +permutation "w1-start-session-level-connection" "w1-begin" "w1-acquire-aggressive-lock-dist-table" "coor-begin" 
"coor-acquire-aggresive-lock-on-dist-table-nowait" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "w1-start-session-level-connection" "w1-begin" "w2-start-session-level-connection" "w2-begin" "w1-acquire-aggressive-lock-dist-table" "w2-acquire-aggressive-lock-dist-table" "w1-rollback" "w1-read-dist-table" "w2-rollback" "w1-stop-connection" "w2-stop-connection" +permutation "coor-begin" "coor-acquire-weak-lock-on-dist-table" "w1-start-session-level-connection" "w1-begin" "w1-read-dist-table" "w1-acquire-aggressive-lock-dist-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "w1-start-session-level-connection" "w1-begin" "w1-lock-reference-table" "coor-begin" "coor-read-ref-table" "w1-rollback" "coor-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-view" "w1-start-session-level-connection" "w1-begin" "w1-read-dist-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-view" "w1-start-session-level-connection" "w1-begin" "w1-acquire-aggressive-lock-dist-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-view" "w1-start-session-level-connection" "w1-begin" "w1-read-ref-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-only-view" "w1-start-session-level-connection" "w1-begin" "w1-read-ref-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "w1-start-session-level-connection" "w1-begin" "w1-acquire-aggressive-lock-dist-table" "coor-begin" "coor-acquire-aggresive-lock-on-view-nowait" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-lock-all" "w1-start-session-level-connection" "w1-begin" "w1-read-citus-local-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-partitioned-table" "w1-start-session-level-connection" "w1-begin" "w1-read-partitioned-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-partitioned-table" "w1-start-session-level-connection" "w1-begin" "w1-read-partition-of-partitioned-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-partitioned-table-with-*-syntax" "w1-start-session-level-connection" "w1-begin" "w1-read-partition-of-partitioned-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-only-partitioned-table" "w1-start-session-level-connection" "w1-begin" "w1-read-partitioned-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-only-partitioned-table" "w1-start-session-level-connection" "w1-begin" "w1-read-partition-of-partitioned-table" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-acquire-aggresive-lock-on-ref-table" "w1-start-session-level-connection" "w1-begin" "w1-read-main-view" "coor-rollback" "w1-rollback" "w1-stop-connection" +permutation "coor-begin" "coor-read-dist-table" "w2-start-session-level-connection" "w2-begin" "w1-start-session-level-connection" "w1-begin" "w2-acquire-aggressive-lock-dist-table" "w1-acquire-aggressive-lock-dist-table" "coor-rollback" "w2-rollback" "w1-rollback" "w1-stop-connection" "w2-stop-connection" diff --git a/src/test/regress/spec/isolation_concurrent_dml.spec 
b/src/test/regress/spec/isolation_concurrent_dml.spec index a2220f3df..35f2fd7a4 100644 --- a/src/test/regress/spec/isolation_concurrent_dml.spec +++ b/src/test/regress/spec/isolation_concurrent_dml.spec @@ -3,8 +3,8 @@ setup SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.refresh_isolation_tester_prepared_statement(); CREATE TABLE test_concurrent_dml (test_id integer NOT NULL, data text); - SELECT master_create_distributed_table('test_concurrent_dml', 'test_id', 'hash'); - SELECT master_create_worker_shards('test_concurrent_dml', 4, 2); + SET citus.shard_replication_factor TO 2; + SELECT create_distributed_table('test_concurrent_dml', 'test_id', 'hash', shard_count:=4); } teardown diff --git a/src/test/regress/spec/isolation_dml_vs_repair.spec b/src/test/regress/spec/isolation_dml_vs_repair.spec index dbde8167e..ddf3c5b4f 100644 --- a/src/test/regress/spec/isolation_dml_vs_repair.spec +++ b/src/test/regress/spec/isolation_dml_vs_repair.spec @@ -1,24 +1,8 @@ setup { - CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, - distribution_column text, - distribution_method citus.distribution_type) - RETURNS void - LANGUAGE C STRICT - AS 'citus', $$master_create_distributed_table$$; - COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, - distribution_column text, - distribution_method citus.distribution_type) - IS 'define the table distribution functions'; - -- this function is dropped in Citus10, added here for tests - CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer, - replication_factor integer DEFAULT 2) - RETURNS void - AS 'citus', $$master_create_worker_shards$$ - LANGUAGE C STRICT; CREATE TABLE test_dml_vs_repair (test_id integer NOT NULL, data int); - SELECT master_create_distributed_table('test_dml_vs_repair', 'test_id', 'hash'); - SELECT master_create_worker_shards('test_dml_vs_repair', 1, 2); + SET citus.shard_replication_factor TO 2; + SELECT create_distributed_table('test_dml_vs_repair', 'test_id', 'hash', shard_count:=1); } teardown diff --git a/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec b/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec index 089ec473e..032bf0d02 100644 --- a/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec +++ b/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec @@ -89,9 +89,9 @@ step "s2-select-for-update" SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE'); } -step "s2-coordinator-create-index-concurrently" +step "s2-flaky-coordinator-create-index-concurrently" { - CREATE INDEX CONCURRENTLY dist_table_index_conc ON dist_table(id); + CREATE INDEX CONCURRENTLY flaky_dist_table_index_conc ON dist_table(id); } step "s2-commit-worker" @@ -117,4 +117,4 @@ step "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-insert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-alter" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-begin" "s1-index" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit" "s2-commit-worker" "s2-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select-for-update" "s2-start-session-level-connection" "s2-begin-on-worker" 
"s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" -permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select-for-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" +permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select-for-update" "s2-flaky-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" diff --git a/src/test/regress/spec/isolation_hash_copy_vs_all.spec b/src/test/regress/spec/isolation_hash_copy_vs_all.spec index d7174b0cc..58ec28f5c 100644 --- a/src/test/regress/spec/isolation_hash_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_hash_copy_vs_all.spec @@ -78,7 +78,7 @@ step "s2-truncate" { TRUNCATE hash_copy; } step "s2-drop" { DROP TABLE hash_copy; } step "s2-ddl-create-index" { CREATE INDEX hash_copy_index ON hash_copy(id); } step "s2-ddl-drop-index" { DROP INDEX hash_copy_index; } -step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY hash_copy_index ON hash_copy(id); } +step "s2-flaky-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY flaky_hash_copy_index ON hash_copy(id); } step "s2-ddl-add-column" { ALTER TABLE hash_copy ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE hash_copy DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE hash_copy RENAME data TO new_column; } @@ -102,7 +102,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-s permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" +permutation "s1-initialize" "s1-begin" "s1-copy" "s2-flaky-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" diff --git a/src/test/regress/spec/isolation_reference_copy_vs_all.spec b/src/test/regress/spec/isolation_reference_copy_vs_all.spec index b327230d9..e08223db2 100644 --- a/src/test/regress/spec/isolation_reference_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_reference_copy_vs_all.spec @@ -68,7 +68,7 @@ step "s2-truncate" { TRUNCATE reference_copy; } step "s2-drop" { DROP TABLE reference_copy; } step "s2-ddl-create-index" { CREATE INDEX reference_copy_index ON reference_copy(id); } step "s2-ddl-drop-index" { DROP INDEX reference_copy_index; } -step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY reference_copy_index ON reference_copy(id); } +step "s2-flaky-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY flaky_reference_copy_index ON reference_copy(id); } step "s2-ddl-add-column" { ALTER TABLE reference_copy ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE reference_copy DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE 
reference_copy RENAME data TO new_column; } @@ -91,7 +91,7 @@ permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-s permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" +permutation "s1-initialize" "s1-begin" "s1-copy" "s2-flaky-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index f55f869dd..867e2bd3a 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -89,12 +89,14 @@ step "s2-view-worker" SELECT query, state, wait_event_type, wait_event, usename, datname FROM citus_stat_activity WHERE query NOT ILIKE ALL(VALUES + ('%application_name%'), ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), ('%citus_internal_local_blocked_processes%'), ('%add_node%'), - ('%csa_from_one_node%')) + ('%csa_from_one_node%'), + ('%pg_locks%')) AND is_worker_query = true AND backend_type = 'client backend' AND query != '' diff --git a/src/test/regress/spec/isolation_select_for_update.spec b/src/test/regress/spec/isolation_select_for_update.spec index 3eb16a94e..4dcd2f42c 100644 --- a/src/test/regress/spec/isolation_select_for_update.spec +++ b/src/test/regress/spec/isolation_select_for_update.spec @@ -10,7 +10,9 @@ setup SELECT create_distributed_table('test_table_1_rf1','id'); INSERT INTO test_table_1_rf1 values(1,2),(2,3),(3,4); + SET citus.enable_ddl_propagation TO OFF; CREATE VIEW test_1 AS SELECT * FROM test_table_1_rf1 WHERE val_1 = 2; + RESET citus.enable_ddl_propagation; CREATE TABLE test_table_2_rf1(id int, val_1 int); SELECT create_distributed_table('test_table_2_rf1','id'); diff --git a/src/test/regress/spec/isolation_select_vs_all_on_mx.spec b/src/test/regress/spec/isolation_select_vs_all_on_mx.spec index 5ce2d0cce..66201af7f 100644 --- a/src/test/regress/spec/isolation_select_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_select_vs_all_on_mx.spec @@ -99,9 +99,9 @@ step "s2-select-for-update" SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table WHERE id = 6 FOR UPDATE'); } -step "s2-coordinator-create-index-concurrently" +step "s2-flaky-coordinator-create-index-concurrently" { - CREATE INDEX CONCURRENTLY select_table_index ON select_table(id); + CREATE INDEX CONCURRENTLY flaky_select_table_index ON select_table(id); } step "s2-commit-worker" @@ -135,4 +135,4 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select" permutation "s1-start-session-level-connection" 
"s1-begin-on-worker" "s1-select" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select" "s2-begin" "s2-index" "s1-commit-worker" "s2-commit" "s1-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" -permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-disable-binary-protocol-on-worker" "s1-select" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" +permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-disable-binary-protocol-on-worker" "s1-select" "s2-flaky-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" diff --git a/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec b/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec index 445005e66..ac87c4b05 100644 --- a/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec @@ -2,17 +2,29 @@ setup { - CREATE TABLE truncate_table(id integer, value integer); + CREATE TABLE truncate_table(id integer, value integer, PRIMARY KEY(id)); + CREATE TABLE data_table(id integer, value integer); + CREATE TABLE referencing_table_1 (id integer, PRIMARY KEY(id), FOREIGN KEY (id) REFERENCES truncate_table(id)); + CREATE TABLE referencing_table_2 (id integer, PRIMARY KEY(id), FOREIGN KEY (id) REFERENCES referencing_table_1(id)); + SELECT create_distributed_table('truncate_table', 'id'); + SELECT create_distributed_table('data_table', 'id'); + SELECT create_distributed_table('referencing_table_1', 'id'); + SELECT create_distributed_table('referencing_table_2', 'id'); + COPY truncate_table FROM PROGRAM 'echo 1, 10 && echo 2, 20 && echo 3, 30 && echo 4, 40 && echo 5, 50' WITH CSV; + COPY data_table FROM PROGRAM 'echo 20, 20 && echo 30, 30 && echo 40, 40 && echo 50, 50' WITH CSV; } // Create and use UDF to close the connection opened in the setup step. Also return the cluster // back to the initial state. 
teardown { - DROP TABLE IF EXISTS truncate_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); + DROP TABLE IF EXISTS data_table; + DROP TABLE IF EXISTS referencing_table_2; + DROP TABLE IF EXISTS referencing_table_1; + DROP TABLE IF EXISTS truncate_table CASCADE; + SELECT citus_internal.restore_isolation_tester_func(); } session "s1" @@ -36,7 +48,7 @@ step "s1-begin-on-worker" step "s1-truncate" { - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); } step "s1-select" @@ -46,7 +58,7 @@ step "s1-select" step "s1-insert-select" { - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO truncate_table SELECT * FROM truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO truncate_table SELECT * FROM data_table'); } step "s1-delete" @@ -56,7 +68,7 @@ step "s1-delete" step "s1-copy" { - SELECT run_commands_on_session_level_connection_to_node('COPY truncate_table FROM PROGRAM ''echo 5, 50 && echo 9, 90 && echo 10, 100''WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY truncate_table FROM PROGRAM ''echo 6, 60 && echo 9, 90 && echo 10, 100''WITH CSV'); } step "s1-alter" @@ -101,7 +113,7 @@ step "s2-begin-on-worker" step "s2-truncate" { - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table'); + SELECT run_commands_on_session_level_connection_to_node('TRUNCATE truncate_table CASCADE'); } step "s2-commit-worker" @@ -122,6 +134,11 @@ step "s3-select-count" SELECT COUNT(*) FROM truncate_table; } +step "s3-select-count-from-ref-table" +{ + SELECT COUNT(*) FROM referencing_table_2; +} + permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-truncate" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" @@ -131,3 +148,4 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-copy" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-begin" "s1-alter" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit" "s2-commit-worker" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select-for-update" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" +permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-truncate" "s3-select-count-from-ref-table" "s1-commit-worker" "s1-stop-connection" diff --git a/src/test/regress/sql/alter_distributed_table.sql b/src/test/regress/sql/alter_distributed_table.sql index 8e4076ac5..f9209eaba 100644 --- a/src/test/regress/sql/alter_distributed_table.sql +++ b/src/test/regress/sql/alter_distributed_table.sql @@ -300,5 +300,41 @@ CREATE MATERIALIZED VIEW test_mat_view_am USING COLUMNAR AS SELECT count(*), a F SELECT alter_distributed_table('test_am_matview', shard_count:= 52); SELECT amname FROM pg_am WHERE oid IN (SELECT relam FROM pg_class WHERE relname ='test_mat_view_am'); +-- verify that alter_distributed_table works if it has dependent views and 
materialized views +-- set colocate_with explicitly to not to affect other tables +CREATE SCHEMA schema_to_test_alter_dist_table; +SET search_path to schema_to_test_alter_dist_table; + +CREATE TABLE test_alt_dist_table_1(a int, b int); +SELECT create_distributed_table('test_alt_dist_table_1', 'a', colocate_with => 'None'); + +CREATE TABLE test_alt_dist_table_2(a int, b int); +SELECT create_distributed_table('test_alt_dist_table_2', 'a', colocate_with => 'test_alt_dist_table_1'); + +CREATE VIEW dependent_view_1 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2; +CREATE VIEW dependent_view_2 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2 JOIN test_alt_dist_table_1 USING(a); + +CREATE MATERIALIZED VIEW dependent_mat_view_1 AS SELECT test_alt_dist_table_2.* FROM test_alt_dist_table_2; + +-- Alter owner to make sure that alter_distributed_table doesn't change view's owner SET client_min_messages TO WARNING; +CREATE USER alter_dist_table_test_user; +SELECT 1 FROM run_command_on_workers($$CREATE USER alter_dist_table_test_user$$); + +ALTER VIEW dependent_view_1 OWNER TO alter_dist_table_test_user; +ALTER VIEW dependent_view_2 OWNER TO alter_dist_table_test_user; +ALTER MATERIALIZED VIEW dependent_mat_view_1 OWNER TO alter_dist_table_test_user; + +SELECT alter_distributed_table('test_alt_dist_table_1', shard_count:=12, cascade_to_colocated:=true); +SELECT viewowner FROM pg_views WHERE viewname IN ('dependent_view_1', 'dependent_view_2'); +SELECT matviewowner FROM pg_matviews WHERE matviewname = 'dependent_mat_view_1'; + +-- Check the existence of the view on the worker node as well +SELECT run_command_on_workers($$SELECT viewowner FROM pg_views WHERE viewname = 'dependent_view_1'$$); +SELECT run_command_on_workers($$SELECT viewowner FROM pg_views WHERE viewname = 'dependent_view_2'$$); +-- It is expected to not have mat view on worker node +SELECT run_command_on_workers($$SELECT count(*) FROM pg_matviews WHERE matviewname = 'dependent_mat_view_1';$$); +RESET search_path; + DROP SCHEMA alter_distributed_table CASCADE; +DROP SCHEMA schema_to_test_alter_dist_table CASCADE; diff --git a/src/test/regress/sql/alter_table_set_access_method.sql b/src/test/regress/sql/alter_table_set_access_method.sql index 7ddadc531..0ffabf664 100644 --- a/src/test/regress/sql/alter_table_set_access_method.sql +++ b/src/test/regress/sql/alter_table_set_access_method.sql @@ -224,7 +224,6 @@ select alter_table_set_access_method('local','columnar'); select alter_table_set_access_method('ref','columnar'); select alter_table_set_access_method('dist','columnar'); - SELECT alter_distributed_table('dist', shard_count:=1, cascade_to_colocated:=false); select alter_table_set_access_method('local','heap'); diff --git a/src/test/regress/sql/citus_local_table_triggers.sql b/src/test/regress/sql/citus_local_table_triggers.sql index 95b700a43..ed53fefad 100644 --- a/src/test/regress/sql/citus_local_table_triggers.sql +++ b/src/test/regress/sql/citus_local_table_triggers.sql @@ -292,7 +292,7 @@ BEGIN; SELECT * FROM reference_table; ROLLBACK; --- cannot perform remote execution from a trigger on a Citus local table +-- can perform remote execution from a trigger on a Citus local table BEGIN; -- update should actually update something to test ON UPDATE CASCADE logic INSERT INTO another_citus_local_table VALUES (600); diff --git a/src/test/regress/sql/citus_local_tables_queries_mx.sql b/src/test/regress/sql/citus_local_tables_queries_mx.sql index ddcc95d84..cad6a0386 100644 --- 
a/src/test/regress/sql/citus_local_tables_queries_mx.sql +++ b/src/test/regress/sql/citus_local_tables_queries_mx.sql @@ -445,20 +445,24 @@ SELECT count(*) FROM mat_view_4; SELECT count(*) FROM distributed_table WHERE b in (SELECT count FROM mat_view_4); +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW view_2 AS SELECT count(*) FROM citus_local_table JOIN citus_local_table_2 USING (a) JOIN distributed_table USING (a); +RESET citus.enable_ddl_propagation; -- should fail as view contains direct local dist join SELECT count(*) FROM view_2; +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW view_3 AS SELECT count(*) FROM citus_local_table_2 JOIN reference_table USING (a); +RESET citus.enable_ddl_propagation; -- ok SELECT count(*) FROM view_3; diff --git a/src/test/regress/sql/columnar_citus_integration.sql b/src/test/regress/sql/columnar_citus_integration.sql index 78d54ea26..a64a37108 100644 --- a/src/test/regress/sql/columnar_citus_integration.sql +++ b/src/test/regress/sql/columnar_citus_integration.sql @@ -1,4 +1,10 @@ -SET columnar.compression TO 'none'; + +SELECT success, result FROM run_command_on_all_nodes($cmd$ + ALTER SYSTEM SET columnar.compression TO 'none' +$cmd$); +SELECT success, result FROM run_command_on_all_nodes($cmd$ + SELECT pg_reload_conf() +$cmd$); CREATE SCHEMA columnar_citus_integration; SET search_path TO columnar_citus_integration; @@ -15,93 +21,93 @@ SELECT create_distributed_table('table_option', 'a'); -- setting: compression -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option', compression => 'pglz'); +ALTER TABLE table_option SET (columnar.compression = pglz); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option', compression => true); +ALTER TABLE table_option RESET (columnar.compression); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: compression_level -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option', compression_level => 13); +ALTER TABLE table_option SET (columnar.compression_level = 13); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option', compression_level => true); +ALTER TABLE table_option RESET (columnar.compression_level); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = 
'%s'::regclass; $cmd$); -- setting: chunk_group_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option', chunk_group_row_limit => 2000); +ALTER TABLE table_option SET (columnar.chunk_group_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option', chunk_group_row_limit => true); +ALTER TABLE table_option RESET (columnar.chunk_group_row_limit); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: stripe_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option', stripe_row_limit => 2000); +ALTER TABLE table_option SET (columnar.stripe_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option', stripe_row_limit => true); +ALTER TABLE table_option RESET (columnar.stripe_row_limit); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- verify settings are propagated when creating a table CREATE TABLE table_option_2 (a int, b text) USING columnar; -SELECT alter_columnar_table_set('table_option_2', - chunk_group_row_limit => 2000, - stripe_row_limit => 20000, - compression => 'pglz', - compression_level => 15); +ALTER TABLE table_option_2 SET + (columnar.chunk_group_row_limit = 2000, + columnar.stripe_row_limit = 20000, + columnar.compression = pglz, + columnar.compression_level = 15); SELECT create_distributed_table('table_option_2', 'a'); -- verify settings on placements SELECT run_command_on_placements('table_option_2',$cmd$ - SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- verify undistribute works SELECT undistribute_table('table_option'); SELECT * FROM pg_dist_partition WHERE logicalrelid = 'table_option'::regclass; -SELECT compression FROM columnar.options WHERE regclass = 'table_option'::regclass; +SELECT compression FROM columnar.options WHERE relation = 'table_option'::regclass; DROP TABLE table_option, table_option_2; @@ -115,93 +121,93 @@ SELECT 
create_distributed_table('table_option', 'a'); -- setting: compression -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option', compression => 'pglz'); +ALTER TABLE table_option SET (columnar.compression = pglz); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option', compression => true); +ALTER TABLE table_option RESET (columnar.compression); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: compression_level -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option', compression_level => 17); +ALTER TABLE table_option SET (columnar.compression_level = 17); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option', compression_level => true); +ALTER TABLE table_option RESET (columnar.compression_level); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: chunk_group_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option', chunk_group_row_limit => 2000); +ALTER TABLE table_option SET (columnar.chunk_group_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option', chunk_group_row_limit => true); +ALTER TABLE table_option RESET (columnar.chunk_group_row_limit); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: stripe_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation 
= '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option', stripe_row_limit => 2000); +ALTER TABLE table_option SET (columnar.stripe_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option', stripe_row_limit => true); +ALTER TABLE table_option RESET (columnar.stripe_row_limit); -- verify setting SELECT run_command_on_placements('table_option',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- verify settings are propagated when creating a table CREATE TABLE table_option_2 (a int, b text) USING columnar; -SELECT alter_columnar_table_set('table_option_2', - chunk_group_row_limit => 2000, - stripe_row_limit => 20000, - compression => 'pglz', - compression_level => 19); +ALTER TABLE table_option_2 SET + (columnar.chunk_group_row_limit = 2000, + columnar.stripe_row_limit = 20000, + columnar.compression = pglz, + columnar.compression_level = 19); SELECT create_distributed_table('table_option_2', 'a'); -- verify settings on placements SELECT run_command_on_placements('table_option_2',$cmd$ - SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- verify undistribute works SELECT undistribute_table('table_option'); SELECT * FROM pg_dist_partition WHERE logicalrelid = 'table_option'::regclass; -SELECT compression FROM columnar.options WHERE regclass = 'table_option'::regclass; +SELECT compression FROM columnar.options WHERE relation = 'table_option'::regclass; DROP TABLE table_option, table_option_2; @@ -212,93 +218,93 @@ SELECT create_reference_table('table_option_reference'); -- setting: compression -- get baseline for setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option_reference', compression => 'pglz'); +ALTER TABLE table_option_reference SET (columnar.compression = pglz); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option_reference', compression => true); +ALTER TABLE table_option_reference RESET (columnar.compression); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: compression_level -- get baseline for setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); 
-- change setting -SELECT alter_columnar_table_set('table_option_reference', compression_level => 11); +ALTER TABLE table_option_reference SET (columnar.compression_level = 11); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option_reference', compression_level => true); +ALTER TABLE table_option_reference RESET (columnar.compression_level); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: chunk_group_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option_reference', chunk_group_row_limit => 2000); +ALTER TABLE table_option_reference SET (columnar.chunk_group_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option_reference', chunk_group_row_limit => true); +ALTER TABLE table_option_reference RESET (columnar.chunk_group_row_limit); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: stripe_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option_reference', stripe_row_limit => 2000); +ALTER TABLE table_option_reference SET (columnar.stripe_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option_reference', stripe_row_limit => true); +ALTER TABLE table_option_reference RESET (columnar.stripe_row_limit); -- verify setting SELECT run_command_on_placements('table_option_reference',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- verify settings are propagated when creating a table CREATE TABLE table_option_reference_2 (a int, b text) USING columnar; -SELECT alter_columnar_table_set('table_option_reference_2', - chunk_group_row_limit => 2000, - stripe_row_limit => 20000, - compression => 'pglz', - compression_level => 9); +ALTER TABLE 
table_option_reference_2 SET + (columnar.chunk_group_row_limit = 2000, + columnar.stripe_row_limit = 20000, + columnar.compression = pglz, + columnar.compression_level = 9); SELECT create_reference_table('table_option_reference_2'); -- verify settings on placements SELECT run_command_on_placements('table_option_reference_2',$cmd$ - SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- verify undistribute works SELECT undistribute_table('table_option_reference'); SELECT * FROM pg_dist_partition WHERE logicalrelid = 'table_option_reference'::regclass; -SELECT compression FROM columnar.options WHERE regclass = 'table_option_reference'::regclass; +SELECT compression FROM columnar.options WHERE relation = 'table_option_reference'::regclass; DROP TABLE table_option_reference, table_option_reference_2; @@ -312,93 +318,93 @@ SELECT citus_add_local_table_to_metadata('table_option_citus_local'); -- setting: compression -- get baseline for setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option_citus_local', compression => 'pglz'); +ALTER TABLE table_option_citus_local SET (columnar.compression = pglz); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option_citus_local', compression => true); +ALTER TABLE table_option_citus_local RESET (columnar.compression); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: compression_level -- get baseline for setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option_citus_local', compression_level => 11); +ALTER TABLE table_option_citus_local SET (columnar.compression_level = 11); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option_citus_local', compression_level => true); +ALTER TABLE table_option_citus_local RESET (columnar.compression_level); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT compression_level FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT compression_level FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: chunk_group_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - 
SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option_citus_local', chunk_group_row_limit => 2000); +ALTER TABLE table_option_citus_local SET (columnar.chunk_group_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option_citus_local', chunk_group_row_limit => true); +ALTER TABLE table_option_citus_local RESET (columnar.chunk_group_row_limit); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT chunk_group_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT chunk_group_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- setting: stripe_row_limit -- get baseline for setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- change setting -SELECT alter_columnar_table_set('table_option_citus_local', stripe_row_limit => 2000); +ALTER TABLE table_option_citus_local SET (columnar.stripe_row_limit = 2000); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- reset setting -SELECT alter_columnar_table_reset('table_option_citus_local', stripe_row_limit => true); +ALTER TABLE table_option_citus_local RESET (columnar.stripe_row_limit); -- verify setting SELECT run_command_on_placements('table_option_citus_local',$cmd$ - SELECT stripe_row_limit FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT stripe_row_limit FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- verify settings are propagated when creating a table CREATE TABLE table_option_citus_local_2 (a int, b text) USING columnar; -SELECT alter_columnar_table_set('table_option_citus_local_2', - chunk_group_row_limit => 2000, - stripe_row_limit => 20000, - compression => 'pglz', - compression_level => 9); +ALTER TABLE table_option_citus_local_2 SET + (columnar.chunk_group_row_limit = 2000, + columnar.stripe_row_limit = 20000, + columnar.compression = pglz, + columnar.compression_level = 9); SELECT citus_add_local_table_to_metadata('table_option_citus_local_2'); -- verify settings on placements SELECT run_command_on_placements('table_option_citus_local_2',$cmd$ - SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE regclass = '%s'::regclass; + SELECT ROW(chunk_group_row_limit, stripe_row_limit, compression, compression_level) FROM columnar.options WHERE relation = '%s'::regclass; $cmd$); -- verify undistribute works SELECT undistribute_table('table_option_citus_local'); SELECT * FROM pg_dist_partition WHERE logicalrelid = 'table_option_citus_local'::regclass; -SELECT compression FROM columnar.options WHERE regclass = 'table_option_citus_local'::regclass; +SELECT compression FROM columnar.options WHERE relation = 
'table_option_citus_local'::regclass; DROP TABLE table_option_citus_local, table_option_citus_local_2; SELECT 1 FROM master_remove_node('localhost', :master_port); diff --git a/src/test/regress/sql/columnar_create.sql b/src/test/regress/sql/columnar_create.sql index 4037dd28b..a0555fbdc 100644 --- a/src/test/regress/sql/columnar_create.sql +++ b/src/test/regress/sql/columnar_create.sql @@ -7,7 +7,7 @@ CREATE TABLE contestant (handle TEXT, birthdate DATE, rating INT, percentile FLOAT, country CHAR(3), achievements TEXT[]) USING columnar; -SELECT alter_columnar_table_set('contestant', compression => 'none'); +ALTER TABLE contestant SET (columnar.compression = none); CREATE INDEX contestant_idx on contestant(handle); @@ -29,14 +29,14 @@ INSERT INTO columnar_table_1 VALUES (1); CREATE MATERIALIZED VIEW columnar_table_1_mv USING columnar AS SELECT * FROM columnar_table_1; -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_table_1_mv_storage_id +SELECT columnar.get_storage_id(oid) AS columnar_table_1_mv_storage_id FROM pg_class WHERE relname='columnar_table_1_mv' \gset -- test columnar_relation_set_new_filenode REFRESH MATERIALIZED VIEW columnar_table_1_mv; SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_mv_storage_id); -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_table_1_storage_id +SELECT columnar.get_storage_id(oid) AS columnar_table_1_storage_id FROM pg_class WHERE relname='columnar_table_1' \gset BEGIN; @@ -60,7 +60,7 @@ CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; -- reserve some chunks and a stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,5) i; -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_temp_storage_id +SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id FROM pg_class WHERE relname='columnar_temp' \gset SELECT pg_backend_pid() AS val INTO old_backend_pid; @@ -85,7 +85,7 @@ INSERT INTO columnar_temp SELECT i FROM generate_series(1,5) i; -- test basic select SELECT COUNT(*) FROM columnar_temp WHERE i < 5; -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_temp_storage_id +SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id FROM pg_class WHERE relname='columnar_temp' \gset BEGIN; @@ -107,7 +107,7 @@ BEGIN; -- force flushing stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,150000) i; - SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_temp_storage_id + SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id FROM pg_class WHERE relname='columnar_temp' \gset COMMIT; @@ -120,7 +120,7 @@ BEGIN; -- force flushing stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,150000) i; - SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS columnar_temp_storage_id + SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id FROM pg_class WHERE relname='columnar_temp' \gset COMMIT; diff --git a/src/test/regress/sql/columnar_empty.sql b/src/test/regress/sql/columnar_empty.sql index 0f6eb1d27..2c4cb7c65 100644 --- a/src/test/regress/sql/columnar_empty.sql +++ b/src/test/regress/sql/columnar_empty.sql @@ -7,11 +7,11 @@ create table t_uncompressed(a int) using columnar; create table t_compressed(a int) using columnar; -- set options -SELECT alter_columnar_table_set('t_compressed', compression => 'pglz'); -SELECT alter_columnar_table_set('t_compressed', stripe_row_limit => 2000); -SELECT alter_columnar_table_set('t_compressed', 
chunk_group_row_limit => 1000); +ALTER TABLE t_compressed SET (columnar.compression = pglz); +ALTER TABLE t_compressed SET (columnar.stripe_row_limit = 2000); +ALTER TABLE t_compressed SET (columnar.chunk_group_row_limit = 1000); -SELECT * FROM columnar.options WHERE regclass = 't_compressed'::regclass; +SELECT * FROM columnar.options WHERE relation = 't_compressed'::regclass; -- select select * from t_uncompressed; diff --git a/src/test/regress/sql/columnar_fallback_scan.sql b/src/test/regress/sql/columnar_fallback_scan.sql index 3210f0738..28e521eaf 100644 --- a/src/test/regress/sql/columnar_fallback_scan.sql +++ b/src/test/regress/sql/columnar_fallback_scan.sql @@ -10,7 +10,7 @@ set columnar.enable_custom_scan = false; create table fallback_scan(i int) using columnar; -- large enough to test parallel_workers > 1 -select alter_columnar_table_set('fallback_scan', compression => 'none'); +ALTER TABLE fallback_scan SET (columnar.compression = none); insert into fallback_scan select generate_series(1,150000); vacuum analyze fallback_scan; diff --git a/src/test/regress/sql/columnar_first_row_number.sql b/src/test/regress/sql/columnar_first_row_number.sql index b1d0e7bd4..d58448f92 100644 --- a/src/test/regress/sql/columnar_first_row_number.sql +++ b/src/test/regress/sql/columnar_first_row_number.sql @@ -12,19 +12,19 @@ ROLLBACK; INSERT INTO col_table_1 SELECT i FROM generate_series(1, 12) i; -SELECT alter_columnar_table_set('col_table_1', stripe_row_limit => 1000); +ALTER TABLE col_table_1 SET (columnar.stripe_row_limit = 1000); INSERT INTO col_table_1 SELECT i FROM generate_series(1, 2350) i; SELECT row_count, first_row_number FROM columnar.stripe a -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid('col_table_1'::regclass) +WHERE a.storage_id = columnar.get_storage_id('col_table_1'::regclass) ORDER BY stripe_num; VACUUM FULL col_table_1; -- show that we properly update first_row_number after VACUUM FULL SELECT row_count, first_row_number FROM columnar.stripe a -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid('col_table_1'::regclass) +WHERE a.storage_id = columnar.get_storage_id('col_table_1'::regclass) ORDER BY stripe_num; TRUNCATE col_table_1; @@ -36,7 +36,7 @@ COMMIT; -- show that we start with first_row_number=1 after TRUNCATE SELECT row_count, first_row_number FROM columnar.stripe a -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid('col_table_1'::regclass) +WHERE a.storage_id = columnar.get_storage_id('col_table_1'::regclass) ORDER BY stripe_num; SET client_min_messages TO ERROR; diff --git a/src/test/regress/sql/columnar_indexes.sql b/src/test/regress/sql/columnar_indexes.sql index 8fec947b2..36a340719 100644 --- a/src/test/regress/sql/columnar_indexes.sql +++ b/src/test/regress/sql/columnar_indexes.sql @@ -407,12 +407,12 @@ INSERT INTO aborted_write_test VALUES (16999); REINDEX TABLE aborted_write_test; BEGIN; - ALTER TABLE columnar.stripe SET (autovacuum_enabled = false); - ALTER TABLE columnar.chunk SET (autovacuum_enabled = false); - ALTER TABLE columnar.chunk_group SET (autovacuum_enabled = false); + ALTER TABLE columnar_internal.stripe SET (autovacuum_enabled = false); + ALTER TABLE columnar_internal.chunk SET (autovacuum_enabled = false); + ALTER TABLE columnar_internal.chunk_group SET (autovacuum_enabled = false); DROP TABLE aborted_write_test; - TRUNCATE columnar.stripe, columnar.chunk, columnar.chunk_group; + TRUNCATE columnar_internal.stripe, columnar_internal.chunk, columnar_internal.chunk_group; CREATE TABLE 
aborted_write_test (a INT) USING columnar; @@ -422,12 +422,13 @@ BEGIN; SELECT FROM aborted_write_test; ROLLBACK TO SAVEPOINT svpt; - -- Already disabled autovacuum for all three metadata tables. - -- Here we truncate columnar.chunk and columnar.chunk_group but not - -- columnar.stripe to make sure that we properly handle dead tuples - -- in columnar.stripe, i.e. stripe metadata entries for aborted - -- transactions. - TRUNCATE columnar.chunk, columnar.chunk_group; + -- Already disabled autovacuum for all three metadata tables. Here + -- we truncate columnar_internal.chunk and + -- columnar.chunk_group but not columnar.stripe to + -- make sure that we properly handle dead tuples in + -- columnar.stripe, i.e. stripe metadata entries for + -- aborted transactions. + TRUNCATE columnar_internal.chunk, columnar_internal.chunk_group; CREATE INDEX ON aborted_write_test (a); ROLLBACK; @@ -477,7 +478,7 @@ rollback; insert into uniq select generate_series(1,100); SELECT COUNT(*)=1 FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); TRUNCATE uniq; @@ -489,7 +490,7 @@ rollback; insert into uniq select generate_series(1,100); SELECT COUNT(*)=1 FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); TRUNCATE uniq; @@ -501,7 +502,7 @@ rollback; insert into uniq select generate_series(1,100); SELECT COUNT(*)=1 FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); TRUNCATE uniq; @@ -513,7 +514,7 @@ rollback; insert into uniq select generate_series(1,100); SELECT COUNT(*)=1 FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); TRUNCATE uniq; @@ -529,12 +530,12 @@ begin; -- didn't flush anything yet, but should see the in progress stripe-write SELECT stripe_num, first_row_number, row_count FROM columnar.stripe cs - WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); + WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); commit; -- should have completed the stripe reservation SELECT stripe_num, first_row_number, row_count FROM columnar.stripe cs -WHERE cs.storage_id = columnar_test_helpers.columnar_relation_storageid('columnar_indexes.uniq'::regclass); +WHERE cs.storage_id = columnar.get_storage_id('columnar_indexes.uniq'::regclass); TRUNCATE uniq; diff --git a/src/test/regress/sql/columnar_insert.sql b/src/test/regress/sql/columnar_insert.sql index 9874109ca..3387f4a0a 100644 --- a/src/test/regress/sql/columnar_insert.sql +++ b/src/test/regress/sql/columnar_insert.sql @@ -119,7 +119,7 @@ DROP TABLE test_toast_columnar; -- We support writing into zero column tables, but not reading from them. -- We test that metadata makes sense so we can fix the read path in future. 
CREATE TABLE zero_col() USING columnar; -SELECT alter_columnar_table_set('zero_col', chunk_group_row_limit => 1000); +ALTER TABLE zero_col SET (columnar.chunk_group_row_limit = 1000); INSERT INTO zero_col DEFAULT VALUES; INSERT INTO zero_col DEFAULT VALUES; @@ -144,20 +144,20 @@ select from columnar_test_helpers.columnar_storage_info('zero_col'); SELECT relname, stripe_num, chunk_group_count, row_count FROM columnar.stripe a, pg_class b -WHERE columnar_test_helpers.columnar_relation_storageid(b.oid)=a.storage_id AND relname = 'zero_col' +WHERE columnar.get_storage_id(b.oid)=a.storage_id AND relname = 'zero_col' ORDER BY 1,2,3,4; SELECT relname, stripe_num, value_count FROM columnar.chunk a, pg_class b -WHERE columnar_test_helpers.columnar_relation_storageid(b.oid)=a.storage_id AND relname = 'zero_col' +WHERE columnar.get_storage_id(b.oid)=a.storage_id AND relname = 'zero_col' ORDER BY 1,2,3; SELECT relname, stripe_num, chunk_group_num, row_count FROM columnar.chunk_group a, pg_class b -WHERE columnar_test_helpers.columnar_relation_storageid(b.oid)=a.storage_id AND relname = 'zero_col' +WHERE columnar.get_storage_id(b.oid)=a.storage_id AND relname = 'zero_col' ORDER BY 1,2,3,4; CREATE TABLE selfinsert(x int) USING columnar; -SELECT alter_columnar_table_set('selfinsert', stripe_row_limit => 1000); +ALTER TABLE selfinsert SET (columnar.stripe_row_limit = 1000); BEGIN; INSERT INTO selfinsert SELECT generate_series(1,1010); diff --git a/src/test/regress/sql/columnar_matview.sql b/src/test/regress/sql/columnar_matview.sql index ff526d1c0..a1662190f 100644 --- a/src/test/regress/sql/columnar_matview.sql +++ b/src/test/regress/sql/columnar_matview.sql @@ -19,23 +19,23 @@ SELECT * FROM t_view a ORDER BY a; -- show columnar options for materialized view SELECT * FROM columnar.options -WHERE regclass = 't_view'::regclass; +WHERE relation = 't_view'::regclass; -- show we can set options on a materialized view -SELECT alter_columnar_table_set('t_view', compression => 'pglz'); +ALTER TABLE t_view SET (columnar.compression = pglz); SELECT * FROM columnar.options -WHERE regclass = 't_view'::regclass; +WHERE relation = 't_view'::regclass; REFRESH MATERIALIZED VIEW t_view; -- verify options have not been changed SELECT * FROM columnar.options -WHERE regclass = 't_view'::regclass; +WHERE relation = 't_view'::regclass; SELECT * FROM t_view a ORDER BY a; -- verify that we have created metadata entries for the materialized view -SELECT columnar_test_helpers.columnar_relation_storageid(oid) AS storageid +SELECT columnar.get_storage_id(oid) AS storageid FROM pg_class WHERE relname='t_view' \gset SELECT count(*) FROM columnar.stripe WHERE storage_id=:storageid; diff --git a/src/test/regress/sql/columnar_permissions.sql b/src/test/regress/sql/columnar_permissions.sql index e131de55c..aca0fd5c7 100644 --- a/src/test/regress/sql/columnar_permissions.sql +++ b/src/test/regress/sql/columnar_permissions.sql @@ -1,17 +1,75 @@ +create table no_access (i int) using columnar; + +insert into no_access values(1); +insert into no_access values(2); +insert into no_access values(3); + select current_user \gset create user columnar_user; \c - columnar_user +-- owned by columnar_user create table columnar_permissions(i int) using columnar; insert into columnar_permissions values(1); +insert into columnar_permissions values(2); alter table columnar_permissions add column j int; -insert into columnar_permissions values(2,20); -vacuum columnar_permissions; +alter table columnar_permissions reset (columnar.compression); +alter table 
columnar_permissions set (columnar.compression = none); +select alter_columnar_table_reset('columnar_permissions', stripe_row_limit => true); +select alter_columnar_table_set('columnar_permissions', stripe_row_limit => 2222); + +select 1 from columnar.get_storage_id('columnar_permissions'::regclass); + +-- error +select 1 from columnar.get_storage_id('no_access'::regclass); + +-- only tuples related to columnar_permissions should be visible +select relation, chunk_group_row_limit, stripe_row_limit, compression, compression_level + from columnar.options + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); +select relation, stripe_num, row_count, first_row_number + from columnar.stripe + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); +select relation, stripe_num, attr_num, chunk_group_num, value_count + from columnar.chunk + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); +select relation, stripe_num, chunk_group_num, row_count + from columnar.chunk_group + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + truncate columnar_permissions; -drop table columnar_permissions; + +insert into columnar_permissions values(2,20); +insert into columnar_permissions values(2,30); +insert into columnar_permissions values(4,40); +insert into columnar_permissions values(5,50); + +vacuum columnar_permissions; + +-- error: columnar_user can't alter no_access +alter table no_access reset (columnar.stripe_row_limit); +alter table no_access set (columnar.stripe_row_limit = 12000); +select alter_columnar_table_reset('no_access', chunk_group_row_limit => true); +select alter_columnar_table_set('no_access', chunk_group_row_limit => 1111); \c - :current_user +-- should see tuples from both columnar_permissions and no_access +select relation, chunk_group_row_limit, stripe_row_limit, compression, compression_level + from columnar.options + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); +select relation, stripe_num, row_count, first_row_number + from columnar.stripe + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); +select relation, stripe_num, attr_num, chunk_group_num, value_count + from columnar.chunk + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); +select relation, stripe_num, chunk_group_num, row_count + from columnar.chunk_group + where relation in ('no_access'::regclass, 'columnar_permissions'::regclass); + +drop table columnar_permissions; +drop table no_access; diff --git a/src/test/regress/sql/columnar_pg15.sql b/src/test/regress/sql/columnar_pg15.sql new file mode 100644 index 000000000..86117910c --- /dev/null +++ b/src/test/regress/sql/columnar_pg15.sql @@ -0,0 +1,34 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif + +CREATE TABLE alter_am(i int); + +INSERT INTO alter_am SELECT generate_series(1,1000000); + +SELECT * FROM columnar.options WHERE regclass = 'alter_am'::regclass; +SELECT SUM(i) FROM alter_am; + +ALTER TABLE alter_am + SET ACCESS METHOD columnar, + SET (columnar.compression = pglz, fillfactor = 20); + +SELECT * FROM columnar.options WHERE regclass = 'alter_am'::regclass; +SELECT SUM(i) FROM alter_am; + +ALTER TABLE alter_am SET ACCESS METHOD heap; + +-- columnar options should be gone +SELECT * FROM columnar.options WHERE regclass = 'alter_am'::regclass; +SELECT SUM(i) FROM alter_am; 
+ +-- error: setting columnar options must happen after converting to columnar +ALTER TABLE alter_am + SET (columnar.stripe_row_limit = 1111), + SET ACCESS METHOD columnar; + +DROP TABLE alter_am; diff --git a/src/test/regress/sql/columnar_recursive.sql b/src/test/regress/sql/columnar_recursive.sql index ada6167f6..08d77afdb 100644 --- a/src/test/regress/sql/columnar_recursive.sql +++ b/src/test/regress/sql/columnar_recursive.sql @@ -16,7 +16,7 @@ INSERT INTO t2 SELECT i, f(i) FROM generate_series(1, 5) i; -- there are no subtransactions, so above statement should batch -- INSERTs inside the UDF and create on stripe per table. SELECT relname, count(*) FROM columnar.stripe a, pg_class b -WHERE columnar_test_helpers.columnar_relation_storageid(b.oid)=a.storage_id AND relname IN ('t1', 't2') +WHERE columnar.get_storage_id(b.oid)=a.storage_id AND relname IN ('t1', 't2') GROUP BY relname ORDER BY relname; diff --git a/src/test/regress/sql/columnar_rollback.sql b/src/test/regress/sql/columnar_rollback.sql index 572246296..1f6991f76 100644 --- a/src/test/regress/sql/columnar_rollback.sql +++ b/src/test/regress/sql/columnar_rollback.sql @@ -6,7 +6,7 @@ CREATE TABLE t(a int, b int) USING columnar; CREATE VIEW t_stripes AS SELECT * FROM columnar.stripe a, pg_class b -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AND b.relname = 't'; +WHERE a.storage_id = columnar.get_storage_id(b.oid) AND b.relname = 't'; BEGIN; INSERT INTO t SELECT i, i+1 FROM generate_series(1, 10) i; diff --git a/src/test/regress/sql/columnar_tableoptions.sql b/src/test/regress/sql/columnar_tableoptions.sql index 0033042fc..34d882369 100644 --- a/src/test/regress/sql/columnar_tableoptions.sql +++ b/src/test/regress/sql/columnar_tableoptions.sql @@ -7,72 +7,76 @@ INSERT INTO table_options SELECT generate_series(1,100); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- test changing the compression -SELECT alter_columnar_table_set('table_options', compression => 'pglz'); +ALTER TABLE table_options SET (columnar.compression = pglz); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- test changing the compression level -SELECT alter_columnar_table_set('table_options', compression_level => 5); +ALTER TABLE table_options SET (columnar.compression_level = 5); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- test changing the chunk_group_row_limit -SELECT alter_columnar_table_set('table_options', chunk_group_row_limit => 2000); +ALTER TABLE table_options SET (columnar.chunk_group_row_limit = 2000); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- test changing the chunk_group_row_limit -SELECT alter_columnar_table_set('table_options', stripe_row_limit => 4000); +ALTER TABLE table_options SET (columnar.stripe_row_limit = 4000); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- VACUUM FULL creates a new table, make sure it copies settings from the table you are vacuuming VACUUM FULL table_options; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 
'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- set all settings at the same time -SELECT alter_columnar_table_set('table_options', stripe_row_limit => 8000, chunk_group_row_limit => 4000, compression => 'none', compression_level => 7); +ALTER TABLE table_options SET + (columnar.stripe_row_limit = 8000, + columnar.chunk_group_row_limit = 4000, + columnar.compression = none, + columnar.compression_level = 7); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- make sure table options are not changed when VACUUM a table VACUUM table_options; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- make sure table options are not changed when VACUUM FULL a table VACUUM FULL table_options; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- make sure table options are not changed when truncating a table TRUNCATE table_options; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; ALTER TABLE table_options ALTER COLUMN a TYPE bigint; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- reset settings one by one to the version of the GUC's SET columnar.chunk_group_row_limit TO 1000; @@ -83,30 +87,30 @@ SET columnar.compression_level TO 11; -- verify setting the GUC's didn't change the settings -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -SELECT alter_columnar_table_reset('table_options', chunk_group_row_limit => true); +ALTER TABLE table_options RESET (columnar.chunk_group_row_limit); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -SELECT alter_columnar_table_reset('table_options', stripe_row_limit => true); +ALTER TABLE table_options RESET (columnar.stripe_row_limit); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -SELECT alter_columnar_table_reset('table_options', compression => true); +ALTER TABLE table_options RESET (columnar.compression); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -SELECT alter_columnar_table_reset('table_options', compression_level => true); +ALTER TABLE table_options RESET (columnar.compression_level); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- verify resetting all settings at once work SET columnar.chunk_group_row_limit TO 10000; @@ -116,44 +120,91 @@ SET columnar.compression_level TO 13; -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -SELECT alter_columnar_table_reset( - 'table_options', - chunk_group_row_limit => true, - stripe_row_limit => true, - compression => true, - compression_level => true); +ALTER TABLE 
table_options RESET + (columnar.chunk_group_row_limit, + columnar.stripe_row_limit, + columnar.compression, + columnar.compression_level); -- show table_options settings SELECT * FROM columnar.options -WHERE regclass = 'table_options'::regclass; +WHERE relation = 'table_options'::regclass; -- verify edge cases -- first start with a table that is not a columnar table CREATE TABLE not_a_columnar_table (a int); -SELECT alter_columnar_table_set('not_a_columnar_table', compression => 'pglz'); -SELECT alter_columnar_table_reset('not_a_columnar_table', compression => true); +ALTER TABLE not_a_columnar_table SET (columnar.compression = pglz); +ALTER TABLE not_a_columnar_table RESET (columnar.compression); -- verify you can't use a compression that is not known -SELECT alter_columnar_table_set('table_options', compression => 'foobar'); +ALTER TABLE table_options SET (columnar.compression = foobar); + +-- verify you can't use a columnar setting that is not known +ALTER TABLE table_options SET (columnar.foobar = 123); +ALTER TABLE table_options RESET (columnar.foobar); + +-- verify that invalid options are caught early, before query executes +-- (error should be about invalid options, not division-by-zero) +CREATE TABLE fail(i) USING columnar WITH (columnar.foobar = 123) AS SELECT 1/0; +CREATE TABLE fail(i) USING columnar WITH (columnar.compression = foobar) AS SELECT 1/0; -- verify cannot set out of range compression levels -SELECT alter_columnar_table_set('table_options', compression_level => 0); -SELECT alter_columnar_table_set('table_options', compression_level => 20); +ALTER TABLE table_options SET (columnar.compression_level = 0); +ALTER TABLE table_options SET (columnar.compression_level = 20); -- verify cannot set out of range stripe_row_limit & chunk_group_row_limit options -SELECT alter_columnar_table_set('table_options', stripe_row_limit => 999); -SELECT alter_columnar_table_set('table_options', stripe_row_limit => 10000001); -SELECT alter_columnar_table_set('table_options', chunk_group_row_limit => 999); -SELECT alter_columnar_table_set('table_options', chunk_group_row_limit => 100001); -SELECT alter_columnar_table_set('table_options', chunk_group_row_limit => 0); +ALTER TABLE table_options SET (columnar.stripe_row_limit = 999); +ALTER TABLE table_options SET (columnar.stripe_row_limit = 10000001); +ALTER TABLE table_options SET (columnar.chunk_group_row_limit = 999); +ALTER TABLE table_options SET (columnar.chunk_group_row_limit = 100001); +ALTER TABLE table_options SET (columnar.chunk_group_row_limit = 0); INSERT INTO table_options VALUES (1); +-- multiple SET/RESET clauses +ALTER TABLE table_options + SET (columnar.compression = pglz, columnar.compression_level = 7), + SET (columnar.compression_level = 6); + +SELECT * FROM columnar.options +WHERE relation = 'table_options'::regclass; + +ALTER TABLE table_options + SET (columnar.compression = pglz, columnar.stripe_row_limit = 7777), + RESET (columnar.stripe_row_limit), + SET (columnar.chunk_group_row_limit = 5555); + +SELECT * FROM columnar.options +WHERE relation = 'table_options'::regclass; + +-- a no-op; shouldn't throw an error +ALTER TABLE IF EXISTS what SET (columnar.compression = lz4); + +-- a no-op; shouldn't throw an error +CREATE TABLE IF NOT EXISTS table_options(a int) USING columnar + WITH (columnar.compression_level = 4); + +-- test old interface based on functions +SELECT alter_columnar_table_reset('table_options', compression => true); +SELECT * FROM columnar.options WHERE relation = 'table_options'::regclass; +SELECT 
alter_columnar_table_set('table_options', compression_level => 1); +SELECT * FROM columnar.options WHERE relation = 'table_options'::regclass; + +-- error: set columnar options on heap tables +CREATE TABLE heap_options(i int) USING heap; +ALTER TABLE heap_options SET (columnar.stripe_row_limit = 12000); + +-- ordinarily, postgres allows bogus options for a RESET clause, +-- but if it's a heap table and someone specifies columnar options, +-- we block them +ALTER TABLE heap_options RESET (columnar.stripe_row_limit, foobar); +DROP TABLE heap_options; + -- verify options are removed when table is dropped DROP TABLE table_options; -- we expect no entries in çstore.options for anything not found int pg_class -SELECT * FROM columnar.options o WHERE o.regclass NOT IN (SELECT oid FROM pg_class); +SELECT * FROM columnar.options o WHERE o.relation NOT IN (SELECT oid FROM pg_class); SET client_min_messages TO warning; DROP SCHEMA am_tableoptions CASCADE; diff --git a/src/test/regress/sql/columnar_test_helpers.sql b/src/test/regress/sql/columnar_test_helpers.sql index f96dcdbf6..2e85ebc88 100644 --- a/src/test/regress/sql/columnar_test_helpers.sql +++ b/src/test/regress/sql/columnar_test_helpers.sql @@ -1,10 +1,6 @@ CREATE SCHEMA columnar_test_helpers; SET search_path TO columnar_test_helpers; -CREATE FUNCTION columnar_relation_storageid(relid oid) RETURNS bigint - LANGUAGE C STABLE STRICT - AS 'citus', $$columnar_relation_storageid$$; - CREATE OR REPLACE FUNCTION columnar_storage_info( rel regclass, version_major OUT int4, @@ -30,22 +26,22 @@ $$ LANGUAGE plpgsql; CREATE view chunk_group_consistency AS WITH a as ( SELECT storage_id, stripe_num, chunk_group_num, min(value_count) as row_count - FROM columnar.chunk + FROM columnar_internal.chunk GROUP BY 1,2,3 ), b as ( SELECT storage_id, stripe_num, chunk_group_num, max(value_count) as row_count - FROM columnar.chunk + FROM columnar_internal.chunk GROUP BY 1,2,3 ), c as ( (TABLE a EXCEPT TABLE b) UNION (TABLE b EXCEPT TABLE a) UNION - (TABLE a EXCEPT TABLE columnar.chunk_group) UNION (TABLE columnar.chunk_group EXCEPT TABLE a) + (TABLE a EXCEPT TABLE columnar_internal.chunk_group) UNION (TABLE columnar_internal.chunk_group EXCEPT TABLE a) ), d as ( SELECT storage_id, stripe_num, count(*) as chunk_group_count - FROM columnar.chunk_group + FROM columnar_internal.chunk_group GROUP BY 1,2 ), e as ( SELECT storage_id, stripe_num, chunk_group_count - FROM columnar.stripe + FROM columnar_internal.stripe ), f as ( (TABLE d EXCEPT TABLE e) UNION (TABLE e EXCEPT TABLE d) ) @@ -59,9 +55,9 @@ DECLARE BEGIN SELECT count(*) INTO union_storage_id_count FROM ( - SELECT storage_id FROM columnar.stripe UNION ALL - SELECT storage_id FROM columnar.chunk UNION ALL - SELECT storage_id FROM columnar.chunk_group + SELECT storage_id FROM columnar_internal.stripe UNION ALL + SELECT storage_id FROM columnar_internal.chunk UNION ALL + SELECT storage_id FROM columnar_internal.chunk_group ) AS union_storage_id WHERE storage_id=input_storage_id; diff --git a/src/test/regress/sql/columnar_truncate.sql b/src/test/regress/sql/columnar_truncate.sql index 9cdc44d55..b0ff482e4 100644 --- a/src/test/regress/sql/columnar_truncate.sql +++ b/src/test/regress/sql/columnar_truncate.sql @@ -90,7 +90,7 @@ INSERT INTO columnar_same_transaction_truncate SELECT * FROM generate_series(20, COMMIT; -- should output "1" for the newly created relation -SELECT count(distinct storage_id) - :columnar_data_files_before_truncate FROM columnar.stripe; +SELECT count(distinct storage_id) - 
:columnar_data_files_before_truncate FROM columnar_internal.stripe; SELECT * FROM columnar_same_transaction_truncate; DROP TABLE columnar_same_transaction_truncate; diff --git a/src/test/regress/sql/columnar_vacuum.sql b/src/test/regress/sql/columnar_vacuum.sql index 39f1b35a5..00f665fa8 100644 --- a/src/test/regress/sql/columnar_vacuum.sql +++ b/src/test/regress/sql/columnar_vacuum.sql @@ -6,7 +6,7 @@ CREATE TABLE t(a int, b int) USING columnar; CREATE VIEW t_stripes AS SELECT * FROM columnar.stripe a, pg_class b -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AND b.relname='t'; +WHERE a.storage_id = columnar.get_storage_id(b.oid) AND b.relname='t'; SELECT count(*) FROM t_stripes; @@ -34,7 +34,7 @@ select from columnar_test_helpers.columnar_storage_info('t'); -- test the case when all data cannot fit into a single stripe -SELECT alter_columnar_table_set('t', stripe_row_limit => 1000); +ALTER TABLE t SET (columnar.stripe_row_limit = 1000); INSERT INTO t SELECT i, 2 * i FROM generate_series(1,2500) i; SELECT sum(a), sum(b) FROM t; @@ -56,13 +56,13 @@ ALTER TABLE t DROP COLUMN a; SELECT stripe_num, attr_num, chunk_group_num, minimum_value IS NULL, maximum_value IS NULL FROM columnar.chunk a, pg_class b -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AND b.relname='t' ORDER BY 1, 2, 3; +WHERE a.storage_id = columnar.get_storage_id(b.oid) AND b.relname='t' ORDER BY 1, 2, 3; VACUUM FULL t; SELECT stripe_num, attr_num, chunk_group_num, minimum_value IS NULL, maximum_value IS NULL FROM columnar.chunk a, pg_class b -WHERE a.storage_id = columnar_test_helpers.columnar_relation_storageid(b.oid) AND b.relname='t' ORDER BY 1, 2, 3; +WHERE a.storage_id = columnar.get_storage_id(b.oid) AND b.relname='t' ORDER BY 1, 2, 3; -- Make sure we cleaned-up the transient table metadata after VACUUM FULL commands SELECT count(distinct storage_id) - :columnar_table_count FROM columnar.stripe; @@ -91,15 +91,15 @@ SELECT count(*) FROM t; -- then vacuum to print stats BEGIN; -SELECT alter_columnar_table_set('t', - chunk_group_row_limit => 1000, - stripe_row_limit => 2000, - compression => 'pglz'); +ALTER TABLE t SET + (columnar.chunk_group_row_limit = 1000, + columnar.stripe_row_limit = 2000, + columnar.compression = pglz); SAVEPOINT s1; INSERT INTO t SELECT i FROM generate_series(1, 1500) i; ROLLBACK TO SAVEPOINT s1; INSERT INTO t SELECT i / 5 FROM generate_series(1, 1500) i; -SELECT alter_columnar_table_set('t', compression => 'none'); +ALTER TABLE t SET (columnar.compression = none); SAVEPOINT s2; INSERT INTO t SELECT i FROM generate_series(1, 1500) i; ROLLBACK TO SAVEPOINT s2; @@ -125,7 +125,7 @@ VACUUM VERBOSE t; -- vacuum full should remove chunks for dropped columns -- note that, a chunk will be stored in non-compressed for if compression -- doesn't reduce its size. 
-SELECT alter_columnar_table_set('t', compression => 'pglz'); +ALTER TABLE t SET (columnar.compression = pglz); VACUUM FULL t; VACUUM VERBOSE t; diff --git a/src/test/regress/sql/columnar_zstd.sql b/src/test/regress/sql/columnar_zstd.sql index ab8ed1b31..1723d418b 100644 --- a/src/test/regress/sql/columnar_zstd.sql +++ b/src/test/regress/sql/columnar_zstd.sql @@ -26,7 +26,7 @@ VACUUM FULL test_zstd; SELECT pg_relation_size('test_zstd') AS size_comp_level_default \gset -- change compression level -SELECT alter_columnar_table_set('test_zstd', compression_level => 19); +ALTER TABLE test_zstd SET (columnar.compression_level = 19); VACUUM FULL test_zstd; SELECT pg_relation_size('test_zstd') AS size_comp_level_19 \gset diff --git a/src/test/regress/sql/distributed_locks.sql b/src/test/regress/sql/distributed_locks.sql new file mode 100644 index 000000000..de7718cfe --- /dev/null +++ b/src/test/regress/sql/distributed_locks.sql @@ -0,0 +1,149 @@ +CREATE SCHEMA distribute_lock_tests; +SET search_path TO distribute_lock_tests; + +SET citus.next_shard_id TO 10000; + +CREATE TABLE dist_table(a int); + +SELECT create_distributed_table('dist_table', 'a'); + +INSERT INTO dist_table SELECT n FROM generate_series(1, 5) n; + +-- Test acquiring lock outside transaction +LOCK dist_table IN ACCESS EXCLUSIVE MODE; + +-- Test acquiring lock inside procedure +DO $$ +BEGIN +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +END; +$$; + +-- Try all valid lock options; also try omitting the optional TABLE keyword. +BEGIN TRANSACTION; +LOCK TABLE dist_table IN ACCESS SHARE MODE; +LOCK dist_table IN ROW SHARE MODE; +LOCK TABLE dist_table IN ROW EXCLUSIVE MODE; +LOCK TABLE dist_table IN SHARE UPDATE EXCLUSIVE MODE; +LOCK TABLE dist_table IN SHARE MODE; +LOCK dist_table IN SHARE ROW EXCLUSIVE MODE; +LOCK TABLE dist_table IN EXCLUSIVE MODE; +LOCK TABLE dist_table IN ACCESS EXCLUSIVE MODE; +ROLLBACK; + +-- Test that when the user does not have the required permissions to lock +-- the locks are not forwarded to the workers + +SET client_min_messages TO ERROR; +SELECT run_command_on_workers($$ + SET citus.enable_ddl_propagation TO OFF; + CREATE ROLE read_only_user WITH LOGIN; + RESET citus.enable_ddl_propagation; +$$); + +SET citus.enable_ddl_propagation TO OFF; +CREATE ROLE read_only_user WITH LOGIN; +GRANT ALL ON SCHEMA distribute_lock_tests TO read_only_user; +GRANT SELECT ON dist_table TO read_only_user; +RESET citus.enable_ddl_propagation; +RESET client_min_messages; + +SET ROLE read_only_user; +SET citus.log_remote_commands TO ON; + +BEGIN; +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +ROLLBACK; + +SET citus.log_remote_commands TO OFF; +RESET ROLE; + +-- test that user with view permissions can lock the tables +-- which the view is built on +CREATE VIEW myview AS SELECT * FROM dist_table; + +SET client_min_messages TO ERROR; +SELECT run_command_on_workers($$ + SET citus.enable_ddl_propagation TO OFF; + CREATE ROLE user_with_view_permissions WITH LOGIN; + GRANT ALL ON SCHEMA distribute_lock_tests TO user_with_view_permissions; + GRANT ALL ON distribute_lock_tests.myview TO user_with_view_permissions; + RESET citus.enable_ddl_propagation; +$$); + +SET citus.enable_ddl_propagation TO OFF; +CREATE ROLE user_with_view_permissions WITH LOGIN; +GRANT ALL ON SCHEMA distribute_lock_tests TO user_with_view_permissions; +GRANT ALL ON myview TO user_with_view_permissions; +RESET citus.enable_ddl_propagation; +RESET client_min_messages; + +SET ROLE TO user_with_view_permissions; + +BEGIN; +LOCK myview IN ACCESS EXCLUSIVE MODE; +SELECT 
run_command_on_workers($$ + SELECT mode FROM pg_locks WHERE relation = 'distribute_lock_tests.dist_table'::regclass; +$$); + +ROLLBACK; + +RESET ROLE; + +\c - - - :worker_1_port +SET search_path TO distribute_lock_tests; + +-- Test trying to lock from a worker when the coordinator is not in the metadata +SET citus.allow_unsafe_locks_from_workers TO 'off'; +BEGIN; +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +ROLLBACK; + +-- Verify that the same restriction does not apply to worker local tables +CREATE TABLE local_table(a int); + +-- Verify that no locks will be distributed for the local lock +SET citus.log_remote_commands TO ON; + +BEGIN; +LOCK local_table IN ACCESS EXCLUSIVE MODE; +ROLLBACK; + +RESET citus.log_remote_commands; + +-- Cleanup local table +DROP TABLE local_table; + +-- Test that setting the guc to 'on' will allow the lock from workers +SET citus.allow_unsafe_locks_from_workers TO 'on'; +BEGIN; +LOCK dist_table IN ACCESS EXCLUSIVE MODE; +ROLLBACK; + +-- Test locking a shard +SET citus.enable_manual_changes_to_shards TO OFF; +BEGIN; +LOCK dist_table_10000 IN ACCESS EXCLUSIVE MODE; +ROLLBACK; + +-- Test allowing shard locks with the citus.enable_manual_changes_to_shards guc +SET citus.enable_manual_changes_to_shards TO ON; + +BEGIN; +LOCK dist_table_10000 IN ACCESS EXCLUSIVE MODE; +ROLLBACK; + +RESET citus.enable_manual_changes_to_shards; + +\c - - - :master_port +DROP SCHEMA distribute_lock_tests CASCADE; +SET citus.enable_ddl_propagation TO OFF; +DROP ROLE read_only_user; +DROP ROLE user_with_view_permissions; +RESET citus.enable_ddl_propagation; +SELECT run_command_on_workers($$ + SET citus.enable_ddl_propagation TO OFF; + DROP USER read_only_user; + DROP USER user_with_view_permissions; + RESET citus.enable_ddl_propagation; +$$); diff --git a/src/test/regress/sql/distributed_planning.sql b/src/test/regress/sql/distributed_planning.sql index b19654ff4..c6a2bf29c 100644 --- a/src/test/regress/sql/distributed_planning.sql +++ b/src/test/regress/sql/distributed_planning.sql @@ -73,8 +73,10 @@ COMMIT; -- basic view queries +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW simple_view AS SELECT count(*) as cnt FROM test t1 JOIN test t2 USING (x); +RESET citus.enable_ddl_propagation; SELECT * FROM simple_view; SELECT * FROM simple_view, test WHERE test.x = simple_view.cnt; diff --git a/src/test/regress/sql/drop_partitioned_table.sql b/src/test/regress/sql/drop_partitioned_table.sql index a9842b10a..b1c64d5cb 100644 --- a/src/test/regress/sql/drop_partitioned_table.sql +++ b/src/test/regress/sql/drop_partitioned_table.sql @@ -52,22 +52,6 @@ WHERE n.nspname IN ('drop_partitioned_table', 'schema1') AND c.relkind IN ('r','p') ORDER BY 1, 2; -\c - - - :worker_1_port -SET search_path = drop_partitioned_table; -CREATE VIEW tables_info AS -SELECT n.nspname as "Schema", - c.relname as "Name", - CASE c.relkind WHEN 'r' THEN 'table' WHEN 'p' THEN 'partitioned table' END as "Type", - pg_catalog.pg_get_userbyid(c.relowner) as "Owner" -FROM pg_catalog.pg_class c - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - LEFT JOIN pg_user u ON u.usesysid = c.relowner -WHERE n.nspname IN ('drop_partitioned_table', 'schema1') - AND c.relkind IN ('r','p') -ORDER BY 1, 2; - -\c - - - :master_port -SET search_path = drop_partitioned_table; SET citus.next_shard_id TO 721000; -- CASE 1 diff --git a/src/test/regress/sql/failure_mx_metadata_sync.sql b/src/test/regress/sql/failure_mx_metadata_sync.sql index 5dfe88585..90e882fe5 100644 --- a/src/test/regress/sql/failure_mx_metadata_sync.sql +++ 
b/src/test/regress/sql/failure_mx_metadata_sync.sql @@ -10,6 +10,9 @@ SET citus.shard_replication_factor TO 1; SELECT pg_backend_pid() as pid \gset SELECT citus.mitmproxy('conn.allow()'); +\set VERBOSITY terse +SET client_min_messages TO ERROR; + CREATE TABLE t1 (id int PRIMARY KEY); SELECT create_distributed_table('t1', 'id'); INSERT INTO t1 SELECT x FROM generate_series(1,100) AS f(x); @@ -25,21 +28,21 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to drop all tables in pg_dist_partition -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to delete pg_dist_node entries from the worker -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')'); SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").kill()'); SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -- Failure to populate pg_dist_node in the worker -SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").cancel(' || :pid || ')'); SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").kill()'); SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -- Verify that coordinator knows worker does not have valid metadata @@ -71,9 +74,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -- Failure to delete pg_dist_node entries from the worker -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").cancel(' || :pid || ')'); SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").kill()'); SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); \c - - - :worker_2_port diff --git a/src/test/regress/sql/forcedelegation_functions.sql b/src/test/regress/sql/forcedelegation_functions.sql index d95cfc75a..8ec2f9ac8 100644 --- a/src/test/regress/sql/forcedelegation_functions.sql +++ b/src/test/regress/sql/forcedelegation_functions.sql @@ -128,11 +128,8 @@ ROLLBACK; BEGIN; -- Query gets delegated to the node of the shard xx_900001 for the key=1, -- and the function inserts value (1+17) locally on the shard xx_900031 +-- which is not allowed because this is not a 
regular pushdown SELECT insert_data(intcol+17) from test_forcepushdown where intcol = 1; - --- This will fail with duplicate error as the function already inserted --- the value(1+17) -SELECT insert_data(18); COMMIT; -- @@ -278,11 +275,9 @@ END; BEGIN; -- Query lands on the shard with key = 300(shard __900089) and the function inserts locally +-- which is not allowed because this is not a regular pushdown SELECT inner_force_delegation_function(id) FROM test_nested WHERE id = 300; --- Query lands on the shard with key = 300(shard __900089) and the function inserts remotely -SELECT insert_data_non_distarg(id) FROM test_nested WHERE id = 300; - END; -- @@ -294,6 +289,12 @@ BEGIN; SELECT inner_force_delegation_function((SELECT id+112 FROM test_nested WHERE id=400)); END; +BEGIN; +SET LOCAL citus.propagate_set_commands TO 'local'; +SET LOCAL citus.allow_nested_distributed_execution TO on; +SELECT inner_force_delegation_function((SELECT id+112 FROM test_nested WHERE id=400)); +END; + CREATE OR REPLACE FUNCTION test_non_constant(x int, y bigint) RETURNS int AS $$ diff --git a/src/test/regress/sql/function_create.sql b/src/test/regress/sql/function_create.sql new file mode 100644 index 000000000..2973769c9 --- /dev/null +++ b/src/test/regress/sql/function_create.sql @@ -0,0 +1,185 @@ +\set VERBOSITY terse +CREATE SCHEMA function_create; +SET search_path TO function_create; + +-- helper function to verify the function of a coordinator is the same on all workers +CREATE OR REPLACE FUNCTION verify_function_is_same_on_workers(funcname text) + RETURNS bool + LANGUAGE plpgsql +AS $func$ +DECLARE + coordinatorSql text; + workerSql text; +BEGIN + SELECT pg_get_functiondef(funcname::regprocedure) INTO coordinatorSql; + FOR workerSql IN SELECT result FROM run_command_on_workers('SELECT pg_get_functiondef(' || quote_literal(funcname) || '::regprocedure)') LOOP + IF workerSql != coordinatorSql THEN + RAISE INFO 'functions are different, coordinator:% worker:%', coordinatorSql, workerSql; + RETURN false; + END IF; + END LOOP; + + RETURN true; +END; +$func$; + +-- test delegating function calls +CREATE TABLE warnings ( + id int primary key, + message text +); + +SELECT create_distributed_table('warnings', 'id'); +INSERT INTO warnings VALUES (1, 'hello arbitrary config tests'); + +CREATE FUNCTION warning(int, text) +RETURNS void +LANGUAGE plpgsql AS $$ +BEGIN + RAISE WARNING '%', $2; +END; +$$; + +SELECT create_distributed_function('warning(int,text)','$1'); + +-- verify that the function definition is consistent in the cluster +SELECT verify_function_is_same_on_workers('function_create.warning(int,text)'); + +-- test a function that performs operation on the single shard of a reference table +CREATE TABLE monotonic_series(used_values int); +SELECT create_reference_table('monotonic_series'); +INSERT INTO monotonic_series VALUES (1), (3), (5); + +CREATE FUNCTION add_new_item_to_series() +RETURNS int +LANGUAGE SQL +AS $func$ +INSERT INTO monotonic_series SELECT max(used_values)+1 FROM monotonic_series RETURNING used_values; +$func$; + +-- Create and distribute a simple function +CREATE FUNCTION eq(macaddr, macaddr) RETURNS bool + AS 'select $1 = $2;' + LANGUAGE SQL + IMMUTABLE + RETURNS NULL ON NULL INPUT; + +-- testing alter statements for a distributed function +-- ROWS 5, untested because; +-- ERROR: ROWS is not applicable when function does not return a set +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); +ALTER FUNCTION eq(macaddr,macaddr) CALLED ON NULL INPUT IMMUTABLE 
SECURITY INVOKER PARALLEL UNSAFE COST 5; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); +ALTER FUNCTION eq(macaddr,macaddr) RETURNS NULL ON NULL INPUT STABLE SECURITY DEFINER PARALLEL RESTRICTED; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); +ALTER FUNCTION eq(macaddr,macaddr) STRICT VOLATILE PARALLEL SAFE; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); + +-- Test SET/RESET for alter function +ALTER ROUTINE eq(macaddr,macaddr) SET client_min_messages TO debug; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); +ALTER FUNCTION eq(macaddr,macaddr) RESET client_min_messages; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); +ALTER FUNCTION eq(macaddr,macaddr) SET search_path TO 'sch'';ma', public; +SELECT verify_function_is_same_on_workers('function_create.eq(macaddr,macaddr)'); +ALTER FUNCTION eq(macaddr,macaddr) RESET search_path; + +-- rename function and make sure the new name can be used on the workers +ALTER FUNCTION eq(macaddr,macaddr) RENAME TO eq2; +SELECT verify_function_is_same_on_workers('function_create.eq2(macaddr,macaddr)'); + +-- user-defined aggregates with & without strict +create function sum2_sfunc_strict(state int, x int) +returns int immutable strict language plpgsql as $$ +begin return state + x; +end; +$$; + +create function sum2_finalfunc_strict(state int) +returns int immutable strict language plpgsql as $$ +begin return state * 2; +end; +$$; + +create function sum2_sfunc(state int, x int) +returns int immutable language plpgsql as $$ +begin return state + x; +end; +$$; + +create function sum2_finalfunc(state int) +returns int immutable language plpgsql as $$ +begin return state * 2; +end; +$$; + +create aggregate sum2 (int) ( + sfunc = sum2_sfunc, + stype = int, + finalfunc = sum2_finalfunc, + combinefunc = sum2_sfunc, + initcond = '0' +); + +create aggregate sum2_strict (int) ( + sfunc = sum2_sfunc_strict, + stype = int, + finalfunc = sum2_finalfunc_strict, + combinefunc = sum2_sfunc_strict +); + +-- user-defined aggregates with multiple-parameters +create function psum_sfunc(s int, x int, y int) +returns int immutable language plpgsql as $$ +begin return coalesce(s,0) + coalesce(x*y+3,1); +end; +$$; + +create function psum_sfunc_strict(s int, x int, y int) +returns int immutable strict language plpgsql as $$ +begin return coalesce(s,0) + coalesce(x*y+3,1); +end; +$$; + +create function psum_combinefunc(s1 int, s2 int) +returns int immutable language plpgsql as $$ +begin return coalesce(s1,0) + coalesce(s2,0); +end; +$$; + +create function psum_combinefunc_strict(s1 int, s2 int) +returns int immutable strict language plpgsql as $$ +begin return coalesce(s1,0) + coalesce(s2,0); +end; +$$; + +create function psum_finalfunc(x int) +returns int immutable language plpgsql as $$ +begin return x * 2; +end; +$$; + +create function psum_finalfunc_strict(x int) +returns int immutable strict language plpgsql as $$ +begin return x * 2; +end; +$$; + +create aggregate psum(int, int)( + sfunc=psum_sfunc, + combinefunc=psum_combinefunc, + finalfunc=psum_finalfunc, + stype=int +); + +create aggregate psum_strict(int, int)( + sfunc=psum_sfunc_strict, + combinefunc=psum_combinefunc_strict, + finalfunc=psum_finalfunc_strict, + stype=int, + initcond=0 +); + +-- generate test data +create table aggdata (id int, key int, val int, valf float8); +select create_distributed_table('aggdata', 'id'); diff --git 
a/src/test/regress/sql/function_propagation.sql b/src/test/regress/sql/function_propagation.sql index d4d3a7322..579a1aa9f 100644 --- a/src/test/regress/sql/function_propagation.sql +++ b/src/test/regress/sql/function_propagation.sql @@ -101,7 +101,7 @@ $$; SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid; SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;$$) ORDER BY 1,2; --- Views are not supported +-- Views are supported CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table; CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view) RETURNS int diff --git a/src/test/regress/sql/functions.sql b/src/test/regress/sql/functions.sql new file mode 100644 index 000000000..ce8bf38e9 --- /dev/null +++ b/src/test/regress/sql/functions.sql @@ -0,0 +1,56 @@ +\set VERBOSITY terse +SET search_path TO function_create; + +-- test user defined function with a distribution column argument +SELECT + warning (id, message) +FROM + warnings +WHERE + id = 1; + +SELECT warning (1, 'Push down to worker that holds the partition value of 1'); +SELECT warning (2, 'Push down to worker that holds the partition value of 2'); +SELECT warning (3, 'Push down to worker that holds the partition value of 3'); +SELECT warning (4, 'Push down to worker that holds the partition value of 4'); +SELECT warning (5, 'Push down to worker that holds the partition value of 5'); +SELECT warning (6, 'Push down to worker that holds the partition value of 6'); +SELECT warning (7, 'Push down to worker that holds the partition value of 7'); + + +-- insert some data to test user defined aggregates +INSERT INTO aggdata (id, key, val, valf) + VALUES (1, 1, 2, 11.2), + (2, 1, NULL, 2.1), + (3, 2, 2, 3.22), + (4, 2, 3, 4.23), + (5, 2, 5, 5.25), + (6, 3, 4, 63.4), + (7, 5, NULL, 75), + (8, 6, NULL, NULL), + (9, 6, NULL, 96), + (10, 7, 8, 1078), + (11, 9, 0, 1.19); + +-- test user defined aggregates +SELECT + key, + sum2 (val), + sum2_strict (val), + stddev(valf)::numeric(10, 5), + psum (val, valf::int), + psum_strict (val, valf::int) +FROM + aggdata +GROUP BY + key +ORDER BY + key; + +-- test function that writes to a reference table +SELECT add_new_item_to_series(); +SELECT add_new_item_to_series(); +SELECT add_new_item_to_series(); +SELECT add_new_item_to_series(); +SELECT add_new_item_to_series(); +SELECT add_new_item_to_series(); diff --git a/src/test/regress/sql/global_cancel.sql b/src/test/regress/sql/global_cancel.sql index 6dc58aa2d..d89b83175 100644 --- a/src/test/regress/sql/global_cancel.sql +++ b/src/test/regress/sql/global_cancel.sql @@ -1,6 +1,7 @@ CREATE SCHEMA global_cancel; SET search_path TO global_cancel; SET citus.next_shard_id TO 56789000; +SET citus.grep_remote_commands TO '%pg_cancel_backend%'; CREATE TABLE dist_table (a INT, b INT); SELECT create_distributed_table ('dist_table', 'a', shard_count:=4); diff --git a/src/test/regress/sql/local_dist_join_mixed.sql b/src/test/regress/sql/local_dist_join_mixed.sql index ebc475de1..b07da2fc8 100644 --- a/src/test/regress/sql/local_dist_join_mixed.sql +++ b/src/test/regress/sql/local_dist_join_mixed.sql @@ -408,4 +408,5 @@ JOIN USING (id); +SET client_min_messages TO ERROR; DROP SCHEMA local_dist_join_mixed CASCADE; diff --git a/src/test/regress/sql/local_shard_execution.sql 
b/src/test/regress/sql/local_shard_execution.sql index b68863a7f..b289f6bed 100644 --- a/src/test/regress/sql/local_shard_execution.sql +++ b/src/test/regress/sql/local_shard_execution.sql @@ -230,7 +230,9 @@ SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; INSERT INTO distributed_table VALUES (1, '22', 20); INSERT INTO second_distributed_table VALUES (1, '1'); +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW abcd_view AS SELECT * FROM abcd; +RESET citus.enable_ddl_propagation; SELECT * FROM abcd first join abcd second on first.b = second.b ORDER BY 1,2,3,4; @@ -689,7 +691,16 @@ BEGIN; EXECUTE local_multi_row_insert_prepare_params(5,11); ROLLBACK; - +-- make sure that we still get results if we switch off local execution +PREPARE ref_count_prepare AS SELECT count(*) FROM reference_table; +EXECUTE ref_count_prepare; +EXECUTE ref_count_prepare; +EXECUTE ref_count_prepare; +EXECUTE ref_count_prepare; +EXECUTE ref_count_prepare; +SET citus.enable_local_execution TO off; +EXECUTE ref_count_prepare; +RESET citus.enable_local_execution; -- failures of local execution should rollback both the -- local execution and remote executions @@ -765,15 +776,19 @@ ROLLBACK; -- probably not a realistic case since views are not very -- well supported with MX +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution; -- similar test, but this time the view itself is a non-local -- query, but the query on the view is local +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution_2 WHERE key = 500; @@ -922,6 +937,17 @@ SELECT create_distributed_table('event_responses', 'event_id'); INSERT INTO event_responses VALUES (1, 1, 'yes'), (2, 2, 'yes'), (3, 3, 'no'), (4, 4, 'no'); + +CREATE TABLE event_responses_no_pkey ( + event_id int, + user_id int, + response invite_resp +); + +SELECT create_distributed_table('event_responses_no_pkey', 'event_id'); + + + CREATE OR REPLACE FUNCTION regular_func(p invite_resp) RETURNS int AS $$ DECLARE @@ -1114,6 +1140,281 @@ INSERT INTO event_responses VALUES (16, 666, 'maybe'), (17, 777, 'no') ON CONFLICT (event_id, user_id) DO UPDATE SET response = EXCLUDED.response RETURNING *; +-- set back to sane settings +RESET citus.enable_local_execution; +RESET citus.enable_fast_path_router_planner; + + +-- we'll test some 2PC states +SET citus.enable_metadata_sync TO OFF; + +-- coordinated_transaction_should_use_2PC prints the internal +-- state for 2PC decision on Citus. 
However, even if 2PC is decided, +-- we may not necessarily use 2PC over a connection unless it does +-- a modification +CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC() +RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus', +$$coordinated_transaction_should_use_2PC$$; + +-- make tests consistent +SET citus.max_adaptive_executor_pool_size TO 1; + +RESET citus.enable_metadata_sync; +SELECT recover_prepared_transactions(); + + +SET citus.log_remote_commands TO ON; + +-- we use event_id = 2 for local execution and event_id = 1 for reemote execution +--show it here, if anything changes here, all the tests below might be broken +-- we prefer this to avoid excessive logging below +SELECT * FROM event_responses_no_pkey WHERE event_id = 2; +SELECT * FROM event_responses_no_pkey WHERE event_id = 1; +RESET citus.log_remote_commands; +RESET citus.log_local_commands; +RESET client_min_messages; + +-- single shard local command without transaction block does set the +-- internal state for 2PC, but does not require any actual entries +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *) +SELECT coordinated_transaction_should_use_2PC() FROM cte_1; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- two local commands without transaction block set the internal 2PC state +-- but does not use remotely +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard local modification followed by another single shard +-- local modification sets the 2PC state, but does not use remotely +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *; + INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard local modification followed by a single shard +-- remote modification uses 2PC because multiple nodes involved +-- in the modification +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard local modification followed by a single shard +-- remote modification uses 2PC even if it is not in an explicit +-- tx block as multiple nodes involved in the modification +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + + +-- single shard remote modification followed by a single shard +-- local modification uses 2PC as multiple nodes involved +-- in the modification +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT 
recover_prepared_transactions(); + +-- single shard remote modification followed by a single shard +-- local modification uses 2PC even if it is not in an explicit +-- tx block +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard local SELECT command without transaction block does not set the +-- internal state for 2PC +WITH cte_1 AS (SELECT * FROM event_responses_no_pkey WHERE event_id = 2) +SELECT coordinated_transaction_should_use_2PC() FROM cte_1; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- two local SELECT commands without transaction block does not set the internal 2PC state +-- and does not use remotely +WITH cte_1 AS (SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2), + cte_2 AS (SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2) +SELECT count(*) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- two local SELECT commands without transaction block does not set the internal 2PC state +-- and does not use remotely +BEGIN; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- a local SELECT followed by a remote SELECT does not require to +-- use actual 2PC +BEGIN; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + SELECT count(*) FROM event_responses_no_pkey; +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard local SELECT followed by a single shard +-- remote modification does not use 2PC, because only a single +-- machine involved in the modification +BEGIN; + SELECT * FROM event_responses_no_pkey WHERE event_id = 2; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard local SELECT followed by a single shard +-- remote modification does not use 2PC, because only a single +-- machine involved in the modification +WITH cte_1 AS (SELECT * FROM event_responses_no_pkey WHERE event_id = 2), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard remote modification followed by a single shard +-- local SELECT does not use 2PC, because only a single +-- machine involved in the modification +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard remote modification followed by a single shard +-- local SELECT does not use 2PC, because only a single +-- machine involved in the modification +WITH cte_1 AS (INSERT INTO event_responses_no_pkey 
VALUES (1, 1, 'yes') RETURNING *), + cte_2 AS (SELECT * FROM event_responses_no_pkey WHERE event_id = 2) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- multi shard local SELECT command without transaction block does not set the +-- internal state for 2PC +WITH cte_1 AS (SELECT count(*) FROM event_responses_no_pkey) +SELECT coordinated_transaction_should_use_2PC() FROM cte_1; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- two multi-shard SELECT commands without transaction block does not set the internal 2PC state +-- and does not use remotely +WITH cte_1 AS (SELECT count(*) FROM event_responses_no_pkey), + cte_2 AS (SELECT count(*) FROM event_responses_no_pkey) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- two multi-shard SELECT commands without transaction block does not set the internal 2PC state +-- and does not use remotely +BEGIN; + SELECT count(*) FROM event_responses_no_pkey; + SELECT count(*) FROM event_responses_no_pkey; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- multi-shard shard SELECT followed by a single shard +-- remote modification does not use 2PC, because only a single +-- machine involved in the modification +BEGIN; + SELECT count(*) FROM event_responses_no_pkey; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- multi shard SELECT followed by a single shard +-- remote single shard modification does not use 2PC, because only a single +-- machine involved in the modification +WITH cte_1 AS (SELECT count(*) FROM event_responses_no_pkey), + cte_2 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard remote modification followed by a multi shard +-- SELECT does not use 2PC, because only a single +-- machine involved in the modification +BEGIN; + INSERT INTO event_responses_no_pkey VALUES (1, 2, 'yes') RETURNING *; + SELECT count(*) FROM event_responses_no_pkey; + SELECT coordinated_transaction_should_use_2PC(); +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard remote modification followed by a multi shard +-- SELECT does not use 2PC, because only a single +-- machine involved in the modification +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (1, 1, 'yes') RETURNING *), + cte_2 AS (SELECT count(*) FROM event_responses_no_pkey) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- single shard local modification followed by remote multi-shard +-- modification uses 2PC as multiple nodes are involved in modifications +WITH cte_1 AS (INSERT INTO event_responses_no_pkey VALUES (2, 2, 'yes') RETURNING *), + cte_2 AS (UPDATE event_responses_no_pkey SET user_id = 1000 RETURNING *) +SELECT bool_or(coordinated_transaction_should_use_2PC()) FROM cte_1, 
cte_2; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- a local SELECT followed by a remote multi-shard UPDATE requires to +-- use actual 2PC as multiple nodes are involved in modifications +BEGIN; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + UPDATE event_responses_no_pkey SET user_id = 1; +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- a local SELECT followed by a remote single-shard UPDATE does not require to +-- use actual 2PC. This is because a single node is involved in modification +BEGIN; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; + UPDATE event_responses_no_pkey SET user_id = 1 WHERE event_id = 1; +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + +-- a remote single-shard UPDATE followed by a local single shard SELECT +-- does not require to use actual 2PC. This is because a single node +-- is involved in modification +BEGIN; + UPDATE event_responses_no_pkey SET user_id = 1 WHERE event_id = 1; + SELECT count(*) FROM event_responses_no_pkey WHERE event_id = 2; +COMMIT; +SELECT count(*) FROM pg_dist_transaction; +SELECT recover_prepared_transactions(); + \c - - - :master_port -- verify the local_hostname guc is used for local executions that should connect to the diff --git a/src/test/regress/sql/local_shard_execution_replicated.sql b/src/test/regress/sql/local_shard_execution_replicated.sql index 81b47cfc8..a8fe72b98 100644 --- a/src/test/regress/sql/local_shard_execution_replicated.sql +++ b/src/test/regress/sql/local_shard_execution_replicated.sql @@ -194,7 +194,9 @@ SELECT * FROM second_distributed_table WHERE key = 1 ORDER BY 1,2; -- Put row back for other tests INSERT INTO distributed_table VALUES (1, '22', 20); +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW abcd_view AS SELECT * FROM abcd; +RESET citus.enable_ddl_propagation; SELECT * FROM abcd first join abcd second on first.b = second.b ORDER BY 1,2,3,4; @@ -729,15 +731,19 @@ ROLLBACK; -- probably not a realistic case since views are not very -- well supported with MX +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution AS SELECT * FROM distributed_table WHERE key = 500; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution; -- similar test, but this time the view itself is a non-local -- query, but the query on the view is local +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW v_local_query_execution_2 AS SELECT * FROM distributed_table; +RESET citus.enable_ddl_propagation; SELECT * FROM v_local_query_execution_2 WHERE key = 500; diff --git a/src/test/regress/sql/master_copy_shard_placement.sql b/src/test/regress/sql/master_copy_shard_placement.sql index 8d5ed13b3..30f36d56d 100644 --- a/src/test/regress/sql/master_copy_shard_placement.sql +++ b/src/test/regress/sql/master_copy_shard_placement.sql @@ -95,7 +95,9 @@ SET citus.shard_replication_factor TO 1; -- metadata sync will succeed even if we have rep > 1 tables INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0); INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles_single_shard'::regclass::oid, 0); +SET client_min_messages TO warning; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +RESET client_min_messages; CREATE TABLE mx_table(a int); SELECT 
create_distributed_table('mx_table', 'a'); diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql index b24c79232..4162ad7c6 100644 --- a/src/test/regress/sql/multi_cluster_management.sql +++ b/src/test/regress/sql/multi_cluster_management.sql @@ -8,6 +8,11 @@ ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1; SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); +-- I am coordinator +SELECT citus_is_coordinator(); +-- workers are not coordinator +SELECT result FROM run_command_on_workers('SELECT citus_is_coordinator()'); + -- get the active nodes SELECT master_get_active_worker_nodes(); @@ -51,6 +56,19 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); +-- disable node with sync/force options +SELECT citus_disable_node('localhost', :worker_1_port); +SELECT citus_disable_node('localhost', :worker_1_port, synchronous:=true); +SELECT run_command_on_workers($$SELECT array_agg(isactive ORDER BY nodeport) FROM pg_dist_node WHERE hasmetadata and noderole='primary'::noderole AND nodecluster='default'$$); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + +-- disable node with sync/force options +SELECT citus_disable_node('localhost', :worker_2_port, synchronous:=true); +SELECT run_command_on_workers($$SELECT array_agg(isactive ORDER BY nodeport) FROM pg_dist_node WHERE hasmetadata and noderole='primary'::noderole AND nodecluster='default'$$); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); @@ -466,5 +484,10 @@ SELECT * from master_set_node_property('localhost', :worker_2_port, 'bogusproper DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated; +BEGIN; + SELECT start_metadata_sync_to_all_nodes(); +COMMIT; +SELECT start_metadata_sync_to_all_nodes(); + -- verify that at the end of this file, all primary nodes have metadata synced SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; diff --git a/src/test/regress/sql/multi_drop_extension.sql b/src/test/regress/sql/multi_drop_extension.sql index 7df4a19c0..8fd1daf27 100644 --- a/src/test/regress/sql/multi_drop_extension.sql +++ b/src/test/regress/sql/multi_drop_extension.sql @@ -67,6 +67,73 @@ DROP SCHEMA test_schema CASCADE; DROP EXTENSION citus CASCADE; \set VERBOSITY DEFAULT +-- Test if metadatacache is cleared after a rollback +BEGIN; +CREATE EXTENSION citus; +ROLLBACK; +CREATE EXTENSION citus; +DROP EXTENSION citus; + +-- Test if metadatacache is cleared for rollback subtransactions +BEGIN; +SAVEPOINT my_savepoint; +CREATE EXTENSION citus; +ROLLBACK TO SAVEPOINT my_savepoint; +CREATE EXTENSION citus; +COMMIT; +DROP EXTENSION citus; + +-- Test if metadatacache is cleared if subtransaction commits but parent rolls back +BEGIN; +SAVEPOINT my_savepoint; +CREATE EXTENSION citus; +RELEASE SAVEPOINT my_savepoint; +ROLLBACK; +CREATE EXTENSION citus; +DROP EXTENSION citus; + +-- Test if metadatacache is cleared if we release a savepoint and rollback +BEGIN; +SAVEPOINT s1; +SAVEPOINT s2; +CREATE EXTENSION citus; +RELEASE SAVEPOINT s1; +ROLLBACK; +CREATE EXTENSION citus; +DROP EXTENSION citus; + +-- Test if metadatacache 
is cleared on a rollback in a nested subtransaction +BEGIN; +SAVEPOINT s1; +SAVEPOINT s2; +CREATE EXTENSION citus; +ROLLBACK TO s1; +CREATE EXTENSION citus; +COMMIT; +DROP EXTENSION citus; + +-- Test if metadatacache is cleared after columnar table is made and rollback happens +BEGIN; +SAVEPOINT s1; +CREATE EXTENSION citus; +SAVEPOINT s2; +CREATE TABLE foo1 (i int) using columnar; +SAVEPOINT s3; +ROLLBACK TO SAVEPOINT s1; +ROLLBACK; +CREATE EXTENSION citus; +DROP EXTENSION citus; + +-- Test with a release and rollback in transactions +BEGIN; +SAVEPOINT s1; +SAVEPOINT s2; +CREATE EXTENSION citus; +RELEASE SAVEPOINT s1; +SAVEPOINT s3; +SAVEPOINT s4; +ROLLBACK TO SAVEPOINT s3; +ROLLBACK; CREATE EXTENSION citus; -- this function is dropped in Citus10, added here for tests diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 97ace0673..0cab58944 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -92,12 +92,13 @@ FROM pg_depend AS pgd, WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND - pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar') + pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal') ORDER BY 1, 2; -- DROP EXTENSION pre-created by the regression suite DROP EXTENSION citus; +DROP EXTENSION citus_columnar; \c -- these tests switch between citus versions and call ddl's that require pg_dist_object to be created @@ -315,8 +316,8 @@ VACUUM columnar_table; TRUNCATE columnar_table; DROP TABLE columnar_table; CREATE INDEX ON columnar_table (a); -SELECT alter_columnar_table_set('columnar_table', compression => 'pglz'); -SELECT alter_columnar_table_reset('columnar_table'); +ALTER TABLE columnar_table SET(columnar.compression = pglz); +ALTER TABLE columnar_table RESET (columnar.compression); INSERT INTO columnar_table SELECT * FROM columnar_table; SELECT 1 FROM columnar_table; -- columnar custom scan @@ -459,10 +460,24 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '11.0-1'; SELECT * FROM multi_extension.print_extension_changes(); +-- Snapshot of state at 11.0-2 +ALTER EXTENSION citus UPDATE TO '11.0-2'; +SELECT * FROM multi_extension.print_extension_changes(); + +-- Test downgrade script (result should be empty) +ALTER EXTENSION citus UPDATE TO '11.0-1'; +ALTER EXTENSION citus UPDATE TO '11.0-2'; +SELECT * FROM multi_extension.print_extension_changes(); + -- Snapshot of state at 11.1-1 ALTER EXTENSION citus UPDATE TO '11.1-1'; SELECT * FROM multi_extension.print_extension_changes(); +-- Test downgrade script (result should be empty) +ALTER EXTENSION citus UPDATE TO '11.0-2'; +ALTER EXTENSION citus UPDATE TO '11.1-1'; +SELECT * FROM multi_extension.print_extension_changes(); + DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version @@ -476,13 +491,14 @@ FROM pg_depend AS pgd, WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND - pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar') + pgio.schema NOT IN ('pg_catalog', 'citus', 'citus_internal', 'test', 'columnar', 'columnar_internal') ORDER BY 1, 2; -- see incompatible version errors out RESET citus.enable_version_checks; RESET columnar.enable_version_checks; DROP EXTENSION citus; +DROP EXTENSION citus_columnar; CREATE EXTENSION citus VERSION '8.0-1'; -- 
Test non-distributed queries work even in version mismatch @@ -547,6 +563,7 @@ ALTER EXTENSION citus UPDATE; -- re-create in newest version DROP EXTENSION citus; +DROP EXTENSION citus_columnar; \c CREATE EXTENSION citus; @@ -554,6 +571,7 @@ CREATE EXTENSION citus; \c - - - :worker_1_port DROP EXTENSION citus; +DROP EXTENSION citus_columnar; SET citus.enable_version_checks TO 'false'; SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; diff --git a/src/test/regress/sql/multi_generate_ddl_commands.sql b/src/test/regress/sql/multi_generate_ddl_commands.sql index 4237d62b0..b4d04931a 100644 --- a/src/test/regress/sql/multi_generate_ddl_commands.sql +++ b/src/test/regress/sql/multi_generate_ddl_commands.sql @@ -116,7 +116,7 @@ ALTER TABLE fiddly_table SELECT master_get_table_ddl_events('fiddly_table'); --- propagating views is not supported +-- propagating views is not supported if local table dependency exists CREATE VIEW local_view AS SELECT * FROM simple_table; SELECT master_get_table_ddl_events('local_view'); diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql index 414b0d73f..81b6f5784 100644 --- a/src/test/regress/sql/multi_index_statements.sql +++ b/src/test/regress/sql/multi_index_statements.sql @@ -8,7 +8,6 @@ -- -- CREATE TEST TABLES -- - CREATE SCHEMA multi_index_statements; CREATE SCHEMA multi_index_statements_2; SET search_path TO multi_index_statements; @@ -22,7 +21,7 @@ SELECT master_create_empty_shard('index_test_range'); SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 2; -CREATE TABLE index_test_hash(a int, b int, c int); +CREATE TABLE index_test_hash(a int, b int, c int, a_text text, b_text text); SELECT create_distributed_table('index_test_hash', 'a', 'hash'); CREATE TABLE index_test_append(a int, b int, c int); @@ -74,9 +73,27 @@ END; $$ LANGUAGE plpgsql; SELECT create_distributed_function('multi_index_statements_2.value_plus_one(int)'); +CREATE FUNCTION predicate_stable() RETURNS bool IMMUTABLE +LANGUAGE plpgsql AS $$ +BEGIN + EXECUTE 'SELECT txid_current()'; + RETURN true; +END; $$; + CREATE INDEX ON index_test_hash ((value_plus_one(b))); +CREATE INDEX ON index_test_hash ((value_plus_one(b) + value_plus_one(c))) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash (a) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash (abs(a)) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash (value_plus_one(a)) WHERE c > 10; CREATE INDEX ON index_test_hash ((multi_index_statements.value_plus_one(b))); CREATE INDEX ON index_test_hash ((multi_index_statements_2.value_plus_one(b))); +CREATE INDEX ON index_test_hash (a) INCLUDE (b) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash (c, (c+0)) INCLUDE (a); +CREATE INDEX ON index_test_hash (value_plus_one(a)) INCLUDE (c,b) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash ((a_text || b_text)); +CREATE INDEX ON index_test_hash ((a_text || b_text)) WHERE value_plus_one(c) > 10; +CREATE INDEX ON index_test_hash ((a_text || b_text)) WHERE (a_text || b_text) = 'ttt'; +CREATE INDEX CONCURRENTLY ON index_test_hash (a) WHERE predicate_stable(); -- Verify that we handle if not exists statements correctly CREATE INDEX lineitem_orderkey_index on public.lineitem(l_orderkey); @@ -104,13 +121,7 @@ CLUSTER local_table USING local_table_index; DROP TABLE local_table; --- Verify that all indexes got created on the master node and one of the workers -SELECT * FROM pg_indexes WHERE tablename = 
'lineitem' or tablename like 'index_test_%' ORDER BY indexname; -\c - - - :worker_1_port -SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_%' ORDER BY relname LIMIT 1); -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash_%'; -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range_%'; -SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append_%'; + \c - - - :master_port SET search_path TO multi_index_statements, public; @@ -138,9 +149,6 @@ CREATE INDEX ON lineitem (l_orderkey); CREATE UNIQUE INDEX ON index_test_hash(a); CREATE INDEX CONCURRENTLY ON lineitem USING hash (l_shipdate); --- Verify that none of failed indexes got created on the master node -SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; - -- -- REINDEX -- @@ -183,6 +191,30 @@ DROP INDEX index_test_hash_index_a_b_partial; -- Verify that we can drop indexes concurrently DROP INDEX CONCURRENTLY lineitem_concurrently_index; +-- Verify that all indexes got created on the coordinator node and on the workers +-- by dropping the indexes. We do this because in different PG versions, +-- the expression indexes are named differently +-- and, being able to drop the index ensures that the index names are +-- proper +CREATE OR REPLACE FUNCTION drop_all_indexes(table_name regclass) RETURNS INTEGER AS $$ +DECLARE + i RECORD; +BEGIN + FOR i IN + (SELECT indexrelid::regclass::text as relname FROM pg_index + WHERE indrelid = table_name and indexrelid::regclass::text not ilike '%pkey%') + LOOP + EXECUTE 'DROP INDEX ' || i.relname; + END LOOP; +RETURN 1; +END; +$$ LANGUAGE plpgsql; + +SELECT drop_all_indexes('public.lineitem'); +SELECT drop_all_indexes('index_test_range'); +SELECT drop_all_indexes('index_test_hash'); +SELECT drop_all_indexes('index_test_append'); + -- Verify that all the indexes are dropped from the master and one worker node. -- As there's a primary key, so exclude those from this check. 
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%' ORDER BY 1,2; diff --git a/src/test/regress/sql/multi_multiuser.sql b/src/test/regress/sql/multi_multiuser.sql index 8fcb89e1e..81970df62 100644 --- a/src/test/regress/sql/multi_multiuser.sql +++ b/src/test/regress/sql/multi_multiuser.sql @@ -137,27 +137,26 @@ SET columnar.chunk_group_row_limit = 1050; -- create columnar table CREATE TABLE columnar_table (a int) USING columnar; -- alter a columnar table that is created by that unprivileged user -SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000); +ALTER TABLE columnar_table SET (columnar.chunk_group_row_limit = 2000); -- insert some data and read INSERT INTO columnar_table VALUES (1), (1); SELECT * FROM columnar_table; -- Fail to alter a columnar table that is created by a different user SET ROLE full_access; -SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000); +ALTER TABLE columnar_table SET (columnar.chunk_group_row_limit = 2000); -- Fail to reset a columnar table value created by a different user -SELECT alter_columnar_table_reset('columnar_table', chunk_group_row_limit => true); +ALTER TABLE columnar_table RESET (columnar.chunk_group_row_limit); SET ROLE read_access; -- and drop it DROP TABLE columnar_table; -- cannot modify columnar metadata table as unprivileged user -INSERT INTO columnar.stripe VALUES(99); +INSERT INTO columnar_internal.stripe VALUES(99); -- Cannot drop columnar metadata table as unprivileged user. -- Privileged user also cannot drop but with a different error message. -- (since citus extension has a dependency to it) -DROP TABLE columnar.chunk; +DROP TABLE columnar_internal.chunk; --- cannot read columnar.chunk since it could expose chunk min/max values SELECT * FROM columnar.chunk; -- test whether a read-only user can read from citus_tables view diff --git a/src/test/regress/sql/multi_mx_function_call_delegation.sql b/src/test/regress/sql/multi_mx_function_call_delegation.sql index 0efcea922..e4eeaebc2 100644 --- a/src/test/regress/sql/multi_mx_function_call_delegation.sql +++ b/src/test/regress/sql/multi_mx_function_call_delegation.sql @@ -2,6 +2,7 @@ CREATE SCHEMA multi_mx_function_call_delegation; SET search_path TO multi_mx_function_call_delegation, public; +\set VERBOSITY terse SET citus.shard_replication_factor TO 2; @@ -256,6 +257,7 @@ select start_metadata_sync_to_node('localhost', :worker_2_port); -- worker backend caches inconsistent. Reconnect to coordinator to use -- new worker connections, hence new backends. 
\c - - - :master_port +\set VERBOSITY terse SET search_path to multi_mx_function_call_delegation, public; SET client_min_messages TO DEBUG1; SET citus.shard_replication_factor = 1; @@ -310,6 +312,7 @@ EXECUTE call_plan(2, 0); EXECUTE call_plan(2, 0); \c - - - :worker_1_port +\set VERBOSITY terse SET search_path TO multi_mx_function_call_delegation, public; -- create_distributed_function is disallowed from worker nodes select create_distributed_function('mx_call_func(int,int)'); @@ -343,6 +346,7 @@ END; $$ LANGUAGE plpgsql; \c - - - :master_port +\set VERBOSITY terse SET search_path TO multi_mx_function_call_delegation, public; RESET client_min_messages; diff --git a/src/test/regress/sql/multi_mx_hide_shard_names.sql b/src/test/regress/sql/multi_mx_hide_shard_names.sql index b56329150..281815d4c 100644 --- a/src/test/regress/sql/multi_mx_hide_shard_names.sql +++ b/src/test/regress/sql/multi_mx_hide_shard_names.sql @@ -43,7 +43,9 @@ SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_name -- make sure that pg_class queries do not get blocked on table locks begin; +SET LOCAL citus.enable_ddl_propagation TO OFF; lock table test_table in access exclusive mode; + prepare transaction 'take-aggressive-lock'; -- shards are hidden when using psql as application_name @@ -67,7 +69,7 @@ SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_name SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; -- changing application_name reveals the shards -SET application_name TO ''; +SET application_name TO 'pg_regress'; SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; RESET application_name; @@ -76,7 +78,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name -- changing application_name in transaction reveals the shards BEGIN; -SET LOCAL application_name TO ''; +SET LOCAL application_name TO 'pg_regress'; SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; ROLLBACK; @@ -85,7 +87,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name -- now with session-level GUC, but ROLLBACK; BEGIN; -SET application_name TO ''; +SET application_name TO 'pg_regress'; ROLLBACK; -- shards are hidden again after GUCs are reset @@ -94,7 +96,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name -- we should hide correctly based on application_name with savepoints BEGIN; SAVEPOINT s1; -SET application_name TO ''; +SET application_name TO 'pg_regress'; -- changing application_name reveals the shards SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; ROLLBACK TO SAVEPOINT s1; @@ -102,9 +104,9 @@ ROLLBACK TO SAVEPOINT s1; SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; ROLLBACK; --- changing citus.hide_shards_from_app_name_prefixes reveals the shards +-- changing citus.show_shards_for_app_name_prefix reveals the shards BEGIN; -SET LOCAL citus.hide_shards_from_app_name_prefixes TO 'notpsql'; +SET LOCAL citus.show_shards_for_app_name_prefixes TO 'psql'; SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; ROLLBACK; diff --git a/src/test/regress/sql/multi_mx_truncate_from_worker.sql 
b/src/test/regress/sql/multi_mx_truncate_from_worker.sql index 380c4b9dd..0df93840e 100644 --- a/src/test/regress/sql/multi_mx_truncate_from_worker.sql +++ b/src/test/regress/sql/multi_mx_truncate_from_worker.sql @@ -146,8 +146,8 @@ RESET client_min_messages; -- also test the infrastructure that is used for supporting -- TRUNCATE from worker nodes --- should fail since it is not in transaction block -SELECT lock_relation_if_exists('on_update_fkey_table', 'ACCESS SHARE'); +-- should pass since we don't check for xact block in lock_relation_if_exists +SELECT lock_relation_if_exists('truncate_from_workers.on_update_fkey_table', 'ACCESS SHARE'); BEGIN; -- should fail since the schema is not provided diff --git a/src/test/regress/sql/multi_partitioning.sql b/src/test/regress/sql/multi_partitioning.sql index 91bbc180b..cbfa20440 100644 --- a/src/test/regress/sql/multi_partitioning.sql +++ b/src/test/regress/sql/multi_partitioning.sql @@ -1992,7 +1992,9 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2; -- should work properly - no names clashes +SET client_min_messages TO WARNING; SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); +RESET client_min_messages; \c - - - :worker_1_port -- check that indexes are named properly diff --git a/src/test/regress/sql/multi_remove_node_reference_table.sql b/src/test/regress/sql/multi_remove_node_reference_table.sql index 310002b74..37c5a0cb2 100644 --- a/src/test/regress/sql/multi_remove_node_reference_table.sql +++ b/src/test/regress/sql/multi_remove_node_reference_table.sql @@ -580,13 +580,19 @@ WHERE ORDER BY shardid ASC; \c - - - :master_port - +SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); SELECT citus_disable_node('localhost', :worker_2_port); SELECT public.wait_until_metadata_sync(); -- status after citus_disable_node_and_wait SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; +-- never mark coordinator metadatasynced = false +SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport = :master_port; + +SELECT 1 FROM citus_remove_node('localhost', :master_port); + + SELECT shardid, shardstate, shardlength, nodename, nodeport FROM diff --git a/src/test/regress/sql/multi_truncate.sql b/src/test/regress/sql/multi_truncate.sql index 252d02ab1..fa532fd82 100644 --- a/src/test/regress/sql/multi_truncate.sql +++ b/src/test/regress/sql/multi_truncate.sql @@ -114,42 +114,13 @@ DROP TABLE test_truncate_range; -- expect shard to be present, data to be truncated -- CREATE TABLE test_truncate_hash(a int); -SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash'); - --- verify no error is thrown when no shards are present -TRUNCATE TABLE test_truncate_hash; - -SELECT count(*) FROM test_truncate_hash; +SELECT create_distributed_table('test_truncate_hash', 'a', 'hash'); INSERT INTO test_truncate_hash values (1); INSERT INTO test_truncate_hash values (1001); INSERT INTO test_truncate_hash values (2000); INSERT INTO test_truncate_hash values (100); -SELECT count(*) FROM test_truncate_hash; - --- verify 4 shards are present -SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; - -TRUNCATE TABLE test_truncate_hash; - -SELECT master_create_worker_shards('test_truncate_hash', 4, 1); - -INSERT INTO test_truncate_hash values (1); -INSERT INTO test_truncate_hash values (1001); -INSERT INTO test_truncate_hash values (2000); -INSERT INTO test_truncate_hash 
values (100); - -SELECT count(*) FROM test_truncate_hash; - -TRUNCATE TABLE test_truncate_hash; - --- verify data is truncated from the table -SELECT count(*) FROM test_truncate_hash; - --- verify 4 shards are still presents -SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; - -- verify that truncate can be aborted INSERT INTO test_truncate_hash VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_hash; ROLLBACK; diff --git a/src/test/regress/sql/nested_execution.sql b/src/test/regress/sql/nested_execution.sql new file mode 100644 index 000000000..4d0d9336d --- /dev/null +++ b/src/test/regress/sql/nested_execution.sql @@ -0,0 +1,63 @@ +SET search_path TO nested_execution; +SET citus.enable_local_execution TO on; +\set VERBOSITY terse + +-- nested execution from queries on distributed tables is generally disallowed +SELECT dist_query_single_shard(key) FROM distributed WHERE key = 1; +SELECT dist_query_multi_shard() FROM distributed WHERE key = 1; +SELECT ref_query() FROM distributed WHERE key = 1; + +SELECT dist_query_single_shard(key) FROM distributed LIMIT 1; +SELECT dist_query_multi_shard() FROM distributed LIMIT 1; +SELECT ref_query() FROM distributed LIMIT 1; + +-- nested execution is allowed outside of an aggregate +-- note that this behaviour is different if distributed has only 1 shard +-- however, this test always uses 4 shards +SELECT dist_query_single_shard(count(*)::int) FROM distributed; +SELECT dist_query_multi_shard()+count(*) FROM distributed; +SELECT ref_query()+count(*) FROM distributed; + +-- nested execution is allowed in a query that only has intermediate results +SELECT dist_query_single_shard(key) FROM (SELECT key FROM distributed LIMIT 1) s; +SELECT dist_query_multi_shard() FROM (SELECT key FROM distributed LIMIT 1) s; +SELECT ref_query() FROM (SELECT key FROM distributed LIMIT 1) s; + +-- nested execution from queries on reference tables is generally allowed +SELECT dist_query_single_shard(id::int) FROM reference WHERE id = 1; +SELECT dist_query_multi_shard() FROM reference WHERE id = 1; +SELECT ref_query() FROM reference WHERE id = 1; + +-- repeat checks in insert..select (somewhat different code path) +INSERT INTO distributed SELECT dist_query_single_shard(key) FROM distributed WHERE key = 1; +INSERT INTO distributed SELECT dist_query_multi_shard() FROM distributed WHERE key = 1; +INSERT INTO distributed SELECT ref_query() FROM distributed WHERE key = 1; + +INSERT INTO distributed SELECT dist_query_single_shard(key) FROM distributed LIMIT 1; +INSERT INTO distributed SELECT dist_query_multi_shard() FROM distributed LIMIT 1; +INSERT INTO distributed SELECT ref_query() FROM distributed LIMIT 1; + +BEGIN; +INSERT INTO distributed SELECT dist_query_single_shard(count(*)::int) FROM distributed; +INSERT INTO distributed SELECT dist_query_multi_shard()+count(*) FROM distributed; +INSERT INTO distributed SELECT ref_query()+count(*) FROM distributed; +ROLLBACK; + +BEGIN; +INSERT INTO distributed SELECT dist_query_single_shard(key) FROM (SELECT key FROM distributed LIMIT 1) s; +INSERT INTO distributed SELECT dist_query_multi_shard() FROM (SELECT key FROM distributed LIMIT 1) s; +INSERT INTO distributed SELECT ref_query() FROM (SELECT key FROM distributed LIMIT 1) s; +ROLLBACK; + +BEGIN; +INSERT INTO distributed SELECT dist_query_single_shard(id::int) FROM reference WHERE id = 1; +INSERT INTO distributed SELECT dist_query_multi_shard() FROM reference WHERE id = 1; +INSERT INTO distributed SELECT ref_query() FROM reference WHERE 
id = 1; +ROLLBACK; + +-- nested execution without local execution is disallowed (not distinguishable from queries on shard) +SET citus.enable_local_execution TO off; + +SELECT dist_query_single_shard(id::int) FROM reference WHERE id = 1; +SELECT dist_query_multi_shard() FROM reference WHERE id = 1; +SELECT ref_query() FROM reference WHERE id = 1; diff --git a/src/test/regress/sql/nested_execution_create.sql b/src/test/regress/sql/nested_execution_create.sql new file mode 100644 index 000000000..1c7cfa638 --- /dev/null +++ b/src/test/regress/sql/nested_execution_create.sql @@ -0,0 +1,48 @@ +CREATE SCHEMA nested_execution; +SET search_path TO nested_execution; + +-- some of the nested_execution tests change for single shard +SET citus.shard_count TO 4; + +CREATE TABLE distributed (key int, name text, + created_at timestamptz DEFAULT now()); +CREATE TABLE reference (id bigint PRIMARY KEY, title text); + +SELECT create_distributed_table('distributed', 'key'); +SELECT create_reference_table('reference'); + +INSERT INTO distributed SELECT i, i::text, now() FROM generate_series(1,10)i; +INSERT INTO reference SELECT i, i::text FROM generate_series(1,10)i; + +CREATE FUNCTION dist_query_single_shard(p_key int) +RETURNS bigint +LANGUAGE plpgsql AS $$ +DECLARE + result bigint; +BEGIN + SELECT count(*) INTO result FROM nested_execution.distributed WHERE key = p_key; + RETURN result; +END; +$$; + +CREATE FUNCTION dist_query_multi_shard() +RETURNS bigint +LANGUAGE plpgsql AS $$ +DECLARE + result bigint; +BEGIN + SELECT count(*) INTO result FROM nested_execution.distributed; + RETURN result; +END; +$$; + +CREATE FUNCTION ref_query() +RETURNS bigint +LANGUAGE plpgsql AS $$ +DECLARE + result bigint; +BEGIN + SELECT count(*) INTO result FROM nested_execution.reference; + RETURN result; +END; +$$; diff --git a/src/test/regress/sql/non_colocated_subquery_joins.sql b/src/test/regress/sql/non_colocated_subquery_joins.sql index d8a2aaea1..0c8953d2c 100644 --- a/src/test/regress/sql/non_colocated_subquery_joins.sql +++ b/src/test/regress/sql/non_colocated_subquery_joins.sql @@ -785,7 +785,6 @@ SELECT count(*) FROM events_table WHERE user_id NOT IN -- make sure that non-colocated subquery joins work fine in -- modifications CREATE TABLE table1 (id int, tenant_id int); -CREATE VIEW table1_view AS SELECT * from table1 where id < 100; CREATE TABLE table2 (id int, tenant_id int) partition by range(tenant_id); CREATE TABLE table2_p1 PARTITION OF table2 FOR VALUES FROM (1) TO (10); @@ -795,6 +794,8 @@ SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('table2','tenant_id'); SELECT create_distributed_table('table1','tenant_id'); +CREATE VIEW table1_view AS SELECT * from table1 where id < 100; + -- all of the above queries are non-colocated subquery joins -- because the views are replaced with subqueries UPDATE table2 SET id=20 FROM table1_view WHERE table1_view.id=table2.id; diff --git a/src/test/regress/sql/pg12.sql b/src/test/regress/sql/pg12.sql index 2211288f4..4e369106c 100644 --- a/src/test/regress/sql/pg12.sql +++ b/src/test/regress/sql/pg12.sql @@ -388,11 +388,11 @@ CREATE TABLE superuser_columnar_table (a int) USING columnar; CREATE USER read_access; SET ROLE read_access; --- user shouldn't be able to execute alter_columnar_table_set --- or alter_columnar_table_reset for a columnar table that it +-- user shouldn't be able to execute ALTER TABLE ... SET +-- or ALTER TABLE ... 
RESET for a columnar table that it -- doesn't own -SELECT alter_columnar_table_set('test_pg12.superuser_columnar_table', chunk_group_row_limit => 100); -SELECT alter_columnar_table_reset('test_pg12.superuser_columnar_table'); +ALTER TABLE test_pg12.superuser_columnar_table SET(columnar.chunk_group_row_limit = 100); +ALTER TABLE test_pg12.superuser_columnar_table RESET (columnar.chunk_group_row_limit); RESET ROLE; DROP USER read_access; diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql index df3ba4e6c..d64076b26 100644 --- a/src/test/regress/sql/pg14.sql +++ b/src/test/regress/sql/pg14.sql @@ -671,6 +671,36 @@ SELECT citus_add_local_table_to_metadata('foreign_table'); SELECT count(*) FROM foreign_table; TRUNCATE foreign_table; + +-- test truncating foreign tables in the same statement with +-- other distributed tables + +CREATE TABLE foreign_table_test_2 (id integer NOT NULL, data text, a bigserial); +CREATE FOREIGN TABLE foreign_table_2 +( + id integer NOT NULL, + data text, + a bigserial +) + SERVER foreign_server + OPTIONS (schema_name 'pg14', table_name 'foreign_table_test_2'); + +SELECT citus_add_local_table_to_metadata('foreign_table_2'); + +CREATE TABLE dist_table_1(a int); +CREATE TABLE dist_table_2(a int); +CREATE TABLE dist_table_3(a int); + +SELECT create_distributed_table('dist_table_1', 'a'); +SELECT create_distributed_table('dist_table_2', 'a'); +SELECT create_reference_table('dist_table_3'); + +TRUNCATE foreign_table, foreign_table_2; +TRUNCATE dist_table_1, foreign_table, dist_table_2, foreign_table_2, dist_table_3; +TRUNCATE dist_table_1, dist_table_2, foreign_table, dist_table_3; +TRUNCATE dist_table_1, foreign_table, foreign_table_2, dist_table_3; +TRUNCATE dist_table_1, foreign_table, foreign_table_2, dist_table_3, dist_table_2; + \c - - - :worker_1_port set search_path to pg14; -- verify the foreign table is truncated diff --git a/src/test/regress/sql/postgres.sql b/src/test/regress/sql/postgres.sql index 77c6a3a7b..e0915d583 100644 --- a/src/test/regress/sql/postgres.sql +++ b/src/test/regress/sql/postgres.sql @@ -27,3 +27,17 @@ AS $function$ BEGIN END; $function$; + +CREATE OR REPLACE FUNCTION pg_catalog.create_distributed_function ( + function_name regprocedure, + distribution_arg_name text DEFAULT NULL, + colocate_with text DEFAULT 'default', + force_delegation bool DEFAULT NULL +) + RETURNS void + LANGUAGE plpgsql + CALLED ON NULL INPUT + AS $function$ + BEGIN + END; + $function$; diff --git a/src/test/regress/sql/propagate_foreign_servers.sql b/src/test/regress/sql/propagate_foreign_servers.sql index eea09b9ab..32cba12ef 100644 --- a/src/test/regress/sql/propagate_foreign_servers.sql +++ b/src/test/regress/sql/propagate_foreign_servers.sql @@ -55,9 +55,8 @@ CREATE SERVER foreign_server_to_drop FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'test'); ---should error -DROP SERVER foreign_server_dependent_schema, foreign_server_to_drop; DROP FOREIGN TABLE foreign_table; +DROP SERVER foreign_server_dependent_schema, foreign_server_to_drop; SELECT citus_remove_node('localhost', :master_port); SET client_min_messages TO ERROR; diff --git a/src/test/regress/sql/recursive_dml_queries_mx.sql b/src/test/regress/sql/recursive_dml_queries_mx.sql index 88b21e0b4..426fbc8ae 100644 --- a/src/test/regress/sql/recursive_dml_queries_mx.sql +++ b/src/test/regress/sql/recursive_dml_queries_mx.sql @@ -104,6 +104,7 @@ SET search_path TO recursive_dml_queries_mx, public; CREATE TABLE recursive_dml_queries_mx.local_table (id text, name text); INSERT INTO 
local_table SELECT i::text, 'user_' || i FROM generate_series (0, 100) i; +SET citus.enable_ddl_propagation TO OFF; CREATE VIEW tenant_ids AS SELECT tenant_id, name @@ -112,6 +113,7 @@ CREATE VIEW tenant_ids AS WHERE distributed_table.dept::text = reference_table.id ORDER BY 2 DESC, 1 DESC; +RESET citus.enable_ddl_propagation; -- we currently do not allow local tables in modification queries UPDATE diff --git a/src/test/regress/sql/resync_metadata_with_sequences.sql b/src/test/regress/sql/resync_metadata_with_sequences.sql index e0b263f52..446fa7e2d 100644 --- a/src/test/regress/sql/resync_metadata_with_sequences.sql +++ b/src/test/regress/sql/resync_metadata_with_sequences.sql @@ -55,6 +55,7 @@ INSERT INTO sensors VALUES (DEFAULT, DEFAULT, '2010-01-01') RETURNING *; INSERT INTO sensors_news VALUES (DEFAULT, DEFAULT, '2021-01-01') RETURNING *; \c - - - :master_port +SET client_min_messages TO ERROR; SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); diff --git a/src/test/regress/sql/run_command_on_all_nodes.sql b/src/test/regress/sql/run_command_on_all_nodes.sql index 46f838eb5..0004a74e7 100644 --- a/src/test/regress/sql/run_command_on_all_nodes.sql +++ b/src/test/regress/sql/run_command_on_all_nodes.sql @@ -27,15 +27,61 @@ SELECT tablename FROM pg_tables WHERE schemaname = 'run_command_on_all_nodes'; SELECT tablename FROM pg_tables WHERE schemaname = 'run_command_on_all_nodes'; \c - - - :master_port +SET search_path TO run_command_on_all_nodes; + SELECT result FROM run_command_on_all_nodes('SELECT tablename FROM pg_tables WHERE schemaname = ''run_command_on_all_nodes'';'); +CREATE TABLE test (x int, y int); +SELECT create_distributed_table('test','x'); + -- break a node and check messages +BEGIN; SELECT nodeid AS worker_1_nodeid FROM pg_dist_node WHERE nodeport = :worker_1_port \gset UPDATE pg_dist_node SET nodeport = 0 WHERE nodeid = :worker_1_nodeid; SELECT nodeid = :worker_1_nodeid AS "Is Worker 1", success, result FROM run_command_on_all_nodes('SELECT 1') ORDER BY 1; SELECT nodeid = :worker_1_nodeid AS "Is Worker 1", success, result FROM run_command_on_all_nodes('SELECT 1', give_warning_for_connection_errors:=true) ORDER BY 1; -UPDATE pg_dist_node SET nodeport = :worker_1_port WHERE nodeid = :worker_1_nodeid; +ROLLBACK; + +-- break connection to localhost +BEGIN; +UPDATE pg_dist_node SET nodeport = 0 WHERE groupid = 0; + +SELECT success, result +FROM run_command_on_coordinator('SELECT inet_server_port()') ORDER BY 1; + +SELECT success, result +FROM run_command_on_coordinator('SELECT inet_server_port()', give_warning_for_connection_errors:=true) ORDER BY 1; + +ROLLBACK; + +-- we cannot use run_command_on_coordinator from workers if coordinator is not in the metadata +SELECT success, result FROM run_command_on_all_nodes($$select result from run_command_on_coordinator('select inet_server_port()')$$); + +-- we can use run_command_on_coordinator from any node if the coordinator is in the metadata +SELECT citus_set_coordinator_host('localhost'); +SELECT success, result FROM run_command_on_all_nodes($$select result from run_command_on_coordinator('select inet_server_port()')$$); +SELECT success, result FROM run_command_on_all_nodes($$select result from run_command_on_coordinator('select count(*) from run_command_on_all_nodes.test')$$); +\c - - - :worker_1_port +-- poor man's DDL from worker +select result from run_command_on_coordinator($$create index on run_command_on_all_nodes.test (x)$$); +\c - - - :master_port +-- remove coordinator from metadata to restore pre-test 
situation +SELECT citus_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0; + +-- check that we fail when pg_dist_node is empty +BEGIN; +DELETE FROM pg_dist_node; +SELECT success, result FROM run_command_on_coordinator('select inet_server_port()'); +ROLLBACK; + +-- check that we can do distributed queries from worker nodes +SELECT success, result FROM run_command_on_all_nodes($$insert into run_command_on_all_nodes.test values (1,2)$$, true); +SELECT success, result FROM run_command_on_all_nodes($$insert into run_command_on_all_nodes.test values (1,2)$$, false); +SELECT success, result FROM run_command_on_all_nodes($$select count(*) from run_command_on_all_nodes.test$$); + +-- ddl commands are only allowed from the coordinator +SELECT success, result FROM run_command_on_all_nodes($$create index on run_command_on_all_nodes.test (x)$$); DROP SCHEMA run_command_on_all_nodes CASCADE; diff --git a/src/test/regress/sql/single_node.sql b/src/test/regress/sql/single_node.sql index 74c857d4e..21ae9e3ac 100644 --- a/src/test/regress/sql/single_node.sql +++ b/src/test/regress/sql/single_node.sql @@ -13,10 +13,16 @@ ALTER SYSTEM SET citus.max_cached_conns_per_worker TO 0; -- adding the coordinator as inactive is disallowed SELECT 1 FROM master_add_inactive_node('localhost', :master_port, groupid => 0); +-- before adding a node we are not officially a coordinator +SELECT citus_is_coordinator(); + -- idempotently add node to allow this test to run without add_coordinator SET client_min_messages TO WARNING; SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); +-- after adding a node we are officially a coordinator +SELECT citus_is_coordinator(); + -- coordinator cannot be disabled SELECT 1 FROM citus_disable_node('localhost', :master_port); @@ -1102,10 +1108,6 @@ BEGIN; SELECT coordinated_transaction_should_use_2PC(); ROLLBACK; --- same without transaction block -WITH cte_1 AS (UPDATE another_schema_table SET b = b + 1 WHERE a = 1 RETURNING *) -SELECT coordinated_transaction_should_use_2PC() FROM cte_1; - -- if the local execution is disabled, we cannot failover to -- local execution and the queries would fail SET citus.enable_local_execution TO false; diff --git a/src/test/regress/sql/start_stop_metadata_sync.sql b/src/test/regress/sql/start_stop_metadata_sync.sql index a23eba415..4e30cef1c 100644 --- a/src/test/regress/sql/start_stop_metadata_sync.sql +++ b/src/test/regress/sql/start_stop_metadata_sync.sql @@ -103,7 +103,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); \c - - - :worker_1_port SET search_path TO "start_stop_metadata_sync"; SELECT * FROM distributed_table_1; -CREATE VIEW test_view AS SELECT COUNT(*) FROM distributed_table_3; CREATE MATERIALIZED VIEW test_matview AS SELECT COUNT(*) FROM distributed_table_3; SELECT * FROM test_view; SELECT * FROM test_matview; diff --git a/src/test/regress/sql/upgrade_columnar_after.sql b/src/test/regress/sql/upgrade_columnar_after.sql index 4776a1576..df6a7d7da 100644 --- a/src/test/regress/sql/upgrade_columnar_after.sql +++ b/src/test/regress/sql/upgrade_columnar_after.sql @@ -31,11 +31,11 @@ SELECT * FROM test_alter_type ORDER BY a; SELECT * FROM matview ORDER BY a; -- test we retained options -SELECT * FROM columnar.options WHERE regclass = 'test_options_1'::regclass; +SELECT * FROM columnar.options WHERE relation = 'test_options_1'::regclass; VACUUM VERBOSE test_options_1; SELECT count(*), sum(a), sum(b) FROM test_options_1; -SELECT * FROM columnar.options WHERE regclass = 
'test_options_2'::regclass; +SELECT * FROM columnar.options WHERE relation = 'test_options_2'::regclass; VACUUM VERBOSE test_options_2; SELECT count(*), sum(a), sum(b) FROM test_options_2; @@ -145,7 +145,7 @@ ROLLBACK; SELECT pg_class.oid INTO columnar_schema_members FROM pg_class, pg_namespace WHERE pg_namespace.oid=pg_class.relnamespace AND - pg_namespace.nspname='columnar'; + pg_namespace.nspname='columnar_internal'; SELECT refobjid INTO columnar_schema_members_pg_depend FROM pg_depend WHERE classid = 'pg_am'::regclass::oid AND @@ -173,7 +173,7 @@ $$ SELECT pg_class.oid INTO columnar_schema_members FROM pg_class, pg_namespace WHERE pg_namespace.oid=pg_class.relnamespace AND - pg_namespace.nspname='columnar'; + pg_namespace.nspname='columnar_internal'; SELECT refobjid INTO columnar_schema_members_pg_depend FROM pg_depend WHERE classid = 'pg_am'::regclass::oid AND diff --git a/src/test/regress/sql/upgrade_columnar_metapage_after.sql b/src/test/regress/sql/upgrade_columnar_metapage_after.sql index e42c0c8da..d015d0b0d 100644 --- a/src/test/regress/sql/upgrade_columnar_metapage_after.sql +++ b/src/test/regress/sql/upgrade_columnar_metapage_after.sql @@ -63,7 +63,7 @@ SELECT version_major, version_minor, reserved_stripe_id, reserved_row_number FROM columnar_storage_info('no_data_columnar_table'); -- table is already upgraded, make sure that upgrade_columnar_metapage is no-op -SELECT citus_internal.upgrade_columnar_storage(c.oid) +SELECT columnar_internal.upgrade_columnar_storage(c.oid) FROM pg_class c, pg_am a WHERE c.relam = a.oid AND amname = 'columnar' and relname = 'columnar_table_2'; diff --git a/src/test/regress/sql/upgrade_post_11_after.sql b/src/test/regress/sql/upgrade_post_11_after.sql index a106b9fcf..71c15614f 100644 --- a/src/test/regress/sql/upgrade_post_11_after.sql +++ b/src/test/regress/sql/upgrade_post_11_after.sql @@ -4,11 +4,11 @@ SET search_path = post_11_upgrade; UPDATE pg_dist_node_metadata SET metadata=jsonb_set(metadata, '{partitioned_citus_table_exists_pre_11}', to_jsonb('true'::bool), true); SELECT citus_finalize_upgrade_to_citus11(enforce_version_check:=false); --- tables are objects with Citus 11+ -SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype) ORDER BY 1; +-- tables, views and their dependencies become objects with Citus 11+ +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.employees'::regclass, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.my_type_for_view'::regtype, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_table_for_view'::regclass, 'post_11_upgrade.non_dist_upgrade_test_view'::regclass, 'post_11_upgrade.non_dist_upgrade_test_view_local_join'::regclass, 'post_11_upgrade.non_dist_upgrade_multiple_dist_view'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 
'post_11_upgrade.reporting_line'::regclass, 'post_11_upgrade.v_test_1'::regclass, 'post_11_upgrade.v_test_2'::regclass, 'post_11_upgrade.owned_by_extension_table'::regclass, 'post_11_upgrade.materialized_view'::regclass, 'post_11_upgrade.owned_by_extension_view'::regclass, 'post_11_upgrade.local_type'::regtype, 'post_11_upgrade.non_dist_dist_table_for_view'::regclass, 'post_11_upgrade.depends_on_nothing_1'::regclass, 'post_11_upgrade.depends_on_nothing_2'::regclass, 'post_11_upgrade.depends_on_pg'::regclass, 'post_11_upgrade.depends_on_citus'::regclass, 'post_11_upgrade.depends_on_seq'::regclass, 'post_11_upgrade.depends_on_seq_and_no_support'::regclass) ORDER BY 1; -- on all nodes -SELECT run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype) ORDER BY 1;$$) ORDER BY 1; +SELECT run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) FROM pg_catalog.pg_dist_object WHERE objid IN ('post_11_upgrade'::regnamespace, 'post_11_upgrade.part_table'::regclass, 'post_11_upgrade.sensors'::regclass, 'post_11_upgrade.func_in_transaction_def'::regproc, 'post_11_upgrade.partial_index_test_config'::regconfig, 'post_11_upgrade.my_type'::regtype, 'post_11_upgrade.view_for_upgrade_test'::regclass, 'post_11_upgrade.view_for_upgrade_test_my_type'::regclass, 'post_11_upgrade.non_dist_upgrade_ref_view_2'::regclass, 'post_11_upgrade.reporting_line'::regclass) ORDER BY 1;$$) ORDER BY 1; -- Create the necessary test utility function CREATE OR REPLACE FUNCTION activate_node_snapshot() @@ -39,4 +39,6 @@ UNION EXCEPT SELECT unnest(activate_node_snapshot()) as command ) -) AS foo WHERE command NOT ILIKE '%distributed_object_data%'; +) AS foo WHERE command NOT ILIKE '%distributed_object_data%' and +-- sequences differ per node, so exclude +command NOT ILIKE '%sequence%'; diff --git a/src/test/regress/sql/upgrade_post_11_before.sql b/src/test/regress/sql/upgrade_post_11_before.sql index 959b026f8..abd61d44d 100644 --- a/src/test/regress/sql/upgrade_post_11_before.sql +++ b/src/test/regress/sql/upgrade_post_11_before.sql @@ -104,6 +104,19 @@ INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; +-- table for recursive view +CREATE TABLE employees (employee_id int, manager_id int, full_name text); +SELECT create_distributed_table('employees', 'employee_id'); + +-- table for owned_by_extension +-- note that tables owned by extension are +-- not added to the pg_dist_object, and assumed +-- to exists on all nodes via the extension +CREATE TABLE owned_by_extension_table (employee_id int, manager_id int, full_name text); +ALTER EXTENSION plpgsql ADD TABLE post_11_upgrade.owned_by_extension_table; +SELECT create_distributed_table('owned_by_extension_table', 'employee_id'); +SELECT run_command_on_workers($$CREATE TABLE post_11_upgrade.owned_by_extension_table (employee_id int, manager_id int, full_name text);$$); +SELECT run_command_on_workers($$ALTER EXTENSION plpgsql ADD TABLE post_11_upgrade.owned_by_extension_table;$$); SET citus.enable_ddl_propagation TO 
off; CREATE TEXT SEARCH CONFIGURATION post_11_upgrade.partial_index_test_config ( parser = default ); @@ -129,6 +142,83 @@ END; $$;'); CREATE TYPE post_11_upgrade.my_type AS (a int); +CREATE VIEW post_11_upgrade.view_for_upgrade_test AS SELECT * FROM sensors; + +-- one normally would not need views on the workers pre-11, but it is still +-- a nice test to have +SELECT run_command_on_workers('SET citus.enable_ddl_propagation TO off; +CREATE VIEW post_11_upgrade.view_for_upgrade_test AS SELECT * FROM sensors;'); + + +-- a non-distributed type dependency to a view +-- both the view and the type should be distributed after the upgrade +CREATE TYPE post_11_upgrade.my_type_for_view AS (a int); +CREATE VIEW post_11_upgrade.view_for_upgrade_test_my_type (casted) AS SELECT row(measureid)::post_11_upgrade.my_type_for_view FROM sensors; + +-- a local type, table and view, should not be distributed +-- after the upgrade +CREATE TYPE post_11_upgrade.local_type AS (a int); +CREATE TABLE post_11_upgrade.non_dist_table_for_view(a int, b post_11_upgrade.local_type); +CREATE VIEW post_11_upgrade.non_dist_upgrade_test_view AS SELECT * FROM non_dist_table_for_view; + +-- a local table joined with a distributed table. In other words, the view has a local table dependency +-- and should not be distributed after the upgrade +CREATE TABLE post_11_upgrade.non_dist_dist_table_for_view(a int); +CREATE VIEW post_11_upgrade.non_dist_upgrade_test_view_local_join AS SELECT * FROM non_dist_table_for_view JOIN sensors ON (true); + +-- a view selecting from multiple +-- distributed/reference tables should be marked as distributed +CREATE VIEW post_11_upgrade.non_dist_upgrade_multiple_dist_view AS SELECT colocated_dist_table.* FROM colocated_dist_table JOIN sensors ON (true) JOIN reference_table ON (true); + +-- a view selecting from reference table should be fine +CREATE VIEW post_11_upgrade.non_dist_upgrade_ref_view AS SELECT * FROM reference_table; + +-- a view selecting from another (distributed) view should also be distributed +CREATE VIEW post_11_upgrade.non_dist_upgrade_ref_view_2 AS SELECT * FROM non_dist_upgrade_ref_view; + +-- materialized views never become distributed +CREATE MATERIALIZED VIEW post_11_upgrade.materialized_view AS SELECT * FROM reference_table; + +CREATE VIEW post_11_upgrade.owned_by_extension_view AS SELECT * FROM reference_table; +ALTER EXTENSION plpgsql ADD VIEW post_11_upgrade.owned_by_extension_view; + +-- temporary views should not be marked as distributed +CREATE VIEW pg_temp.temp_view_1 AS SELECT * FROM reference_table; +CREATE temporary VIEW temp_view_2 AS SELECT * FROM reference_table; + +-- we should be able to distribute recursive views +CREATE OR REPLACE RECURSIVE VIEW reporting_line (employee_id, subordinates) AS +SELECT employee_id, + full_name AS subordinates +FROM employees +WHERE manager_id IS NULL +UNION ALL +SELECT e.employee_id, + (rl.subordinates || ' > ' || e.full_name) AS subordinates +FROM employees e +INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id; + +-- v_test_1 and v_test_2 become circularly dependent views +-- so we should not try to distribute any of the views +CREATE VIEW post_11_upgrade.v_test_1 AS SELECT * FROM sensors; +CREATE VIEW post_11_upgrade.v_test_2 AS SELECT * FROM sensors; +CREATE OR REPLACE VIEW post_11_upgrade.v_test_1 AS SELECT sensors.* FROM sensors JOIN v_test_2 USING (measureid); +CREATE OR REPLACE VIEW post_11_upgrade.v_test_2 AS SELECT sensors.* FROM sensors JOIN v_test_1 USING (measureid); + +-- views that do not depend on anything 
should be distributed +CREATE VIEW post_11_upgrade.depends_on_nothing_1 AS SELECT * FROM (VALUES (1)) as values; +CREATE VIEW post_11_upgrade.depends_on_nothing_2 AS SELECT 1; + +-- views that depend on pg/citus objects should be distributed +CREATE VIEW post_11_upgrade.depends_on_pg AS SELECT * FROM pg_class; +CREATE VIEW post_11_upgrade.depends_on_citus AS SELECT * FROM pg_dist_partition; + +-- views that depend only on sequences should be distributed +CREATE SEQUENCE post_11_upgrade.seq_bigint AS bigint INCREMENT BY 3 CACHE 10 CYCLE; +CREATE VIEW post_11_upgrade.depends_on_seq AS SELECT nextval('post_11_upgrade.seq_bigint'); + +-- views that depend on a sequence and a local table should not be distributed +CREATE VIEW post_11_upgrade.depends_on_seq_and_no_support AS SELECT nextval('post_11_upgrade.seq_bigint') FROM post_11_upgrade.non_dist_table_for_view; RESET citus.enable_ddl_propagation; diff --git a/src/test/regress/sql/view_propagation.sql b/src/test/regress/sql/view_propagation.sql new file mode 100644 index 000000000..ad6d83eac --- /dev/null +++ b/src/test/regress/sql/view_propagation.sql @@ -0,0 +1,380 @@ +-- Tests to check propagation of all view commands +CREATE SCHEMA view_prop_schema; +SET search_path to view_prop_schema; + +-- Check creating views depending on different types of tables +-- and from multiple schemas + +-- Check the most basic one +CREATE VIEW prop_view_basic AS SELECT 1; + +-- Try to create a view depending on a local table, then try to recreate it after distributing the table +CREATE TABLE view_table_1(id int, val_1 text); +CREATE VIEW prop_view_1 AS + SELECT * FROM view_table_1; + +SELECT create_distributed_table('view_table_1', 'id'); +CREATE OR REPLACE VIEW prop_view_1 AS + SELECT * FROM view_table_1; + +-- Try to create a view depending on a local table, then try to recreate it after making the table a reference table +CREATE TABLE view_table_2(id int PRIMARY KEY, val_1 text); +CREATE VIEW prop_view_2 AS + SELECT view_table_1.id, view_table_2.val_1 FROM view_table_1 INNER JOIN view_table_2 + ON view_table_1.id = view_table_2.id; + +SELECT create_reference_table('view_table_2'); +CREATE OR REPLACE VIEW prop_view_2 AS + SELECT view_table_1.id, view_table_2.val_1 FROM view_table_1 INNER JOIN view_table_2 + ON view_table_1.id = view_table_2.id; + +-- Try to create a view depending on a local table, then try to recreate it after making the table a citus local table
CREATE TABLE view_table_3(id int, val_1 text); +CREATE VIEW prop_view_3 AS + SELECT * FROM view_table_1 WHERE id IN + (SELECT view_table_2.id FROM view_table_2 INNER JOIN view_table_3 ON view_table_2.id = view_table_3.id); + +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid=>0); +RESET client_min_messages; + +ALTER TABLE view_table_3 +ADD CONSTRAINT f_key_for_local_table +FOREIGN KEY(id) +REFERENCES view_table_2(id); + +CREATE OR REPLACE VIEW prop_view_3 AS + SELECT * FROM view_table_1 WHERE id IN + (SELECT view_table_2.id FROM view_table_2 INNER JOIN view_table_3 ON view_table_2.id = view_table_3.id); + +-- Try to create a view depending on a PG metadata table +CREATE VIEW prop_view_4 AS + SELECT * FROM pg_stat_activity; + +-- Try to create a view depending on a Citus metadata table +CREATE VIEW prop_view_5 AS + SELECT * FROM citus_dist_stat_activity; + +-- Try to create a view depending on a local table from another schema, then try to create it again after distributing the table +CREATE SCHEMA view_prop_schema_inner; +SET search_path TO view_prop_schema_inner; + +-- Create local table for 
tests below +CREATE TABLE view_table_4(id int, val_1 text); + +-- Create a distributed table and view to test drop view below +CREATE TABLE inner_view_table(id int); +SELECT create_distributed_table('inner_view_table','id'); +CREATE VIEW inner_view_prop AS SELECT * FROM inner_view_table; + +SET search_path to view_prop_schema; + +CREATE VIEW prop_view_6 AS + SELECT vt1.id, vt4.val_1 FROM view_table_1 AS vt1 + INNER JOIN view_prop_schema_inner.view_table_4 AS vt4 ON vt1.id = vt4.id; + +SELECT create_distributed_table('view_prop_schema_inner.view_table_4','id'); +CREATE OR REPLACE VIEW prop_view_6 AS + SELECT vt1.id, vt4.val_1 FROM view_table_1 AS vt1 + INNER JOIN view_prop_schema_inner.view_table_4 AS vt4 ON vt1.id = vt4.id; + +-- Show that all views are propagated as distributed objects +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_%' ORDER BY 1; + +-- Check creating views depending on various kinds of objects +-- Tests will also check propagating dependent objects + +-- Depending on function +SET citus.enable_ddl_propagation TO OFF; +CREATE OR REPLACE FUNCTION func_1_for_view(param_1 int) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return param_1; +END; +$$; +RESET citus.enable_ddl_propagation; + +-- Show that function will be propagated together with the view +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%func_1_for_view%'; + +CREATE VIEW prop_view_7 AS SELECT func_1_for_view(id) FROM view_table_1; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%func_1_for_view%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_7%'; + +-- Depending on type +SET citus.enable_ddl_propagation TO OFF; +CREATE TYPE type_for_view_prop AS ENUM ('a','b','c'); +RESET citus.enable_ddl_propagation; + +-- Show that type will be propagated together with the view +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%type_for_view_prop%'; + +CREATE VIEW prop_view_8 AS SELECT val_1::type_for_view_prop FROM view_table_1; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%type_for_view_prop%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_8%'; + +-- Depending on another view +CREATE TABLE view_table_5(id int); +CREATE VIEW prop_view_9 AS SELECT * FROM view_table_5; +CREATE VIEW prop_view_10 AS SELECT * FROM prop_view_9; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where 
obj_identifier::text like '%prop_view_10%'; + +SELECT create_distributed_table('view_table_5', 'id'); +CREATE OR REPLACE VIEW prop_view_10 AS SELECT * FROM prop_view_9; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_9%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_10%'; + +-- Check views owned by non-superuser +SET client_min_messages TO ERROR; +CREATE USER view_creation_user; +SELECT 1 FROM run_command_on_workers($$CREATE USER view_creation_user;$$); +GRANT ALL PRIVILEGES ON SCHEMA view_prop_schema to view_creation_user; + +SET ROLE view_creation_user; + +CREATE TABLE user_owned_table_for_view(id int); +SELECT create_distributed_table('user_owned_table_for_view','id'); +CREATE VIEW view_owned_by_user AS SELECT * FROM user_owned_table_for_view; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%view_owned_by_user%'; +DROP VIEW view_owned_by_user; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%view_owned_by_user%'; +DROP TABLE user_owned_table_for_view; + +RESET ROLE; +RESET client_min_messages; + +-- Create view with different options + +CREATE TABLE view_table_6(id int, val_1 text); +SELECT create_distributed_table('view_table_6','id'); + +-- TEMP VIEW is not supported. View will be created locally. 
+CREATE TEMP VIEW temp_prop_view AS SELECT * FROM view_table_6; + +-- Recursive views are supported +CREATE RECURSIVE VIEW nums_1_100_prop_view (n) AS + VALUES (1) +UNION ALL + SELECT n+1 FROM nums_1_100_prop_view WHERE n < 100; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%nums_1_100_prop_view%'; + +-- Sequences are supported as a dependency +CREATE SEQUENCE sequence_to_prop; +CREATE VIEW seq_view_prop AS SELECT sequence_to_prop.is_called FROM sequence_to_prop; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%sequence_to_prop%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%seq_view_prop%'; + +-- Views that depend on temp sequences will be created locally +CREATE TEMPORARY SEQUENCE temp_sequence_to_drop; +CREATE VIEW temp_seq_view_prop AS SELECT temp_sequence_to_drop.is_called FROM temp_sequence_to_drop; + +-- Check that circular dependencies are detected +CREATE VIEW circular_view_1 AS SELECT * FROM view_table_6; +CREATE VIEW circular_view_2 AS SELECT * FROM view_table_6; +CREATE OR REPLACE VIEW circular_view_1 AS SELECT view_table_6.* FROM view_table_6 JOIN circular_view_2 USING (id); +CREATE OR REPLACE VIEW circular_view_2 AS SELECT view_table_6.* FROM view_table_6 JOIN circular_view_1 USING (id); + +-- Recursive views with distributed tables included +CREATE TABLE employees (employee_id int, manager_id int, full_name text); +SELECT create_distributed_table('employees', 'employee_id'); + +CREATE OR REPLACE RECURSIVE VIEW reporting_line (employee_id, subordinates) AS +SELECT + employee_id, + full_name AS subordinates +FROM + employees +WHERE + manager_id IS NULL +UNION ALL + SELECT + e.employee_id, + ( + rl.subordinates || ' > ' || e.full_name + ) AS subordinates + FROM + employees e + INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id; + +-- Aliases are supported +CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6; + +-- View options are supported +CREATE VIEW opt_prop_view + WITH(check_option=CASCADED, security_barrier=true) + AS SELECT * FROM view_table_6; + +CREATE VIEW sep_opt_prop_view + AS SELECT * FROM view_table_6 + WITH LOCAL CHECK OPTION; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%' ORDER BY 1; + +-- Check that definitions and reloptions of views are correct on workers +\c - - - :worker_1_port + +SELECT definition FROM pg_views WHERE viewname = 'aliased_opt_prop_view'; +SELECT definition FROM pg_views WHERE viewname = 'opt_prop_view'; +SELECT definition FROM pg_views WHERE viewname = 'sep_opt_prop_view'; + +SELECT relname, reloptions +FROM pg_class +WHERE + oid = 'view_prop_schema.aliased_opt_prop_view'::regclass::oid OR + oid = 'view_prop_schema.opt_prop_view'::regclass::oid OR + oid = 'view_prop_schema.sep_opt_prop_view'::regclass::oid +ORDER BY 1; + +\c - - - :master_port +SET search_path to view_prop_schema; + +-- Sync metadata to check it works properly after adding a view +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- Drop views and check metadata afterwards +DROP 
VIEW prop_view_9 CASCADE; +DROP VIEW opt_prop_view, aliased_opt_prop_view, view_prop_schema_inner.inner_view_prop, sep_opt_prop_view; + +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%inner_view_prop%'; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%'; + +-- Drop a column that the view depends on +ALTER TABLE view_table_1 DROP COLUMN val_1 CASCADE; + +-- Since prop_view_3 depends on view_table_1's val_1 column, it should be dropped +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_3%'; + +-- Drop a table that the view depends on +DROP TABLE view_table_2 CASCADE; + +-- Since prop_view_2 depends on view_table_2, it should be dropped +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%prop_view_2%'; + +-- Show that unsupported CREATE OR REPLACE VIEW commands are caught by PG on the coordinator +CREATE TABLE table_to_test_unsup_view(id int, val1 text); +SELECT create_distributed_table('table_to_test_unsup_view', 'id'); + +CREATE VIEW view_for_unsup_commands AS SELECT * FROM table_to_test_unsup_view; + +CREATE OR REPLACE VIEW view_for_unsup_commands(a,b) AS SELECT * FROM table_to_test_unsup_view; +CREATE OR REPLACE VIEW view_for_unsup_commands AS SELECT id FROM table_to_test_unsup_view; + +-- ALTER VIEW PROPAGATION +CREATE TABLE alter_view_table(id int, val1 text); +SELECT create_distributed_table('alter_view_table','id'); + +CREATE VIEW alter_view_1 AS SELECT * FROM alter_view_table; + +-- Set/drop default value is not supported by Citus +ALTER VIEW alter_view_1 ALTER COLUMN val1 SET DEFAULT random()::text; +ALTER TABLE alter_view_1 ALTER COLUMN val1 SET DEFAULT random()::text; + +ALTER VIEW alter_view_1 ALTER COLUMN val1 DROP DEFAULT; +ALTER TABLE alter_view_1 ALTER COLUMN val1 DROP DEFAULT; + +-- Set/reset options via alter view/alter table commands +ALTER VIEW alter_view_1 SET (check_option=cascaded); +ALTER VIEW alter_view_1 SET (security_barrier); +ALTER VIEW alter_view_1 SET (check_option=cascaded, security_barrier); +ALTER VIEW alter_view_1 SET (check_option=cascaded, security_barrier = true); + +ALTER TABLE alter_view_1 SET (check_option=cascaded); +ALTER TABLE alter_view_1 SET (security_barrier); +ALTER TABLE alter_view_1 SET (check_option=cascaded, security_barrier); +ALTER TABLE alter_view_1 SET (check_option=cascaded, security_barrier = true); + +-- Check the definition on both the coordinator and the worker node +SELECT definition FROM pg_views WHERE viewname = 'alter_view_1'; + +SELECT relname, reloptions +FROM pg_class +WHERE oid = 'view_prop_schema.alter_view_1'::regclass::oid; + +\c - - - :worker_1_port +SELECT definition FROM pg_views WHERE viewname = 'alter_view_1'; + +SELECT relname, reloptions +FROM pg_class +WHERE oid = 'view_prop_schema.alter_view_1'::regclass::oid; + +\c - - - :master_port +SET search_path to view_prop_schema; + +ALTER TABLE alter_view_1 RESET (check_option, security_barrier); +ALTER VIEW alter_view_1 RESET (check_option, security_barrier); + +-- Change the schema of the view +ALTER TABLE alter_view_1 SET SCHEMA 
view_prop_schema_inner; +ALTER VIEW view_prop_schema_inner.alter_view_1 SET SCHEMA view_prop_schema; + +-- Rename the view and the view's column +ALTER VIEW alter_view_1 RENAME COLUMN val1 TO val2; +ALTER VIEW alter_view_1 RENAME val2 TO val1; +ALTER VIEW alter_view_1 RENAME TO alter_view_2; + +ALTER TABLE alter_view_2 RENAME COLUMN val1 TO val2; +ALTER TABLE alter_view_2 RENAME val2 TO val1; +ALTER TABLE alter_view_2 RENAME TO alter_view_1; + +-- Alter owner with alter view/alter table +SET client_min_messages TO ERROR; +CREATE USER alter_view_user; +SELECT 1 FROM run_command_on_workers($$CREATE USER alter_view_user;$$); +RESET client_min_messages; +ALTER VIEW alter_view_1 OWNER TO alter_view_user; +ALTER TABLE alter_view_1 OWNER TO alter_view_user; + +-- Alter view owned by extension +CREATE TABLE table_for_ext_owned_view(id int); +CREATE VIEW extension_owned_view AS SELECT * FROM table_for_ext_owned_view; + +CREATE EXTENSION seg; +ALTER EXTENSION seg ADD VIEW extension_owned_view; + +SELECT create_distributed_table('table_for_ext_owned_view','id'); +CREATE OR REPLACE VIEW extension_owned_view AS SELECT * FROM table_for_ext_owned_view; + +-- Since the view is owned by an extension, Citus shouldn't propagate it +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%extension_owned_view%'; + +-- Try syncing metadata after running ALTER VIEW commands +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + +-- Alter non-existing view +ALTER VIEW IF EXISTS non_existing_view ALTER COLUMN val1 SET DEFAULT random()::text; +ALTER VIEW IF EXISTS non_existing_view SET (check_option=cascaded); +ALTER VIEW IF EXISTS non_existing_view RENAME COLUMN val1 TO val2; +ALTER VIEW IF EXISTS non_existing_view RENAME val2 TO val1; +ALTER VIEW IF EXISTS non_existing_view SET SCHEMA view_prop_schema; + +-- Show that create view and alter view commands can be run from the same transaction +-- but not the drop view. 
Since we can not use metadata connection for drop view commands +BEGIN; + SET LOCAL citus.force_max_query_parallelization TO ON; + CREATE TABLE table_1_to_view_in_transaction(a int); + SELECT create_distributed_table('table_1_to_view_in_transaction', 'a'); + + CREATE TABLE table_2_to_view_in_transaction(a int); + SELECT create_distributed_table('table_2_to_view_in_transaction', 'a'); + + -- we can create/alter/drop views even in parallel mode + CREATE VIEW view_in_transaction AS SELECT table_1_to_view_in_transaction.* FROM table_2_to_view_in_transaction JOIN table_1_to_view_in_transaction USING (a); + ALTER TABLE view_in_transaction SET (security_barrier); + ALTER VIEW view_in_transaction SET SCHEMA public; + ALTER VIEW public.view_in_transaction SET SCHEMA view_prop_schema_inner; + ALTER TABLE view_prop_schema_inner.view_in_transaction RENAME COLUMN a TO b; + DROP VIEW view_prop_schema_inner.view_in_transaction; +ROLLBACK; + +SET client_min_messages TO ERROR; +DROP SCHEMA view_prop_schema_inner CASCADE; +DROP SCHEMA view_prop_schema CASCADE; diff --git a/src/test/regress/sql/views_create.sql b/src/test/regress/sql/views_create.sql index d30676c42..0c9b2acc1 100644 --- a/src/test/regress/sql/views_create.sql +++ b/src/test/regress/sql/views_create.sql @@ -2,6 +2,10 @@ CREATE SCHEMA views_create; SET search_path TO views_create; CREATE TABLE view_test_table(a INT NOT NULL PRIMARY KEY, b BIGINT, c text); +SELECT create_distributed_table('view_test_table', 'a'); +-- Since creating view distributed or locally depends on the arbitrary config +-- set client_min_messages to ERROR to get consistent result. +SET client_min_messages TO ERROR; CREATE OR REPLACE VIEW select_filtered_view AS SELECT * FROM view_test_table WHERE c = 'testing' WITH CASCADED CHECK OPTION; @@ -10,7 +14,7 @@ CREATE OR REPLACE VIEW select_all_view AS WITH LOCAL CHECK OPTION; CREATE OR REPLACE VIEW count_view AS SELECT COUNT(*) FROM view_test_table; -SELECT create_distributed_table('view_test_table', 'a'); +RESET client_min_messages; INSERT INTO view_test_table VALUES (1,1,'testing'), (2,1,'views'); SELECT * FROM count_view; diff --git a/src/test/regress/sql/with_prepare.sql b/src/test/regress/sql/with_prepare.sql index 891600b00..486b5af2b 100644 --- a/src/test/regress/sql/with_prepare.sql +++ b/src/test/regress/sql/with_prepare.sql @@ -225,6 +225,21 @@ ORDER BY time LIMIT 10; +-- +-- Test a prepared statement with unused argument +-- +CREATE TYPE foo as (x int, y int); +CREATE TABLE footest (x int, y int, z foo); +SELECT create_distributed_table('footest','x'); +INSERT INTO footest VALUES(1, 2, (3,4)); + +-- Add a redundant parameter +PREPARE prepared_test_9(foo,foo) AS +WITH a AS ( + SELECT * FROM footest WHERE z = $1 AND x = 1 OFFSET 0 +) +SELECT * FROM a; + EXECUTE prepared_test_1; EXECUTE prepared_test_1; EXECUTE prepared_test_1; @@ -301,6 +316,13 @@ EXECUTE prepared_test_8; EXECUTE prepared_test_8; ROLLBACK; +EXECUTE prepared_test_9('(3,4)','(2,3)'); +EXECUTE prepared_test_9('(3,4)','(2,3)'); +EXECUTE prepared_test_9('(3,4)','(2,3)'); +EXECUTE prepared_test_9('(3,4)','(2,3)'); +EXECUTE prepared_test_9('(3,4)','(2,3)'); +EXECUTE prepared_test_9('(3,4)','(2,3)'); + EXECUTE prepared_partition_column_insert(1); EXECUTE prepared_partition_column_insert(2); EXECUTE prepared_partition_column_insert(3); diff --git a/src/test/regress/sql_schedule b/src/test/regress/sql_schedule index fc011abf6..aadc717e1 100644 --- a/src/test/regress/sql_schedule +++ b/src/test/regress/sql_schedule @@ -4,10 +4,11 @@ test: ch_benchmarks_1 
ch_benchmarks_2 ch_benchmarks_3 test: ch_benchmarks_4 ch_benchmarks_5 ch_benchmarks_6 test: intermediate_result_pruning_queries_1 intermediate_result_pruning_queries_2 test: dropped_columns_1 distributed_planning -test: local_dist_join +test: local_dist_join nested_execution test: connectivity_checks citus_run_command test: schemas test: sequences +test: functions test: arbitrary_configs_truncate test: arbitrary_configs_truncate_cascade test: arbitrary_configs_truncate_partition