mirror of https://github.com/citusdata/citus.git
Drops PG14 support (#7753)
DESCRIPTION: Drops PG14 support

1. Remove "$version_num" != 'xx' from the configure file
2. Delete all PG_VERSION_NUM = PG_VERSION_XX references in the code
3. Look at the pg_version_compat.h file; remove all _compat functions etc. defined specifically for PGXX differences
4. Delete all PG_VERSION_NUM >= PG_VERSION_(XX+1) and PG_VERSION_NUM < PG_VERSION_(XX+1) ifs in the codebase
5. Delete the ruleutils_xx.c file
6. Clean up the normalize.sed file, removing its PG14-specific lines
7. Delete all alternative output files for that particular PG version; the server_version_ge variable helps here
parent 6b70724b31
commit 9a7f6d6c59
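Steps 2-4 of the description all target the same mechanical pattern. Below is a minimal illustrative sketch (not code from this commit): a pg_version_compat.h-style shim plus a guarded call site, where frobnicate and frobnicate_extended are hypothetical names standing in for whichever backend API differs between PG14 and PG15. Once PG14 is dropped, PG_VERSION_NUM >= PG_VERSION_15 is always true, so both the _compat macro and the #if guard collapse into a plain call and can be deleted.

#include "postgres.h"

#include "pg_version_constants.h"    /* Citus header defining PG_VERSION_15 etc. */

/* hypothetical PG14 and PG15+ variants of some backend function */
extern void frobnicate(void *rel);
extern void frobnicate_extended(void *rel, bool flag);

/* pg_version_compat.h-style shim of the kind that step 3 removes */
#if PG_VERSION_NUM >= PG_VERSION_15
#define frobnicate_compat(rel) frobnicate_extended(rel, true)
#else
#define frobnicate_compat(rel) frobnicate(rel)
#endif

/* call site of the kind that steps 2 and 4 clean up */
static void
FrobnicateIfSupported(void *rel)
{
#if PG_VERSION_NUM >= PG_VERSION_15
    /* with PG14 dropped this guard is always true and gets deleted */
    frobnicate_compat(rel);
#endif
}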
@@ -25,8 +25,6 @@ configure -whitespace
# except these exceptions...
src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/deparser/ruleutils_13.c -citus-style
src/backend/distributed/deparser/ruleutils_14.c -citus-style
src/backend/distributed/deparser/ruleutils_15.c -citus-style
src/backend/distributed/deparser/ruleutils_16.c -citus-style
src/backend/distributed/deparser/ruleutils_17.c -citus-style
@@ -28,7 +28,7 @@ jobs:
style_checker_tools_version: "0.8.18"
sql_snapshot_pg_version: "17.2"
image_suffix: "-v889e4c1"
pg14_version: '{ "major": "14", "full": "14.15" }'
image_suffix_citus_upgrade: "-dev-2ad1f90"
pg15_version: '{ "major": "15", "full": "15.10" }'
pg16_version: '{ "major": "16", "full": "16.6" }'
pg17_version: '{ "major": "17", "full": "17.2" }'
@@ -106,7 +106,6 @@ jobs:
image_suffix:
- ${{ needs.params.outputs.image_suffix}}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
@@ -138,7 +137,6 @@ jobs:
image_name:
- ${{ needs.params.outputs.test_image_name }}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
@@ -159,10 +157,6 @@ jobs:
- check-enterprise-isolation-logicalrep-2
- check-enterprise-isolation-logicalrep-3
include:
- make: check-failure
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-failure
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@@ -175,10 +169,6 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-enterprise-failure
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@@ -191,10 +181,6 @@ jobs:
pg_version: ${{ needs.params.outputs.pg17_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-pytest
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@@ -219,10 +205,6 @@ jobs:
suite: cdc
image_name: ${{ needs.params.outputs.test_image_name }}
pg_version: ${{ needs.params.outputs.pg17_version }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg14_version }}
suite: regress
image_name: ${{ needs.params.outputs.fail_test_image_name }}
- make: check-query-generator
pg_version: ${{ needs.params.outputs.pg15_version }}
suite: regress
@@ -275,7 +257,6 @@ jobs:
image_name:
- ${{ needs.params.outputs.fail_test_image_name }}
pg_version:
- ${{ needs.params.outputs.pg14_version }}
- ${{ needs.params.outputs.pg15_version }}
- ${{ needs.params.outputs.pg16_version }}
- ${{ needs.params.outputs.pg17_version }}
@@ -323,18 +304,12 @@ jobs:
fail-fast: false
matrix:
include:
- old_pg_major: 14
new_pg_major: 15
- old_pg_major: 15
new_pg_major: 16
- old_pg_major: 14
new_pg_major: 16
- old_pg_major: 16
new_pg_major: 17
- old_pg_major: 15
new_pg_major: 17
- old_pg_major: 14
new_pg_major: 17
env:
old_pg_major: ${{ matrix.old_pg_major }}
new_pg_major: ${{ matrix.new_pg_major }}
@@ -370,10 +345,10 @@ jobs:
flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-citus-upgrade:
name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade
name: PG${{ fromJson(needs.params.outputs.pg15_version).major }} - check-citus-upgrade
runs-on: ubuntu-20.04
container:
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}"
image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg15_version).full }}${{ needs.params.outputs.image_suffix_citus_upgrade }}"
options: --user root
needs:
- params
@@ -2588,7 +2588,7 @@ fi
if test "$with_pg_version_check" = no; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5
$as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;}
elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
@@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)
if test "$with_pg_version_check" = no; then
AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
elif test "$version_num" != '15' -a "$version_num" != '16' -a "$version_num" != '17'; then
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else
AC_MSG_NOTICE([building against PostgreSQL $version_num])
@ -1312,11 +1312,8 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
|
|||
|
||||
cpath->methods = &ColumnarScanPathMethods;
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/* necessary to avoid extra Result node in PG15 */
|
||||
cpath->flags = CUSTOMPATH_SUPPORT_PROJECTION;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* populate generic path information
|
||||
|
|
|
@ -1686,7 +1686,7 @@ DeleteTupleAndEnforceConstraints(ModifyState *state, HeapTuple heapTuple)
|
|||
simple_heap_delete(state->rel, tid);
|
||||
|
||||
/* execute AFTER ROW DELETE Triggers to enforce constraints */
|
||||
ExecARDeleteTriggers_compat(estate, resultRelInfo, tid, NULL, NULL, false);
|
||||
ExecARDeleteTriggers(estate, resultRelInfo, tid, NULL, NULL, false);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -877,7 +877,7 @@ columnar_relation_set_new_filelocator(Relation rel,
|
|||
|
||||
*freezeXid = RecentXmin;
|
||||
*minmulti = GetOldestMultiXactId();
|
||||
SMgrRelation srel = RelationCreateStorage_compat(*newrlocator, persistence, true);
|
||||
SMgrRelation srel = RelationCreateStorage(*newrlocator, persistence, true);
|
||||
|
||||
ColumnarStorageInit(srel, ColumnarMetadataNewStorageId());
|
||||
InitColumnarOptions(rel->rd_id);
|
||||
|
@ -2245,7 +2245,6 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
|
|||
columnarRangeVar = alterTableStmt->relation;
|
||||
}
|
||||
}
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
else if (alterTableCmd->subtype == AT_SetAccessMethod)
|
||||
{
|
||||
if (columnarRangeVar || *columnarOptions)
|
||||
|
@ -2265,7 +2264,6 @@ ColumnarProcessAlterTable(AlterTableStmt *alterTableStmt, List **columnarOptions
|
|||
DeleteColumnarTableOptions(RelationGetRelid(rel), true);
|
||||
}
|
||||
}
|
||||
#endif /* PG_VERSION_15 */
|
||||
}
|
||||
|
||||
relation_close(rel, NoLock);
|
||||
|
@ -2649,21 +2647,12 @@ ColumnarCheckLogicalReplication(Relation rel)
|
|||
return;
|
||||
}
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
{
|
||||
PublicationDesc pubdesc;
|
||||
|
||||
RelationBuildPublicationDesc(rel, &pubdesc);
|
||||
pubActionInsert = pubdesc.pubactions.pubinsert;
|
||||
}
|
||||
#else
|
||||
if (rel->rd_pubactions == NULL)
|
||||
{
|
||||
GetRelationPublicationActions(rel);
|
||||
Assert(rel->rd_pubactions != NULL);
|
||||
}
|
||||
pubActionInsert = rel->rd_pubactions->pubinsert;
|
||||
#endif
|
||||
|
||||
if (pubActionInsert)
|
||||
{
|
||||
|
|
|
@ -145,17 +145,6 @@ LogicalClockShmemSize(void)
|
|||
void
|
||||
InitializeClusterClockMem(void)
|
||||
{
|
||||
/* On PG 15 and above, we use shmem_request_hook_type */
|
||||
#if PG_VERSION_NUM < PG_VERSION_15
|
||||
|
||||
/* allocate shared memory for pre PG-15 versions */
|
||||
if (!IsUnderPostmaster)
|
||||
{
|
||||
RequestAddinShmemSpace(LogicalClockShmemSize());
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
prev_shmem_startup_hook = shmem_startup_hook;
|
||||
shmem_startup_hook = LogicalClockShmemInit;
|
||||
}
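The block removed above is the pre-PG15 way of reserving shared memory directly in _PG_init(); since PostgreSQL 15 the reservation belongs in shmem_request_hook, which is the path the remaining code keeps. A hedged sketch of that PG15+ pattern for a hypothetical extension follows (my_shmem_request, my_shmem_startup, the segment name, and the 1024-byte size are all made up; Citus itself requests LogicalClockShmemSize() and friends from its citus_shmem_request hook):

#include "postgres.h"

#include "fmgr.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

PG_MODULE_MAGIC;

void _PG_init(void);
static void my_shmem_request(void);
static void my_shmem_startup(void);

static shmem_request_hook_type prev_shmem_request_hook = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

void
_PG_init(void)
{
    /* PG15+: sizes are requested from the hook below, not from _PG_init() */
    prev_shmem_request_hook = shmem_request_hook;
    shmem_request_hook = my_shmem_request;

    prev_shmem_startup_hook = shmem_startup_hook;
    shmem_startup_hook = my_shmem_startup;
}

static void
my_shmem_request(void)
{
    if (prev_shmem_request_hook != NULL)
    {
        prev_shmem_request_hook();
    }

    RequestAddinShmemSpace(1024);    /* hypothetical size */
}

static void
my_shmem_startup(void)
{
    bool found = false;

    if (prev_shmem_startup_hook != NULL)
    {
        prev_shmem_startup_hook();
    }

    /* attach to (or create) the segment reserved above */
    LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
    ShmemInitStruct("my_extension_state", 1024, &found);
    LWLockRelease(AddinShmemInitLock);
}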
@ -68,8 +68,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
|
|||
char *collcollate;
|
||||
char *collctype;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
/*
|
||||
* In PG15, there is an added option to use ICU as global locale provider.
|
||||
* pg_collation has three locale-related fields: collcollate and collctype,
|
||||
|
@ -112,16 +110,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
|
|||
}
|
||||
|
||||
Assert((collcollate && collctype) || colllocale);
|
||||
#else
|
||||
|
||||
/*
|
||||
* In versions before 15, collcollate and collctype were type "name". Use
|
||||
* pstrdup() to match the interface of 15 so that we consistently free the
|
||||
* result later.
|
||||
*/
|
||||
collcollate = pstrdup(NameStr(collationForm->collcollate));
|
||||
collctype = pstrdup(NameStr(collationForm->collctype));
|
||||
#endif
|
||||
|
||||
if (collowner != NULL)
|
||||
{
|
||||
|
@ -147,7 +135,6 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
|
|||
"CREATE COLLATION %s (provider = '%s'",
|
||||
*quotedCollationName, providerString);
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
if (colllocale)
|
||||
{
|
||||
appendStringInfo(&collationNameDef,
|
||||
|
@ -173,24 +160,7 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
|
|||
pfree(collcollate);
|
||||
pfree(collctype);
|
||||
}
|
||||
#else
|
||||
if (strcmp(collcollate, collctype) == 0)
|
||||
{
|
||||
appendStringInfo(&collationNameDef,
|
||||
", locale = %s",
|
||||
quote_literal_cstr(collcollate));
|
||||
}
|
||||
else
|
||||
{
|
||||
appendStringInfo(&collationNameDef,
|
||||
", lc_collate = %s, lc_ctype = %s",
|
||||
quote_literal_cstr(collcollate),
|
||||
quote_literal_cstr(collctype));
|
||||
}
|
||||
|
||||
pfree(collcollate);
|
||||
pfree(collctype);
|
||||
#endif
|
||||
#if PG_VERSION_NUM >= PG_VERSION_16
|
||||
char *collicurules = NULL;
|
||||
datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull);
|
||||
|
|
|
@ -170,12 +170,10 @@ static void EnsureDistributedSequencesHaveOneType(Oid relationId,
|
|||
static void CopyLocalDataIntoShards(Oid distributedTableId);
|
||||
static List * TupleDescColumnNameList(TupleDesc tupleDescriptor);
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
static bool DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
|
||||
Var *distributionColumn);
|
||||
static int numeric_typmod_scale(int32 typmod);
|
||||
static bool is_valid_numeric_typmod(int32 typmod);
|
||||
#endif
|
||||
|
||||
static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
|
||||
Var *distributionColumn);
|
||||
|
@ -2114,8 +2112,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
|
|||
"AS (...) STORED.")));
|
||||
}
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/* verify target relation is not distributed by a column of type numeric with negative scale */
|
||||
if (distributionMethod != DISTRIBUTE_BY_NONE &&
|
||||
DistributionColumnUsesNumericColumnNegativeScale(relationDesc,
|
||||
|
@ -2126,7 +2122,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
|
|||
errdetail("Distribution column must not use numeric type "
|
||||
"with negative scale")));
|
||||
}
|
||||
#endif
|
||||
|
||||
/* check for support function needed by specified partition method */
|
||||
if (distributionMethod == DISTRIBUTE_BY_HASH)
|
||||
|
@ -2844,8 +2839,6 @@ TupleDescColumnNameList(TupleDesc tupleDescriptor)
|
|||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* is_valid_numeric_typmod checks if the typmod value is valid
|
||||
*
|
||||
|
@ -2895,8 +2888,6 @@ DistributionColumnUsesNumericColumnNegativeScale(TupleDesc relationDesc,
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* DistributionColumnUsesGeneratedStoredColumn returns whether a given relation uses
|
||||
* GENERATED ALWAYS AS (...) STORED on distribution column
|
||||
|
|
|
@ -185,8 +185,6 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
|
|||
}
|
||||
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
/*
|
||||
* PreprocessAlterDatabaseSetStmt is executed before the statement is applied to the local
|
||||
* postgres instance.
|
||||
|
@ -217,9 +215,6 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* GetDatabaseAddressFromDatabaseName gets the database name and returns the ObjectAddress
|
||||
* of the database.
|
||||
|
|
|
@ -465,7 +465,6 @@ static DistributeObjectOps Database_Alter = {
|
|||
.markDistributed = false,
|
||||
};
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
static DistributeObjectOps Database_RefreshColl = {
|
||||
.deparse = DeparseAlterDatabaseRefreshCollStmt,
|
||||
.qualify = NULL,
|
||||
|
@ -476,7 +475,6 @@ static DistributeObjectOps Database_RefreshColl = {
|
|||
.address = NULL,
|
||||
.markDistributed = false,
|
||||
};
|
||||
#endif
|
||||
|
||||
static DistributeObjectOps Domain_Alter = {
|
||||
.deparse = DeparseAlterDomainStmt,
|
||||
|
@ -837,7 +835,6 @@ static DistributeObjectOps Sequence_AlterOwner = {
|
|||
.address = AlterSequenceOwnerStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
static DistributeObjectOps Sequence_AlterPersistence = {
|
||||
.deparse = DeparseAlterSequencePersistenceStmt,
|
||||
.qualify = QualifyAlterSequencePersistenceStmt,
|
||||
|
@ -847,7 +844,6 @@ static DistributeObjectOps Sequence_AlterPersistence = {
|
|||
.address = AlterSequencePersistenceStmtObjectAddress,
|
||||
.markDistributed = false,
|
||||
};
|
||||
#endif
|
||||
static DistributeObjectOps Sequence_Drop = {
|
||||
.deparse = DeparseDropSequenceStmt,
|
||||
.qualify = QualifyDropSequenceStmt,
|
||||
|
@ -1299,7 +1295,7 @@ static DistributeObjectOps View_Rename = {
|
|||
static DistributeObjectOps Trigger_Rename = {
|
||||
.deparse = NULL,
|
||||
.qualify = NULL,
|
||||
.preprocess = PreprocessAlterTriggerRenameStmt,
|
||||
.preprocess = NULL,
|
||||
.operationType = DIST_OPS_ALTER,
|
||||
.postprocess = PostprocessAlterTriggerRenameStmt,
|
||||
.address = NULL,
|
||||
|
@ -1321,13 +1317,11 @@ GetDistributeObjectOps(Node *node)
|
|||
return &Database_Alter;
|
||||
}
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
case T_AlterDatabaseRefreshCollStmt:
|
||||
{
|
||||
return &Database_RefreshColl;
|
||||
}
|
||||
|
||||
#endif
|
||||
case T_AlterDomainStmt:
|
||||
{
|
||||
return &Domain_Alter;
|
||||
|
@ -1612,7 +1606,6 @@ GetDistributeObjectOps(Node *node)
|
|||
|
||||
case OBJECT_SEQUENCE:
|
||||
{
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
ListCell *cmdCell = NULL;
|
||||
foreach(cmdCell, stmt->cmds)
|
||||
{
|
||||
|
@ -1640,7 +1633,6 @@ GetDistributeObjectOps(Node *node)
|
|||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Prior to PG15, the only Alter Table statement
|
||||
|
|
|
@ -467,7 +467,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
|
|||
}
|
||||
|
||||
List *onDeleteSetDefColumnList = NIL;
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
Datum onDeleteSetDefColumnsDatum = SysCacheGetAttr(CONSTROID, pgConstraintTuple,
|
||||
Anum_pg_constraint_confdelsetcols,
|
||||
&isNull);
|
||||
|
@ -482,7 +481,6 @@ ForeignKeyGetDefaultingAttrs(HeapTuple pgConstraintTuple)
|
|||
onDeleteSetDefColumnList =
|
||||
IntegerArrayTypeToList(DatumGetArrayTypeP(onDeleteSetDefColumnsDatum));
|
||||
}
|
||||
#endif
|
||||
|
||||
if (list_length(onDeleteSetDefColumnList) == 0)
|
||||
{
|
||||
|
|
|
@ -33,11 +33,9 @@
|
|||
|
||||
|
||||
static CreatePublicationStmt * BuildCreatePublicationStmt(Oid publicationId);
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
static PublicationObjSpec * BuildPublicationRelationObjSpec(Oid relationId,
|
||||
Oid publicationId,
|
||||
bool tableOnly);
|
||||
#endif
|
||||
static void AppendPublishOptionList(StringInfo str, List *strings);
|
||||
static char * AlterPublicationOwnerCommand(Oid publicationId);
|
||||
static bool ShouldPropagateCreatePublication(CreatePublicationStmt *stmt);
|
||||
|
@ -154,7 +152,6 @@ BuildCreatePublicationStmt(Oid publicationId)
|
|||
|
||||
ReleaseSysCache(publicationTuple);
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
List *schemaIds = GetPublicationSchemas(publicationId);
|
||||
Oid schemaId = InvalidOid;
|
||||
|
||||
|
@ -170,7 +167,6 @@ BuildCreatePublicationStmt(Oid publicationId)
|
|||
|
||||
createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
|
||||
}
|
||||
#endif
|
||||
|
||||
List *relationIds = GetPublicationRelations(publicationId,
|
||||
publicationForm->pubviaroot ?
|
||||
|
@ -184,7 +180,6 @@ BuildCreatePublicationStmt(Oid publicationId)
|
|||
|
||||
foreach_declared_oid(relationId, relationIds)
|
||||
{
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
bool tableOnly = false;
|
||||
|
||||
/* since postgres 15, tables can have a column list and filter */
|
||||
|
@ -192,15 +187,6 @@ BuildCreatePublicationStmt(Oid publicationId)
|
|||
BuildPublicationRelationObjSpec(relationId, publicationId, tableOnly);
|
||||
|
||||
createPubStmt->pubobjects = lappend(createPubStmt->pubobjects, publicationObject);
|
||||
#else
|
||||
|
||||
/* before postgres 15, only full tables are supported */
|
||||
char *schemaName = get_namespace_name(get_rel_namespace(relationId));
|
||||
char *tableName = get_rel_name(relationId);
|
||||
RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
|
||||
|
||||
createPubStmt->tables = lappend(createPubStmt->tables, rangeVar);
|
||||
#endif
|
||||
|
||||
if (IsCitusTable(relationId))
|
||||
{
|
||||
|
@ -276,8 +262,6 @@ AppendPublishOptionList(StringInfo str, List *options)
|
|||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* BuildPublicationRelationObjSpec returns a PublicationObjSpec that
|
||||
* can be included in a CREATE or ALTER PUBLICATION statement.
|
||||
|
@ -357,9 +341,6 @@ BuildPublicationRelationObjSpec(Oid relationId, Oid publicationId,
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* PreprocessAlterPublicationStmt handles ALTER PUBLICATION statements
|
||||
* in a way that is mostly similar to PreprocessAlterDistributedObjectStmt,
|
||||
|
@ -458,7 +439,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
|
|||
|
||||
ReleaseSysCache(pubTuple);
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
bool tableOnly = !isAdd;
|
||||
|
||||
/* since postgres 15, tables can have a column list and filter */
|
||||
|
@ -467,16 +447,6 @@ GetAlterPublicationTableDDLCommand(Oid publicationId, Oid relationId,
|
|||
|
||||
alterPubStmt->pubobjects = lappend(alterPubStmt->pubobjects, publicationObject);
|
||||
alterPubStmt->action = isAdd ? AP_AddObjects : AP_DropObjects;
|
||||
#else
|
||||
|
||||
/* before postgres 15, only full tables are supported */
|
||||
char *schemaName = get_namespace_name(get_rel_namespace(relationId));
|
||||
char *tableName = get_rel_name(relationId);
|
||||
RangeVar *rangeVar = makeRangeVar(schemaName, tableName, -1);
|
||||
|
||||
alterPubStmt->tables = lappend(alterPubStmt->tables, rangeVar);
|
||||
alterPubStmt->tableAction = isAdd ? DEFELEM_ADD : DEFELEM_DROP;
|
||||
#endif
|
||||
|
||||
/* we take the WHERE clause from the catalog where it is already transformed */
|
||||
bool whereClauseNeedsTransform = false;
|
||||
|
|
|
@ -1027,13 +1027,8 @@ makeStringConst(char *str, int location)
|
|||
{
|
||||
A_Const *n = makeNode(A_Const);
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
n->val.sval.type = T_String;
|
||||
n->val.sval.sval = str;
|
||||
#else
|
||||
n->val.type = T_String;
|
||||
n->val.val.str = str;
|
||||
#endif
|
||||
n->location = location;
|
||||
|
||||
return (Node *) n;
|
||||
|
@ -1053,13 +1048,8 @@ makeIntConst(int val, int location)
|
|||
{
|
||||
A_Const *n = makeNode(A_Const);
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
n->val.ival.type = T_Integer;
|
||||
n->val.ival.ival = val;
|
||||
#else
|
||||
n->val.type = T_Integer;
|
||||
n->val.val.ival = val;
|
||||
#endif
|
||||
n->location = location;
|
||||
|
||||
return (Node *) n;
|
||||
|
@ -1076,13 +1066,8 @@ makeFloatConst(char *str, int location)
|
|||
{
|
||||
A_Const *n = makeNode(A_Const);
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
n->val.fval.type = T_Float;
|
||||
n->val.fval.fval = str;
|
||||
#else
|
||||
n->val.type = T_Float;
|
||||
n->val.val.str = str;
|
||||
#endif
|
||||
n->location = location;
|
||||
|
||||
return (Node *) n;
|
||||
|
|
|
@ -735,8 +735,6 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString)
|
|||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* PreprocessAlterSequencePersistenceStmt is called for change of persistence
|
||||
* of sequences before the persistence is changed on the local instance.
|
||||
|
@ -847,9 +845,6 @@ PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* PreprocessGrantOnSequenceStmt is executed before the statement is applied to the local
|
||||
* postgres instance.
|
||||
|
|
|
@ -1153,7 +1153,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
|
|||
{
|
||||
AlterTableStmt *stmtCopy = copyObject(alterTableStatement);
|
||||
stmtCopy->objtype = OBJECT_SEQUENCE;
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* it must be ALTER TABLE .. OWNER TO ..
|
||||
|
@ -1163,16 +1162,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
|
|||
*/
|
||||
return PreprocessSequenceAlterTableStmt((Node *) stmtCopy, alterTableCommand,
|
||||
processUtilityContext);
|
||||
#else
|
||||
|
||||
/*
|
||||
* it must be ALTER TABLE .. OWNER TO .. command
|
||||
* since this is the only ALTER command of a sequence that
|
||||
* passes through an AlterTableStmt
|
||||
*/
|
||||
return PreprocessAlterSequenceOwnerStmt((Node *) stmtCopy, alterTableCommand,
|
||||
processUtilityContext);
|
||||
#endif
|
||||
}
|
||||
else if (relKind == RELKIND_VIEW)
|
||||
{
|
||||
|
@ -3673,9 +3662,8 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
|
|||
"are currently unsupported.")));
|
||||
break;
|
||||
}
|
||||
|
||||
#endif
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
case AT_SetAccessMethod:
|
||||
{
|
||||
/*
|
||||
|
@ -3695,7 +3683,6 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement)
|
|||
break;
|
||||
}
|
||||
|
||||
#endif
|
||||
case AT_SetNotNull:
|
||||
case AT_ReplicaIdentity:
|
||||
case AT_ChangeOwner:
|
||||
|
|
|
@ -57,9 +57,6 @@ static void ExtractDropStmtTriggerAndRelationName(DropStmt *dropTriggerStmt,
|
|||
static void ErrorIfDropStmtDropsMultipleTriggers(DropStmt *dropTriggerStmt);
|
||||
static char * GetTriggerNameById(Oid triggerId);
|
||||
static int16 GetTriggerTypeById(Oid triggerId);
|
||||
#if (PG_VERSION_NUM < PG_VERSION_15)
|
||||
static void ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname);
|
||||
#endif
|
||||
|
||||
|
||||
/* GUC that overrides trigger checks for distributed tables and reference tables */
|
||||
|
@ -404,40 +401,6 @@ CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt, char *schemaNam
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* PreprocessAlterTriggerRenameStmt is called before a ALTER TRIGGER RENAME
|
||||
* command has been executed by standard process utility. This function errors
|
||||
* out if we are trying to rename a child trigger on a partition of a distributed
|
||||
* table. In PG15, this is not allowed anyway.
|
||||
*/
|
||||
List *
|
||||
PreprocessAlterTriggerRenameStmt(Node *node, const char *queryString,
|
||||
ProcessUtilityContext processUtilityContext)
|
||||
{
|
||||
#if (PG_VERSION_NUM < PG_VERSION_15)
|
||||
RenameStmt *renameTriggerStmt = castNode(RenameStmt, node);
|
||||
Assert(renameTriggerStmt->renameType == OBJECT_TRIGGER);
|
||||
|
||||
RangeVar *relation = renameTriggerStmt->relation;
|
||||
|
||||
bool missingOk = false;
|
||||
Oid relationId = RangeVarGetRelid(relation, ALTER_TRIGGER_LOCK_MODE, missingOk);
|
||||
|
||||
if (!IsCitusTable(relationId))
|
||||
{
|
||||
return NIL;
|
||||
}
|
||||
|
||||
EnsureCoordinator();
|
||||
ErrorOutForTriggerIfNotSupported(relationId);
|
||||
|
||||
ErrorOutIfCloneTrigger(relationId, renameTriggerStmt->subname);
|
||||
#endif
|
||||
|
||||
return NIL;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* PostprocessAlterTriggerRenameStmt is called after a ALTER TRIGGER RENAME
|
||||
* command has been executed by standard process utility. This function errors
|
||||
|
@ -759,64 +722,6 @@ ErrorIfRelationHasUnsupportedTrigger(Oid relationId)
|
|||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM < PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* ErrorOutIfCloneTrigger is a helper function to error
|
||||
* out if we are trying to rename a child trigger on a
|
||||
* partition of a distributed table.
|
||||
* A lot of this code is borrowed from PG15 because
|
||||
* renaming clone triggers isn't allowed in PG15 anymore.
|
||||
*/
|
||||
static void
|
||||
ErrorOutIfCloneTrigger(Oid tgrelid, const char *tgname)
|
||||
{
|
||||
HeapTuple tuple;
|
||||
ScanKeyData key[2];
|
||||
|
||||
Relation tgrel = table_open(TriggerRelationId, RowExclusiveLock);
|
||||
|
||||
/*
|
||||
* Search for the trigger to modify.
|
||||
*/
|
||||
ScanKeyInit(&key[0],
|
||||
Anum_pg_trigger_tgrelid,
|
||||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(tgrelid));
|
||||
ScanKeyInit(&key[1],
|
||||
Anum_pg_trigger_tgname,
|
||||
BTEqualStrategyNumber, F_NAMEEQ,
|
||||
CStringGetDatum(tgname));
|
||||
SysScanDesc tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
|
||||
NULL, 2, key);
|
||||
|
||||
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
|
||||
{
|
||||
Form_pg_trigger trigform = (Form_pg_trigger) GETSTRUCT(tuple);
|
||||
|
||||
/*
|
||||
* If the trigger descends from a trigger on a parent partitioned
|
||||
* table, reject the rename.
|
||||
* Appended shard ids to find the trigger on the partition's shards
|
||||
* are not correct. Hence we would fail to find the trigger on the
|
||||
* partition's shard.
|
||||
*/
|
||||
if (OidIsValid(trigform->tgparentid))
|
||||
{
|
||||
ereport(ERROR, (
|
||||
errmsg(
|
||||
"cannot rename child triggers on distributed partitions")));
|
||||
}
|
||||
}
|
||||
|
||||
systable_endscan(tgscan);
|
||||
table_close(tgrel, RowExclusiveLock);
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* GetDropTriggerStmtRelation takes a DropStmt for a trigger object and returns
|
||||
* RangeVar for the relation that owns the trigger.
|
||||
|
|
|
@ -614,16 +614,6 @@ WaitForSharedConnection(void)
|
|||
void
|
||||
InitializeSharedConnectionStats(void)
|
||||
{
|
||||
/* on PG 15, we use shmem_request_hook_type */
|
||||
#if PG_VERSION_NUM < PG_VERSION_15
|
||||
|
||||
/* allocate shared memory */
|
||||
if (!IsUnderPostmaster)
|
||||
{
|
||||
RequestAddinShmemSpace(SharedConnectionStatsShmemSize());
|
||||
}
|
||||
#endif
|
||||
|
||||
prev_shmem_startup_hook = shmem_startup_hook;
|
||||
shmem_startup_hook = SharedConnectionStatsShmemInit;
|
||||
}
|
||||
|
|
|
@ -258,10 +258,8 @@ pg_get_sequencedef_string(Oid sequenceRelationId)
|
|||
char *typeName = format_type_be(pgSequenceForm->seqtypid);
|
||||
|
||||
char *sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND,
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
get_rel_persistence(sequenceRelationId) ==
|
||||
RELPERSISTENCE_UNLOGGED ? "UNLOGGED " : "",
|
||||
#endif
|
||||
qualifiedSequenceName,
|
||||
typeName,
|
||||
pgSequenceForm->seqincrement, pgSequenceForm->seqmin,
|
||||
|
@ -857,12 +855,10 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
|
|||
appendStringInfoString(buffer, ") ");
|
||||
}
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
if (indexStmt->nulls_not_distinct)
|
||||
{
|
||||
appendStringInfoString(buffer, "NULLS NOT DISTINCT ");
|
||||
}
|
||||
#endif /* PG_VERSION_15 */
|
||||
|
||||
if (indexStmt->options != NIL)
|
||||
{
|
||||
|
|
|
@ -159,7 +159,6 @@ DeparseAlterDatabaseStmt(Node *node)
|
|||
}
|
||||
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
char *
|
||||
DeparseAlterDatabaseRefreshCollStmt(Node *node)
|
||||
{
|
||||
|
@ -174,6 +173,3 @@ DeparseAlterDatabaseRefreshCollStmt(Node *node)
|
|||
|
||||
return str.data;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
|
|
@ -32,7 +32,6 @@
|
|||
static void AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
|
||||
bool whereClauseNeedsTransform,
|
||||
bool includeLocalTables);
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
static bool AppendPublicationObjects(StringInfo buf, List *publicationObjects,
|
||||
bool whereClauseNeedsTransform,
|
||||
bool includeLocalTables);
|
||||
|
@ -40,10 +39,6 @@ static void AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
|
|||
Node *whereClause,
|
||||
bool whereClauseNeedsTransform);
|
||||
static void AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action);
|
||||
#else
|
||||
static bool AppendTables(StringInfo buf, List *tables, bool includeLocalTables);
|
||||
static void AppendDefElemAction(StringInfo buf, DefElemAction action);
|
||||
#endif
|
||||
static bool AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
|
||||
bool whereClauseNeedsTransform,
|
||||
bool includeLocalTables);
|
||||
|
@ -108,7 +103,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
|
|||
{
|
||||
appendStringInfoString(buf, " FOR ALL TABLES");
|
||||
}
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
else if (stmt->pubobjects != NIL)
|
||||
{
|
||||
bool hasObjects = false;
|
||||
|
@ -146,32 +140,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
|
|||
includeLocalTables);
|
||||
}
|
||||
}
|
||||
#else
|
||||
else if (stmt->tables != NIL)
|
||||
{
|
||||
bool hasTables = false;
|
||||
RangeVar *rangeVar = NULL;
|
||||
|
||||
/*
|
||||
* Check whether there are tables to propagate, mainly to know whether
|
||||
* we should include "FOR".
|
||||
*/
|
||||
foreach_declared_ptr(rangeVar, stmt->tables)
|
||||
{
|
||||
if (includeLocalTables || IsCitusTableRangeVar(rangeVar, NoLock, false))
|
||||
{
|
||||
hasTables = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (hasTables)
|
||||
{
|
||||
appendStringInfoString(buf, " FOR");
|
||||
AppendTables(buf, stmt->tables, includeLocalTables);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
if (stmt->options != NIL)
|
||||
{
|
||||
|
@ -182,8 +150,6 @@ AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt,
|
|||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* AppendPublicationObjects appends a string representing a list of publication
|
||||
* objects to a buffer.
|
||||
|
@ -320,57 +286,6 @@ AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
|
|||
}
|
||||
|
||||
|
||||
#else
|
||||
|
||||
/*
|
||||
* AppendPublicationObjects appends a string representing a list of publication
|
||||
* objects to a buffer.
|
||||
*
|
||||
* For instance: TABLE users, departments
|
||||
*/
|
||||
static bool
|
||||
AppendTables(StringInfo buf, List *tables, bool includeLocalTables)
|
||||
{
|
||||
RangeVar *rangeVar = NULL;
|
||||
bool appendedObject = false;
|
||||
|
||||
foreach_declared_ptr(rangeVar, tables)
|
||||
{
|
||||
if (!includeLocalTables &&
|
||||
!IsCitusTableRangeVar(rangeVar, NoLock, false))
|
||||
{
|
||||
/* do not propagate local tables */
|
||||
continue;
|
||||
}
|
||||
|
||||
char *schemaName = rangeVar->schemaname;
|
||||
char *tableName = rangeVar->relname;
|
||||
|
||||
if (schemaName != NULL)
|
||||
{
|
||||
/* qualified table name */
|
||||
appendStringInfo(buf, "%s %s",
|
||||
appendedObject ? "," : " TABLE",
|
||||
quote_qualified_identifier(schemaName, tableName));
|
||||
}
|
||||
else
|
||||
{
|
||||
/* unqualified table name */
|
||||
appendStringInfo(buf, "%s %s",
|
||||
appendedObject ? "," : " TABLE",
|
||||
quote_identifier(tableName));
|
||||
}
|
||||
|
||||
appendedObject = true;
|
||||
}
|
||||
|
||||
return appendedObject;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* DeparseAlterPublicationSchemaStmt builds and returns a string representing
|
||||
* an AlterPublicationStmt.
|
||||
|
@ -439,19 +354,12 @@ AppendAlterPublicationStmt(StringInfo buf, AlterPublicationStmt *stmt,
|
|||
return true;
|
||||
}
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
AppendAlterPublicationAction(buf, stmt->action);
|
||||
return AppendPublicationObjects(buf, stmt->pubobjects, whereClauseNeedsTransform,
|
||||
includeLocalTables);
|
||||
#else
|
||||
AppendDefElemAction(buf, stmt->tableAction);
|
||||
return AppendTables(buf, stmt->tables, includeLocalTables);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* AppendAlterPublicationAction appends a string representing an AlterPublicationAction
|
||||
* to a buffer.
|
||||
|
@ -487,46 +395,6 @@ AppendAlterPublicationAction(StringInfo buf, AlterPublicationAction action)
|
|||
}
|
||||
|
||||
|
||||
#else
|
||||
|
||||
/*
|
||||
* AppendDefElemAction appends a string representing a DefElemAction
|
||||
* to a buffer.
|
||||
*/
|
||||
static void
|
||||
AppendDefElemAction(StringInfo buf, DefElemAction action)
|
||||
{
|
||||
switch (action)
|
||||
{
|
||||
case DEFELEM_ADD:
|
||||
{
|
||||
appendStringInfoString(buf, " ADD");
|
||||
break;
|
||||
}
|
||||
|
||||
case DEFELEM_DROP:
|
||||
{
|
||||
appendStringInfoString(buf, " DROP");
|
||||
break;
|
||||
}
|
||||
|
||||
case DEFELEM_SET:
|
||||
{
|
||||
appendStringInfoString(buf, " SET");
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
{
|
||||
ereport(ERROR, (errmsg("unrecognized publication action: %d", action)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* DeparseDropPublicationStmt builds and returns a string representing the DropStmt
|
||||
*/
|
||||
|
@ -651,11 +519,7 @@ AppendPublicationOptions(StringInfo stringBuffer, List *optionList)
|
|||
appendStringInfo(stringBuffer, "%s = ",
|
||||
quote_identifier(optionName));
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
if (valueType == T_Integer || valueType == T_Float || valueType == T_Boolean)
|
||||
#else
|
||||
if (valueType == T_Integer || valueType == T_Float)
|
||||
#endif
|
||||
{
|
||||
/* string escaping is unnecessary for numeric types and can cause issues */
|
||||
appendStringInfo(stringBuffer, "%s", optionValue);
|
||||
|
|
|
@ -28,9 +28,7 @@ static void AppendSequenceNameList(StringInfo buf, List *objects, ObjectType obj
|
|||
static void AppendRenameSequenceStmt(StringInfo buf, RenameStmt *stmt);
|
||||
static void AppendAlterSequenceSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt);
|
||||
static void AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt);
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
static void AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt);
|
||||
#endif
|
||||
static void AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt);
|
||||
static void AppendGrantOnSequenceSequences(StringInfo buf, GrantStmt *stmt);
|
||||
|
||||
|
@ -262,8 +260,6 @@ AppendAlterSequenceOwnerStmt(StringInfo buf, AlterTableStmt *stmt)
|
|||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* DeparseAlterSequencePersistenceStmt builds and returns a string representing
|
||||
* the AlterTableStmt consisting of changing the persistence of a sequence
|
||||
|
@ -349,9 +345,6 @@ AppendAlterSequencePersistenceStmt(StringInfo buf, AlterTableStmt *stmt)
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* DeparseGrantOnSequenceStmt builds and returns a string representing the GrantOnSequenceStmt
|
||||
*/
|
||||
|
|
|
@ -193,12 +193,10 @@ AppendAlterTableCmdConstraint(StringInfo buf, Constraint *constraint,
|
|||
{
|
||||
appendStringInfoString(buf, " UNIQUE");
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
if (constraint->nulls_not_distinct == true)
|
||||
{
|
||||
appendStringInfoString(buf, " NULLS NOT DISTINCT");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (subtype == AT_AddConstraint)
|
||||
|
|
|
@ -19,11 +19,7 @@
|
|||
#include "distributed/deparser.h"
|
||||
#include "distributed/listutils.h"
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
static void QualifyPublicationObjects(List *publicationObjects);
|
||||
#else
|
||||
static void QualifyTables(List *tables);
|
||||
#endif
|
||||
static void QualifyPublicationRangeVar(RangeVar *publication);
|
||||
|
||||
|
||||
|
@ -36,16 +32,10 @@ QualifyCreatePublicationStmt(Node *node)
|
|||
{
|
||||
CreatePublicationStmt *stmt = castNode(CreatePublicationStmt, node);
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
QualifyPublicationObjects(stmt->pubobjects);
|
||||
#else
|
||||
QualifyTables(stmt->tables);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* QualifyPublicationObjects ensures all table names in a list of
|
||||
* publication objects are fully qualified.
|
||||
|
@ -68,26 +58,6 @@ QualifyPublicationObjects(List *publicationObjects)
|
|||
}
|
||||
|
||||
|
||||
#else
|
||||
|
||||
/*
|
||||
* QualifyTables ensures all table names in a list are fully qualified.
|
||||
*/
|
||||
static void
|
||||
QualifyTables(List *tables)
|
||||
{
|
||||
RangeVar *rangeVar = NULL;
|
||||
|
||||
foreach_declared_ptr(rangeVar, tables)
|
||||
{
|
||||
QualifyPublicationRangeVar(rangeVar);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* QualifyPublicationObjects ensures all table names in a list of
|
||||
* publication objects are fully qualified.
|
||||
|
@ -97,11 +67,7 @@ QualifyAlterPublicationStmt(Node *node)
|
|||
{
|
||||
AlterPublicationStmt *stmt = castNode(AlterPublicationStmt, node);
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
QualifyPublicationObjects(stmt->pubobjects);
|
||||
#else
|
||||
QualifyTables(stmt->tables);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -52,8 +52,6 @@ QualifyAlterSequenceOwnerStmt(Node *node)
|
|||
}
|
||||
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/*
|
||||
* QualifyAlterSequencePersistenceStmt transforms a
|
||||
* ALTER SEQUENCE .. SET LOGGED/UNLOGGED
|
||||
|
@ -80,9 +78,6 @@ QualifyAlterSequencePersistenceStmt(Node *node)
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* QualifyAlterSequenceSchemaStmt transforms a
|
||||
* ALTER SEQUENCE .. SET SCHEMA ..
|
||||
File diff suppressed because it is too large
@ -720,10 +720,8 @@ static void RebuildWaitEventSetForSessions(DistributedExecution *execution);
|
|||
static void AddLatchWaitEventToExecution(DistributedExecution *execution);
|
||||
static void ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int
|
||||
eventCount, bool *cancellationReceived);
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
static void RemoteSocketClosedForAnySession(DistributedExecution *execution);
|
||||
static void ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount);
|
||||
#endif
|
||||
static long MillisecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
|
||||
static uint64 MicrosecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
|
||||
static int WorkerPoolCompare(const void *lhsKey, const void *rhsKey);
|
||||
|
@ -1769,11 +1767,8 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
|
|||
session->commandsSent = 0;
|
||||
session->waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
/* always detect closed sockets */
|
||||
UpdateConnectionWaitFlags(session, WL_SOCKET_CLOSED);
|
||||
#endif
|
||||
|
||||
dlist_init(&session->pendingTaskQueue);
|
||||
dlist_init(&session->readyTaskQueue);
|
||||
|
@ -1817,7 +1812,6 @@ FindOrCreateWorkerSession(WorkerPool *workerPool, MultiConnection *connection)
|
|||
* the events, even ignores cancellation events. Future callers of this
|
||||
* function should consider its limitations.
|
||||
*/
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
static void
|
||||
RemoteSocketClosedForAnySession(DistributedExecution *execution)
|
||||
{
|
||||
|
@ -1835,9 +1829,6 @@ RemoteSocketClosedForAnySession(DistributedExecution *execution)
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* SequentialRunDistributedExecution gets a distributed execution and
|
||||
* executes each individual task in the execution sequentially, one
|
||||
|
@ -2173,8 +2164,6 @@ ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events, int eventC
|
|||
}
|
||||
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
/*
|
||||
* ProcessWaitEventsForSocketClosed mainly checks for WL_SOCKET_CLOSED event.
|
||||
* If WL_SOCKET_CLOSED is found, the function sets the underlying connection's
|
||||
|
@ -2207,9 +2196,6 @@ ProcessWaitEventsForSocketClosed(WaitEvent *events, int eventCount)
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* ManageWorkerPool ensures the worker pool has the appropriate number of connections
|
||||
* based on the number of pending tasks.
|
||||
|
@ -2704,7 +2690,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
|
|||
* Instead, we prefer this slight difference, which in effect has almost no
|
||||
* difference, but doing things in different points in time.
|
||||
*/
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
/* we added new connections, rebuild the waitEventSet */
|
||||
RebuildWaitEventSetForSessions(execution);
|
||||
|
@ -2724,9 +2709,6 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
|
|||
* of the execution.
|
||||
*/
|
||||
AddLatchWaitEventToExecution(execution);
|
||||
#else
|
||||
execution->rebuildWaitEventSet = true;
|
||||
#endif
|
||||
|
||||
WorkerSession *session = NULL;
|
||||
foreach_declared_ptr(session, newSessionsList)
|
||||
|
@ -3663,13 +3645,8 @@ UpdateConnectionWaitFlags(WorkerSession *session, int waitFlags)
|
|||
return;
|
||||
}
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
/* always detect closed sockets */
|
||||
connection->waitFlags = waitFlags | WL_SOCKET_CLOSED;
|
||||
#else
|
||||
connection->waitFlags = waitFlags;
|
||||
#endif
|
||||
|
||||
/* without signalling the execution, the flag changes won't be reflected */
|
||||
execution->waitFlagsChanged = true;
|
||||
|
@ -3694,13 +3671,11 @@ CheckConnectionReady(WorkerSession *session)
|
|||
return false;
|
||||
}
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
if ((session->latestUnconsumedWaitEvents & WL_SOCKET_CLOSED) != 0)
|
||||
{
|
||||
connection->connectionState = MULTI_CONNECTION_LOST;
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* try to send all pending data */
|
||||
int sendStatus = PQflush(connection->pgConn);
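The WL_SOCKET_CLOSED guards removed in this file exist because PostgreSQL 15 is the first release whose wait-event machinery can report that the remote peer closed its socket; the adaptive executor uses that to mark connections as MULTI_CONNECTION_LOST early. A rough, hedged sketch of the underlying primitive (not the Citus implementation, which folds the event into its per-session wait event sets, and subject to the platform actually supporting closed-socket detection):

#include "postgres.h"

#include "libpq-fe.h"
#include "storage/latch.h"

/*
 * Poll a libpq connection once, without blocking, and report whether the
 * remote peer has closed its end of the socket (WL_SOCKET_CLOSED is
 * available from PostgreSQL 15 onwards).
 */
static bool
RemoteSocketClosed(PGconn *connection)
{
    if (PQsocket(connection) < 0)
    {
        return true;    /* no usable socket at all */
    }

    /* zero timeout: just sample the current socket state */
    int rc = WaitLatchOrSocket(NULL, WL_SOCKET_CLOSED | WL_TIMEOUT,
                               PQsocket(connection), 0,
                               0 /* wait_event_info */);

    return (rc & WL_SOCKET_CLOSED) != 0;
}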
@ -140,19 +140,6 @@ static void CitusQueryStatsRemoveExpiredEntries(HTAB *existingQueryIdHash);
|
|||
void
|
||||
InitializeCitusQueryStats(void)
|
||||
{
|
||||
/* on PG 15, we use shmem_request_hook_type */
|
||||
#if PG_VERSION_NUM < PG_VERSION_15
|
||||
|
||||
/* allocate shared memory */
|
||||
if (!IsUnderPostmaster)
|
||||
{
|
||||
RequestAddinShmemSpace(CitusQueryStatsSharedMemSize());
|
||||
|
||||
elog(LOG, "requesting named LWLockTranch for %s", STATS_SHARED_MEM_NAME);
|
||||
RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Install hook */
|
||||
prev_shmem_startup_hook = shmem_startup_hook;
|
||||
shmem_startup_hook = CitusQueryStatsShmemStartup;
|
||||
|
|
|
@ -1717,13 +1717,11 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe
|
|||
/*
|
||||
* As of PostgreSQL 15, the same applies to schemas.
|
||||
*/
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
List *schemaIdList =
|
||||
GetPublicationSchemas(publicationId);
|
||||
List *schemaDependencyList =
|
||||
CreateObjectAddressDependencyDefList(NamespaceRelationId, schemaIdList);
|
||||
result = list_concat(result, schemaDependencyList);
|
||||
#endif
|
||||
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -1739,48 +1739,6 @@ GetSequencesFromAttrDef(Oid attrdefOid)
|
|||
}
|
||||
|
||||
|
||||
#if PG_VERSION_NUM < PG_VERSION_15
|
||||
|
||||
/*
|
||||
* Given a pg_attrdef OID, return the relation OID and column number of
|
||||
* the owning column (represented as an ObjectAddress for convenience).
|
||||
*
|
||||
* Returns InvalidObjectAddress if there is no such pg_attrdef entry.
|
||||
*/
|
||||
ObjectAddress
|
||||
GetAttrDefaultColumnAddress(Oid attrdefoid)
|
||||
{
|
||||
ObjectAddress result = InvalidObjectAddress;
|
||||
ScanKeyData skey[1];
|
||||
HeapTuple tup;
|
||||
|
||||
Relation attrdef = table_open(AttrDefaultRelationId, AccessShareLock);
|
||||
ScanKeyInit(&skey[0],
|
||||
Anum_pg_attrdef_oid,
|
||||
BTEqualStrategyNumber, F_OIDEQ,
|
||||
ObjectIdGetDatum(attrdefoid));
|
||||
SysScanDesc scan = systable_beginscan(attrdef, AttrDefaultOidIndexId, true,
|
||||
NULL, 1, skey);
|
||||
|
||||
if (HeapTupleIsValid(tup = systable_getnext(scan)))
|
||||
{
|
||||
Form_pg_attrdef atdform = (Form_pg_attrdef) GETSTRUCT(tup);
|
||||
|
||||
result.classId = RelationRelationId;
|
||||
result.objectId = atdform->adrelid;
|
||||
result.objectSubId = atdform->adnum;
|
||||
}
|
||||
|
||||
systable_endscan(scan);
|
||||
table_close(attrdef, AccessShareLock);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* GetAttrDefsFromSequence returns a list of attrdef OIDs that have
|
||||
* a dependency on the given sequence
|
||||
|
@ -3011,7 +2969,6 @@ SyncNodeMetadataToNodesMain(Datum main_arg)
|
|||
|
||||
PopActiveSnapshot();
|
||||
CommitTransactionCommand();
|
||||
ProcessCompletedNotifies();
|
||||
|
||||
if (syncedAllNodes)
|
||||
{
|
||||
|
|
|
@ -283,9 +283,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
|
|||
case OBJECT_FDW:
|
||||
case OBJECT_FOREIGN_SERVER:
|
||||
case OBJECT_LANGUAGE:
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
case OBJECT_PARAMETER_ACL:
|
||||
#endif
|
||||
case OBJECT_PUBLICATION:
|
||||
case OBJECT_ROLE:
|
||||
case OBJECT_SCHEMA:
|
||||
|
@ -323,9 +321,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
|
|||
break;
|
||||
}
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
case OBJECT_PUBLICATION_NAMESPACE:
|
||||
#endif
|
||||
case OBJECT_USER_MAPPING:
|
||||
{
|
||||
objnode = (Node *) list_make2(linitial(name), linitial(args));
|
||||
|
|
|
@ -136,11 +136,8 @@ CreateCitusCustomScanPath(PlannerInfo *root, RelOptInfo *relOptInfo,
|
|||
path->custom_path.path.pathtarget = relOptInfo->reltarget;
|
||||
path->custom_path.path.parent = relOptInfo;
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/* necessary to avoid extra Result node in PG15 */
|
||||
path->custom_path.flags = CUSTOMPATH_SUPPORT_PROJECTION;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The 100k rows we put on the cost of the path is kind of arbitrary and could be
|
||||
|
|
|
@ -1442,13 +1442,8 @@ FinalizePlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan)
|
|||
|
||||
customScan->custom_private = list_make1(distributedPlanData);
|
||||
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
|
||||
/* necessary to avoid extra Result node in PG15 */
|
||||
customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN | CUSTOMPATH_SUPPORT_PROJECTION;
|
||||
#else
|
||||
customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Fast path queries cannot have any subplans by definition, so skip
|
||||
|
|
|
@ -38,8 +38,6 @@
|
|||
#include "distributed/shard_pruning.h"
|
||||
#include "distributed/shared_library_init.h"
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
static int SourceResultPartitionColumnIndex(Query *mergeQuery,
|
||||
List *sourceTargetList,
|
||||
CitusTableCacheEntry *targetRelation);
|
||||
|
@ -100,8 +98,6 @@ static char * MergeCommandResultIdPrefix(uint64 planId);
|
|||
static void ErrorIfMergeHasReturningList(Query *query);
|
||||
static Node * GetMergeJoinCondition(Query *mergeQuery);
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* CreateMergePlan
|
||||
|
@ -118,13 +114,6 @@ CreateMergePlan(uint64 planId, Query *originalQuery, Query *query,
|
|||
PlannerRestrictionContext *plannerRestrictionContext,
|
||||
ParamListInfo boundParams)
|
||||
{
|
||||
/* function is void for pre-15 versions of Postgres */
|
||||
#if PG_VERSION_NUM < PG_VERSION_15
|
||||
|
||||
ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));
|
||||
|
||||
#else
|
||||
|
||||
Oid targetRelationId = ModifyQueryResultRelationId(originalQuery);
|
||||
|
||||
/*
|
||||
|
@ -153,8 +142,6 @@ CreateMergePlan(uint64 planId, Query *originalQuery, Query *query,
|
|||
}
|
||||
|
||||
return distributedPlan;
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
@ -184,9 +171,6 @@ GetMergeJoinTree(Query *mergeQuery)
|
|||
}
|
||||
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
|
||||
/*
|
||||
* GetMergeJoinCondition returns the quals of the ON condition
|
||||
*/
|
||||
|
@ -1443,9 +1427,6 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* ExtractMergeSourceRangeTableEntry returns the range table entry of source
|
||||
* table or source query in USING clause.
|
||||
|
@ -1453,13 +1434,6 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
|
|||
RangeTblEntry *
|
||||
ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
|
||||
{
|
||||
/* function is void for pre-15 versions of Postgres */
|
||||
#if PG_VERSION_NUM < PG_VERSION_15
|
||||
|
||||
ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));
|
||||
|
||||
#else
|
||||
|
||||
Assert(IsMergeQuery(query));
|
||||
|
||||
List *fromList = query->jointree->fromlist;
|
||||
|
@ -1498,8 +1472,6 @@ ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
|
|||
RangeTblEntry *subqueryRte = rt_fetch(reference->rtindex, query->rtable);
|
||||
|
||||
return subqueryRte;
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
@ -1516,13 +1488,6 @@ ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
|
|||
Var *
|
||||
FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
|
||||
{
|
||||
/* function is void for pre-15 versions of Postgres */
|
||||
#if PG_VERSION_NUM < PG_VERSION_15
|
||||
|
||||
ereport(ERROR, (errmsg("MERGE is not supported in pre-15 Postgres versions")));
|
||||
|
||||
#else
|
||||
|
||||
Assert(IsMergeQuery(query));
|
||||
|
||||
if (!IsCitusTableType(targetRelationId, DISTRIBUTED_TABLE))
|
||||
|
@ -1593,8 +1558,6 @@ FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query)
|
|||
}
|
||||
|
||||
return NULL;
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -1289,7 +1289,7 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS)
|
|||
}
|
||||
|
||||
/* resolve OIDs of unknown (user-defined) types */
|
||||
Query *analyzedQuery = parse_analyze_varparams_compat(parseTree, queryString,
|
||||
Query *analyzedQuery = parse_analyze_varparams(parseTree, queryString,
|
||||
&paramTypes, &numParams, NULL);
|
||||
|
||||
/* pg_rewrite_query is a wrapper around QueryRewrite with some debugging logic */
|
||||
|
|
|
@ -122,11 +122,7 @@ update_replication_progress(LogicalDecodingContext *ctx, bool skipped_xact)
|
|||
*/
|
||||
if (ctx->end_xact || ++changes_count >= CHANGES_THRESHOLD)
|
||||
{
|
||||
#if (PG_VERSION_NUM >= PG_VERSION_15)
|
||||
OutputPluginUpdateProgress(ctx, skipped_xact);
|
||||
#else
|
||||
OutputPluginUpdateProgress(ctx);
|
||||
#endif
|
||||
changes_count = 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -174,15 +174,11 @@ static bool FinishedStartupCitusBackend = false;
|
|||
|
||||
static object_access_hook_type PrevObjectAccessHook = NULL;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
static shmem_request_hook_type prev_shmem_request_hook = NULL;
|
||||
#endif
|
||||
|
||||
void _PG_init(void);
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
static void citus_shmem_request(void);
|
||||
#endif
|
||||
static void CitusObjectAccessHook(ObjectAccessType access, Oid classId, Oid objectId, int
|
||||
subId, void *arg);
|
||||
static void DoInitialCleanup(void);
|
||||
|
@ -475,10 +471,8 @@ _PG_init(void)
|
|||
original_client_auth_hook = ClientAuthentication_hook;
|
||||
ClientAuthentication_hook = CitusAuthHook;
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
prev_shmem_request_hook = shmem_request_hook;
|
||||
shmem_request_hook = citus_shmem_request;
|
||||
#endif
|
||||
|
||||
InitializeMaintenanceDaemon();
|
||||
|
||||
|
@ -602,8 +596,6 @@ AdjustDynamicLibraryPathForCdcDecoders(void)
|
|||
}
|
||||
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
|
||||
/*
|
||||
* Requests any additional shared memory required for citus.
|
||||
*/
|
||||
|
@ -624,9 +616,6 @@ citus_shmem_request(void)
|
|||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* DoInitialCleanup does cleanup at start time.
|
||||
* Currently it:
|
||||
|
|
|
@ -310,7 +310,7 @@ fake_relation_set_new_filenode(Relation rel,
|
|||
*/
|
||||
*minmulti = GetOldestMultiXactId();
|
||||
|
||||
SMgrRelation srel = RelationCreateStorage_compat(*newrnode, persistence, true);
|
||||
SMgrRelation srel = RelationCreateStorage(*newrnode, persistence, true);
|
||||
|
||||
/*
|
||||
* If required, set up an init fork for an unlogged table so that it can
|
||||
|
|
|
@ -49,13 +49,8 @@ makeIntConst(int val, int location)
|
|||
{
|
||||
A_Const *n = makeNode(A_Const);
|
||||
|
||||
#if PG_VERSION_NUM >= PG_VERSION_15
|
||||
n->val.ival.type = T_Integer;
|
||||
n->val.ival.ival = val;
|
||||
#else
|
||||
n->val.type = T_Integer;
|
||||
n->val.val.ival = val;
|
||||
#endif
|
||||
n->location = location;
|
||||
|
||||
return (Node *) n;
|
||||
|
|
|
@ -519,15 +519,6 @@ UserHasPermissionToViewStatsOf(Oid currentUserId, Oid backendOwnedId)
void
InitializeBackendManagement(void)
{
	/* on PG 15, we use shmem_request_hook_type */
#if PG_VERSION_NUM < PG_VERSION_15

	/* allocate shared memory */
	if (!IsUnderPostmaster)
	{
		RequestAddinShmemSpace(BackendManagementShmemSize());
	}
#endif
	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = BackendManagementShmemInit;
}
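The block removed above reserved shared memory directly from the backend-management startup path, which PG15 no longer permits outside of a shmem_request_hook; the _PG_init and citus_shmem_request hunks earlier in this diff are where that reservation now happens. Below is a hedged, minimal sketch of the PG15+ pattern for a generic extension; my_shmem_request and my_shmem_size are illustrative names (in Citus the size comes from BackendManagementShmemSize()), not identifiers from this patch.

/* Hedged sketch of the PG15+ shmem_request_hook pattern; not code from this patch. */
#include "postgres.h"

#include "fmgr.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/shmem.h"

PG_MODULE_MAGIC;

static shmem_request_hook_type prev_shmem_request_hook = NULL;

void _PG_init(void);
static void my_shmem_request(void);
static Size my_shmem_size(void);

void
_PG_init(void)
{
	/* chain any previously installed hook, then install ours */
	prev_shmem_request_hook = shmem_request_hook;
	shmem_request_hook = my_shmem_request;
}


static void
my_shmem_request(void)
{
	if (prev_shmem_request_hook != NULL)
	{
		prev_shmem_request_hook();
	}

	/* the reservation now happens here instead of in the startup path */
	RequestAddinShmemSpace(my_shmem_size());
}


static Size
my_shmem_size(void)
{
	/* placeholder size calculation for the sketch */
	return MAXALIGN(1024);
}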
@ -1393,87 +1393,6 @@ CalculateBackoffDelay(int retryCount)
}


#if PG_VERSION_NUM < PG_VERSION_15
static const char *
error_severity(int elevel)
{
	const char *prefix;

	switch (elevel)
	{
		case DEBUG1:
		case DEBUG2:
		case DEBUG3:
		case DEBUG4:
		case DEBUG5:
		{
			prefix = gettext_noop("DEBUG");
			break;
		}

		case LOG:
		case LOG_SERVER_ONLY:
		{
			prefix = gettext_noop("LOG");
			break;
		}

		case INFO:
		{
			prefix = gettext_noop("INFO");
			break;
		}

		case NOTICE:
		{
			prefix = gettext_noop("NOTICE");
			break;
		}

		case WARNING:
		{
			prefix = gettext_noop("WARNING");
			break;
		}

		case WARNING_CLIENT_ONLY:
		{
			prefix = gettext_noop("WARNING");
			break;
		}

		case ERROR:
		{
			prefix = gettext_noop("ERROR");
			break;
		}

		case FATAL:
		{
			prefix = gettext_noop("FATAL");
			break;
		}

		case PANIC:
		{
			prefix = gettext_noop("PANIC");
			break;
		}

		default:
		{
			prefix = "???";
			break;
		}
	}

	return prefix;
}


#endif


/*
 * bgw_generate_returned_message -
 *     generates the message to be inserted into the job_run_details table
@ -15,6 +15,7 @@
#include "unistd.h"

#include "access/hash.h"
#include "common/pg_prng.h"
#include "executor/execdesc.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"

@ -38,10 +39,6 @@
#include "distributed/tuplestore.h"
#include "distributed/utils/citus_stat_tenants.h"

#if (PG_VERSION_NUM >= PG_VERSION_15)
#include "common/pg_prng.h"
#endif

static void AttributeMetricsIfApplicable(void);

ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
@ -298,13 +295,7 @@ AttributeTask(char *tenantId, int colocationId, CmdType commandType)
	/* If the tenant is not found in the hash table, we will track the query with a probability of StatTenantsSampleRateForNewTenants. */
	if (!found)
	{
#if (PG_VERSION_NUM >= PG_VERSION_15)
		double randomValue = pg_prng_double(&pg_global_prng_state);
#else

		/* Generate a random double between 0 and 1 */
		double randomValue = (double) random() / MAX_RANDOM_VALUE;
#endif
		bool shouldTrackQuery = randomValue <= StatTenantsSampleRateForNewTenants;
		if (!shouldTrackQuery)
		{
@ -14,14 +14,6 @@

#include "pg_version_constants.h"

#if PG_VERSION_NUM >= PG_VERSION_15
#define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \
	ExecARDeleteTriggers(a, b, c, d, e, f)
#else
#define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \
	ExecARDeleteTriggers(a, b, c, d, e)
#endif

#define ACLCHECK_OBJECT_TABLE OBJECT_TABLE

#define ExplainPropertyLong(qlabel, value, es) \
@ -525,13 +525,11 @@ extern List * PostprocessAlterSequenceSchemaStmt(Node *node, const char *querySt
extern List * PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString,
											   ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString);
#if (PG_VERSION_NUM >= PG_VERSION_15)
extern List * PreprocessAlterSequencePersistenceStmt(Node *node, const char *queryString,
													 ProcessUtilityContext
													 processUtilityContext);
extern List * PreprocessSequenceAlterTableStmt(Node *node, const char *queryString,
											   ProcessUtilityContext processUtilityContext);
#endif
extern List * PreprocessDropSequenceStmt(Node *node, const char *queryString,
										 ProcessUtilityContext processUtilityContext);
extern List * SequenceDropStmtObjectAddress(Node *stmt, bool missing_ok, bool

@ -547,10 +545,8 @@ extern List * AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok,
												   isPostprocess);
extern List * AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok, bool
												  isPostprocess);
#if (PG_VERSION_NUM >= PG_VERSION_15)
extern List * AlterSequencePersistenceStmtObjectAddress(Node *node, bool missing_ok, bool
														isPostprocess);
#endif
extern List * RenameSequenceStmtObjectAddress(Node *node, bool missing_ok, bool
											  isPostprocess);
extern void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt);
@ -754,8 +750,6 @@ extern List * CreateTriggerStmtObjectAddress(Node *node, bool missingOk, bool
											 isPostprocess);
extern void CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt,
										  char *schemaName, uint64 shardId);
extern List * PreprocessAlterTriggerRenameStmt(Node *node, const char *queryString,
											   ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterTriggerRenameStmt(Node *node, const char *queryString);
extern void AlterTriggerRenameEventExtendNames(RenameStmt *renameTriggerStmt,
											   char *schemaName, uint64 shardId);
@ -267,9 +267,7 @@ extern char * DeparseDropSequenceStmt(Node *node);
extern char * DeparseRenameSequenceStmt(Node *node);
extern char * DeparseAlterSequenceSchemaStmt(Node *node);
extern char * DeparseAlterSequenceOwnerStmt(Node *node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
extern char * DeparseAlterSequencePersistenceStmt(Node *node);
#endif
extern char * DeparseGrantOnSequenceStmt(Node *node);

/* forward declarations for qualify_sequence_stmt.c */

@ -277,9 +275,7 @@ extern void QualifyRenameSequenceStmt(Node *node);
extern void QualifyDropSequenceStmt(Node *node);
extern void QualifyAlterSequenceSchemaStmt(Node *node);
extern void QualifyAlterSequenceOwnerStmt(Node *node);
#if (PG_VERSION_NUM >= PG_VERSION_15)
extern void QualifyAlterSequencePersistenceStmt(Node *node);
#endif
extern void QualifyGrantOnSequenceStmt(Node *node);

#endif /* CITUS_DEPARSER_H */
@ -28,11 +28,6 @@

#define CURSOR_OPT_FORCE_DISTRIBUTED 0x080000

/* Hack to compile Citus on pre-MERGE Postgres versions */
#if PG_VERSION_NUM < PG_VERSION_15
#define CMD_MERGE CMD_UNKNOWN
#endif


/* level of planner calls */
extern int PlannerLevel;
@ -128,9 +128,6 @@ extern List * IdentitySequenceDependencyCommandList(Oid targetRelationId);

extern List * DDLCommandsForSequence(Oid sequenceOid, char *ownerName);
extern List * GetSequencesFromAttrDef(Oid attrdefOid);
#if PG_VERSION_NUM < PG_VERSION_15
ObjectAddress GetAttrDefaultColumnAddress(Oid attrdefoid);
#endif
extern List * GetAttrDefsFromSequence(Oid seqOid);
extern void GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList,
											  AttrNumber attnum, char depType);
@ -286,76 +286,6 @@ typedef RangeTblEntry RTEPermissionInfo;

#endif

#if PG_VERSION_NUM >= PG_VERSION_15
#define ProcessCompletedNotifies()
#define RelationCreateStorage_compat(a, b, c) RelationCreateStorage(a, b, c)
#define parse_analyze_varparams_compat(a, b, c, d, e) parse_analyze_varparams(a, b, c, d, \
															  e)
#define CREATE_SEQUENCE_COMMAND \
	"CREATE %sSEQUENCE IF NOT EXISTS %s AS %s INCREMENT BY " INT64_FORMAT \
	" MINVALUE " INT64_FORMAT " MAXVALUE " INT64_FORMAT \
	" START WITH " INT64_FORMAT " CACHE " INT64_FORMAT " %sCYCLE"
#else

#include "nodes/value.h"
#include "storage/smgr.h"
#include "utils/int8.h"
#include "utils/rel.h"

typedef Value String;

#ifdef HAVE_LONG_INT_64
#define strtoi64(str, endptr, base) ((int64) strtol(str, endptr, base))
#define strtou64(str, endptr, base) ((uint64) strtoul(str, endptr, base))
#else
#define strtoi64(str, endptr, base) ((int64) strtoll(str, endptr, base))
#define strtou64(str, endptr, base) ((uint64) strtoull(str, endptr, base))
#endif
#define RelationCreateStorage_compat(a, b, c) RelationCreateStorage(a, b)
#define parse_analyze_varparams_compat(a, b, c, d, e) parse_analyze_varparams(a, b, c, d)
#define pgstat_init_relation(r) pgstat_initstats(r)
#define pg_analyze_and_rewrite_fixedparams(a, b, c, d, e) pg_analyze_and_rewrite(a, b, c, \
																				 d, e)
#define boolVal(v) intVal(v)
#define makeBoolean(val) makeInteger(val)

static inline int64
pg_strtoint64(char *s)
{
	int64 result;
	(void) scanint8(s, false, &result);
	return result;
}


/*
 * RelationGetSmgr got backported in 13.10 and 14.7 so redefining it for any
 * version higher causes compilation errors due to redefining of the function.
 * We want to use it in all versions. So we backport it ourselves in earlier
 * versions, and rely on the Postgres provided version in the later versions.
 */
#if PG_VERSION_NUM < 140007
static inline SMgrRelation
RelationGetSmgr(Relation rel)
{
	if (unlikely(rel->rd_smgr == NULL))
	{
		smgrsetowner(&(rel->rd_smgr), smgropen(rel->rd_node, rel->rd_backend));
	}
	return rel->rd_smgr;
}


#endif


#define CREATE_SEQUENCE_COMMAND \
	"CREATE SEQUENCE IF NOT EXISTS %s AS %s INCREMENT BY " INT64_FORMAT \
	" MINVALUE " INT64_FORMAT " MAXVALUE " INT64_FORMAT \
	" START WITH " INT64_FORMAT " CACHE " INT64_FORMAT " %sCYCLE"

#endif

#define SetListCellPtr(a, b) ((a)->ptr_value = (b))
#define RangeTableEntryFromNSItem(a) ((a)->p_rte)
#define fcGetArgValue(fc, n) ((fc)->args[n].value)

@ -365,4 +295,9 @@ RelationGetSmgr(Relation rel)
#define fcSetArg(fc, n, value) fcSetArgExt(fc, n, value, false)
#define fcSetArgNull(fc, n) fcSetArgExt(fc, n, (Datum) 0, true)

#define CREATE_SEQUENCE_COMMAND \
	"CREATE %sSEQUENCE IF NOT EXISTS %s AS %s INCREMENT BY " INT64_FORMAT \
	" MINVALUE " INT64_FORMAT " MAXVALUE " INT64_FORMAT \
	" START WITH " INT64_FORMAT " CACHE " INT64_FORMAT " %sCYCLE"

#endif /* PG_VERSION_COMPAT_H */
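For reference, the CREATE_SEQUENCE_COMMAND format string kept above takes three strings (an optional UNLOGGED prefix, the qualified sequence name, the AS type), five int64 values, and a CYCLE/NO prefix, in that order. A hedged, self-contained sketch of how such a format string is typically expanded with appendStringInfo follows; the function and parameter names are illustrative assumptions, not identifiers from this patch, and the sketch assumes the macro definition above is in scope.

/* Illustrative sketch only; assumes CREATE_SEQUENCE_COMMAND from pg_version_compat.h. */
#include "postgres.h"

#include "lib/stringinfo.h"

static void
AppendCreateSequenceCommand(StringInfo buf, const char *qualifiedSequenceName,
							const char *typeName, int64 incrementBy,
							int64 minValue, int64 maxValue,
							int64 startValue, int64 cacheSize,
							bool unlogged, bool cycle)
{
	/* argument order follows the format string: %s %s %s, five INT64_FORMAT, %sCYCLE */
	appendStringInfo(buf, CREATE_SEQUENCE_COMMAND,
					 unlogged ? "UNLOGGED " : "",
					 qualifiedSequenceName, typeName,
					 incrementBy, minValue, maxValue,
					 startValue, cacheSize,
					 cycle ? "" : "NO ");
}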
@ -11,7 +11,6 @@
#ifndef PG_VERSION_CONSTANTS
#define PG_VERSION_CONSTANTS

#define PG_VERSION_14 140000
#define PG_VERSION_15 150000
#define PG_VERSION_16 160000
#define PG_VERSION_17 170000
@ -108,19 +108,13 @@ s/(ERROR: |WARNING: |error:) invalid socket/\1 connection not open/g
/^\s*invalid socket$/d

# pg15 changes
# can be removed when dropping PG13&14 support
#if (PG_VERSION_NUM >= PG_VERSION_14) && (PG_VERSION_NUM < PG_VERSION_15)
# (This is not preprocessor directive, but a reminder for the developer that will drop PG14 support )
s/is not a PostgreSQL server process/is not a PostgreSQL backend process/g
s/ AS "\?column\?"//g
s/".*\.(.*)": (found .* removable)/"\1": \2/g
# We ignore multiline error messages, and substitute first line with a single line
# alternative that is used in some older libpq versions.
s/(ERROR: |WARNING: |error:) server closed the connection unexpectedly/\1 connection not open/g
/^\s*This probably means the server terminated abnormally$/d
/^\s*before or while processing the request.$/d
/^\s*connection not open$/d
#endif /* (PG_VERSION_NUM >= PG_VERSION_13) && (PG_VERSION_NUM < PG_VERSION_14) */

# intermediate_results
s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g
@ -1,17 +1,6 @@
--
-- CITUS_LOCAL_TABLES_QUERIES
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
 server_version_ge_15
---------------------------------------------------------------------
 t
(1 row)

\set VERBOSITY terse
SET citus.next_shard_id TO 1509000;
SET citus.shard_replication_factor TO 1;

File diff suppressed because it is too large
@ -1,10 +1,3 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
\endif
CREATE TABLE alter_am(i int);
INSERT INTO alter_am SELECT generate_series(1,1000000);
SELECT * FROM columnar.options WHERE relation = 'alter_am'::regclass;
@ -1,6 +0,0 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
@ -55,7 +55,7 @@ step s1-commit:
    COMMIT;

s2: INFO: vacuuming "public.test_vacuum_vs_insert"
s2: INFO: "test_vacuum_vs_insert": found 0 removable, 6 nonremovable row versions in 4 pages
s2: INFO: "public.test_vacuum_vs_insert": found 0 removable, 6 nonremovable row versions in 4 pages
DETAIL: 0 dead row versions cannot be removed yet.
step s2-vacuum-full: <... completed>
step s2-select:
@ -3,17 +3,6 @@
--
-- Test queries on a distributed table with shards on the coordinator
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
 server_version_ge_15
---------------------------------------------------------------------
 t
(1 row)

CREATE SCHEMA coordinator_shouldhaveshards;
SET search_path TO coordinator_shouldhaveshards;
SET citus.next_shard_id TO 1503000;

File diff suppressed because it is too large
@ -1,17 +1,6 @@
--
-- CTE_INLINE
--
-- This test file has an alternative output because of the change in the
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
-- The alternative output can be deleted when we drop support for PG14
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
 server_version_ge_15
---------------------------------------------------------------------
 t
(1 row)

CREATE SCHEMA cte_inline;
SET search_path TO cte_inline;
SET citus.next_shard_id TO 1960000;

File diff suppressed because it is too large
@ -1,13 +1,6 @@
|
|||
--
|
||||
-- PG15+ test as WL_SOCKET_CLOSED exposed for PG15+
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
CREATE SCHEMA socket_close;
|
||||
SET search_path TO socket_close;
|
||||
CREATE OR REPLACE FUNCTION kill_all_cached_internal_conns(gpid bigint)
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
--
|
||||
-- PG15+ test as WL_SOCKET_CLOSED exposed for PG15+
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -1,16 +1,6 @@
|
|||
--
|
||||
-- GRANT_ON_SCHEMA_PROPAGATION
|
||||
--
|
||||
-- this test has different output for PG14 compared to PG15
|
||||
-- In PG15, public schema is owned by pg_database_owner role
|
||||
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- test grants are propagated when the schema is
|
||||
CREATE SCHEMA dist_schema;
|
||||
CREATE TABLE dist_schema.dist_table (id int);
|
||||
|
|
|
@ -1,400 +0,0 @@
|
|||
--
|
||||
-- GRANT_ON_SCHEMA_PROPAGATION
|
||||
--
|
||||
-- this test has different output for PG14 compared to PG15
|
||||
-- In PG15, public schema is owned by pg_database_owner role
|
||||
-- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
-- test grants are propagated when the schema is
|
||||
CREATE SCHEMA dist_schema;
|
||||
CREATE TABLE dist_schema.dist_table (id int);
|
||||
CREATE SCHEMA another_dist_schema;
|
||||
CREATE TABLE another_dist_schema.dist_table (id int);
|
||||
SET citus.enable_ddl_propagation TO off;
|
||||
CREATE SCHEMA non_dist_schema;
|
||||
SET citus.enable_ddl_propagation TO on;
|
||||
-- create roles on all nodes
|
||||
CREATE USER role_1;
|
||||
CREATE USER role_2;
|
||||
CREATE USER role_3;
|
||||
-- do some varying grants
|
||||
GRANT USAGE, CREATE ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION;
|
||||
GRANT USAGE ON SCHEMA dist_schema TO role_2;
|
||||
SET ROLE role_1;
|
||||
GRANT USAGE ON SCHEMA dist_schema TO role_3 WITH GRANT OPTION;
|
||||
GRANT CREATE ON SCHEMA dist_schema TO role_3;
|
||||
GRANT CREATE, USAGE ON SCHEMA dist_schema TO PUBLIC;
|
||||
RESET ROLE;
|
||||
GRANT USAGE ON SCHEMA dist_schema TO PUBLIC;
|
||||
SELECT create_distributed_table('dist_schema.dist_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('another_dist_schema.dist_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- grant all permissions
|
||||
GRANT ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema TO role_1, role_2, role_3 WITH GRANT OPTION;
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres,role_3=U*C*/postgres}
|
||||
non_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres,role_3=U*C*/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke all permissions
|
||||
REVOKE ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema FROM role_1, role_2, role_3, PUBLIC CASCADE;
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres}
|
||||
non_dist_schema | {postgres=UC/postgres}
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- grant with multiple permissions, roles and schemas
|
||||
GRANT USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema TO role_1, role_2, role_3;
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
non_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke with multiple permissions, roles and schemas
|
||||
REVOKE USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema FROM role_1, role_2;
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
non_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
(3 rows)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- grant with grant option
|
||||
GRANT USAGE ON SCHEMA dist_schema TO role_1, role_3 WITH GRANT OPTION;
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=U*C/postgres,role_1=U*/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke grant option for
|
||||
REVOKE GRANT OPTION FOR USAGE ON SCHEMA dist_schema FROM role_3;
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
-- test current_user
|
||||
SET citus.enable_alter_role_propagation TO ON;
|
||||
ALTER ROLE role_1 SUPERUSER;
|
||||
SET citus.enable_alter_role_propagation TO OFF;
|
||||
SET ROLE role_1;
|
||||
-- this is only supported on citus enterprise where multiple users can be managed
|
||||
-- The output of the nspname select below will indicate if the create has been granted
|
||||
GRANT CREATE ON SCHEMA dist_schema TO CURRENT_USER;
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
|
||||
dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*C/postgres}
|
||||
(2 rows)
|
||||
|
||||
\c - - - :master_port
|
||||
RESET ROLE;
|
||||
SET citus.enable_alter_role_propagation TO ON;
|
||||
ALTER ROLE role_1 NOSUPERUSER;
|
||||
SET citus.enable_alter_role_propagation TO OFF;
|
||||
DROP TABLE dist_schema.dist_table, another_dist_schema.dist_table;
|
||||
DROP SCHEMA dist_schema;
|
||||
DROP SCHEMA another_dist_schema;
|
||||
DROP SCHEMA non_dist_schema;
|
||||
-- test if the grantors are propagated correctly
|
||||
-- first remove one of the worker nodes
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- create a new schema
|
||||
CREATE SCHEMA grantor_schema;
|
||||
-- give cascading permissions
|
||||
GRANT USAGE, CREATE ON SCHEMA grantor_schema TO role_1 WITH GRANT OPTION;
|
||||
GRANT CREATE ON SCHEMA grantor_schema TO PUBLIC;
|
||||
SET ROLE role_1;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_2 WITH GRANT OPTION;
|
||||
GRANT CREATE ON SCHEMA grantor_schema TO role_2;
|
||||
GRANT USAGE, CREATE ON SCHEMA grantor_schema TO role_3;
|
||||
GRANT CREATE, USAGE ON SCHEMA grantor_schema TO PUBLIC;
|
||||
SET ROLE role_2;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_3;
|
||||
RESET ROLE;
|
||||
-- distribute the schema
|
||||
CREATE TABLE grantor_schema.grantor_table (id INT);
|
||||
SELECT create_distributed_table('grantor_schema.grantor_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- check if the grantors are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- add the previously removed node
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- check if the grantors are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke one of the permissions
|
||||
REVOKE USAGE ON SCHEMA grantor_schema FROM role_1 CASCADE;
|
||||
-- check if revoke worked correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=C*/postgres,=C/postgres,role_2=C/role_1,role_3=C/role_1,=C/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=C*/postgres,=C/postgres,role_2=C/role_1,role_3=C/role_1,=C/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- test if grantor propagates correctly on already distributed schemas
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_1 WITH GRANT OPTION;
|
||||
SET ROLE role_1;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_2;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_3 WITH GRANT OPTION;
|
||||
SET ROLE role_3;
|
||||
GRANT USAGE ON SCHEMA grantor_schema TO role_2;
|
||||
RESET ROLE;
|
||||
-- check the results
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
DROP TABLE grantor_schema.grantor_table;
|
||||
DROP SCHEMA grantor_schema CASCADE;
|
||||
-- test distributing the schema with another user
|
||||
CREATE SCHEMA dist_schema;
|
||||
GRANT ALL ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION;
|
||||
SET ROLE role_1;
|
||||
GRANT ALL ON SCHEMA dist_schema TO role_2 WITH GRANT OPTION;
|
||||
CREATE TABLE dist_schema.dist_table (id int);
|
||||
SELECT create_distributed_table('dist_schema.dist_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
DROP TABLE dist_schema.dist_table;
|
||||
DROP SCHEMA dist_schema CASCADE;
|
||||
-- test grants on public schema
|
||||
-- first remove one of the worker nodes
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- to avoid different output in PG15
|
||||
GRANT CREATE ON SCHEMA public TO public;
|
||||
-- distribute the public schema (it has to be distributed by now but just in case)
|
||||
CREATE TABLE public_schema_table (id INT);
|
||||
SELECT create_distributed_table('public_schema_table', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- give cascading permissions
|
||||
GRANT USAGE, CREATE ON SCHEMA PUBLIC TO role_1 WITH GRANT OPTION;
|
||||
SET ROLE role_1;
|
||||
GRANT USAGE ON SCHEMA PUBLIC TO PUBLIC;
|
||||
RESET ROLE;
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- add the previously removed node
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
-- revoke those new permissions
|
||||
REVOKE CREATE, USAGE ON SCHEMA PUBLIC FROM role_1 CASCADE;
|
||||
-- check if the grants are propagated correctly
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres}
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
|
||||
nspname | nspacl
|
||||
---------------------------------------------------------------------
|
||||
public | {postgres=UC/postgres,=UC/postgres}
|
||||
(1 row)
|
||||
|
||||
\c - - - :master_port
|
||||
DROP TABLE public_schema_table;
|
||||
DROP ROLE role_1, role_2, role_3;
|
|
@ -1,17 +1,6 @@
|
|||
--
|
||||
-- INSERT_SELECT_REPARTITION
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- tests behaviour of INSERT INTO ... SELECT with repartitioning
|
||||
CREATE SCHEMA insert_select_repartition;
|
||||
SET search_path TO 'insert_select_repartition';
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,17 +1,6 @@
|
|||
--
|
||||
-- INTERMEDIATE_RESULT_PRUNING
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA intermediate_result_pruning;
|
||||
SET search_path TO intermediate_result_pruning;
|
||||
SET citus.log_intermediate_results TO TRUE;
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,19 +1,11 @@
|
|||
--
|
||||
-- ISSUE_5248
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- backup modes of Postgres. Specifically, there is a renaming
|
||||
-- issue: pg_stop_backup PRE PG15 vs pg_backup_stop PG15+
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
CREATE SCHEMA issue_5248;
|
||||
SET search_path TO issue_5248;
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.next_shard_id TO 3013000;
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
create table countries(
|
||||
id serial primary key
|
||||
, name text
|
||||
|
@ -219,12 +211,8 @@ FROM (
|
|||
(
|
||||
SELECT utc_offset
|
||||
FROM pg_catalog.pg_timezone_names limit 1 offset 4) limit 91) AS subq_3
|
||||
\if :server_version_ge_15
|
||||
WHERE pg_catalog.pg_backup_stop() > cast(NULL AS record) limit 100;
|
||||
ERROR: cannot push down subquery on the target list
|
||||
DETAIL: Subqueries in the SELECT part of the query can only be pushed down if they happen before aggregates and window functions
|
||||
\else
|
||||
WHERE pg_catalog.pg_stop_backup() > cast(NULL AS pg_lsn) limit 100;
|
||||
\endif
|
||||
SET client_min_messages TO WARNING;
|
||||
DROP SCHEMA issue_5248 CASCADE;
|
||||
|
|
|
@ -1,230 +0,0 @@
|
|||
--
|
||||
-- ISSUE_5248
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- backup modes of Postgres. Specifically, there is a renaming
|
||||
-- issue: pg_stop_backup PRE PG15 vs pg_backup_stop PG15+
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
CREATE SCHEMA issue_5248;
|
||||
SET search_path TO issue_5248;
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.next_shard_id TO 3013000;
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
create table countries(
|
||||
id serial primary key
|
||||
, name text
|
||||
, code varchar(2) collate "C" unique
|
||||
);
|
||||
insert into countries(name, code) select 'country-'||i, i::text from generate_series(10,99) i;
|
||||
select create_reference_table('countries');
|
||||
NOTICE: Copying data from local table...
|
||||
NOTICE: copying the data has completed
|
||||
DETAIL: The local data in the table is no longer visible, but is still on disk.
|
||||
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$issue_5248.countries$$)
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
create table orgs (
|
||||
id bigserial primary key
|
||||
, name text
|
||||
, created_at timestamptz default now()
|
||||
);
|
||||
select create_distributed_table('orgs', 'id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
create table users (
|
||||
id bigserial
|
||||
, org_id bigint references orgs(id)
|
||||
, name text
|
||||
, created_at timestamptz default now()
|
||||
, country_id int -- references countries(id)
|
||||
, score bigint generated always as (id + country_id) stored
|
||||
, primary key (org_id, id)
|
||||
);
|
||||
select create_distributed_table('users', 'org_id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
alter table users add constraint fk_user_country foreign key (country_id) references countries(id);
|
||||
create table orders (
|
||||
id bigserial
|
||||
, org_id bigint references orgs(id)
|
||||
, user_id bigint
|
||||
, price int
|
||||
, info jsonb
|
||||
, primary key (org_id, id)
|
||||
, foreign key (org_id, user_id) references users(org_id, id)
|
||||
);
|
||||
select create_distributed_table('orders', 'org_id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
create table events (
|
||||
id bigserial not null
|
||||
, user_id bigint not null
|
||||
, org_id bigint not null
|
||||
, event_time timestamp not null default now()
|
||||
, event_type int not null default 0
|
||||
, payload jsonb
|
||||
, primary key (user_id, id)
|
||||
);
|
||||
create index event_time_idx on events using BRIN (event_time);
|
||||
create index event_json_idx on events using gin(payload);
|
||||
select create_distributed_table('events', 'user_id'); -- on purpose don't colocate on correctly on org_id
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
create table local_data(
|
||||
id bigserial primary key
|
||||
, val int default ( (random()*100)::int )
|
||||
);
|
||||
insert into orgs(id, name) select i,'org-'||i from generate_series(1,10) i;
|
||||
insert into users(id, name, org_id, country_id) select i,'user-'||i, i+1, (i%90)+1 from generate_series(1,5) i;
|
||||
insert into orders(id, org_id, user_id, price) select i, ((i+1))+1 , i+1, i/100 from generate_series(1,2) i;
|
||||
insert into events(id, org_id, user_id, event_type) select i, ((i+1))+1 , i+1, i/100 from generate_series(1,10) i;
|
||||
insert into local_data(id) select generate_series(1,10);
|
||||
/*
|
||||
* Test that we don't get a crash. See #5248.
|
||||
*/
|
||||
SELECT subq_3.c15 AS c0,
|
||||
subq_3.c0 AS c1,
|
||||
subq_3.c15 AS c2,
|
||||
subq_0.c1 AS c3,
|
||||
pg_catalog.String_agg( Cast(
|
||||
(
|
||||
SELECT tgargs
|
||||
FROM pg_catalog.pg_trigger limit 1 offset 1) AS BYTEA), Cast(
|
||||
(
|
||||
SELECT minimum_value
|
||||
FROM columnar.chunk limit 1 offset 5) AS BYTEA)) OVER (partition BY subq_3.c10 ORDER BY subq_3.c12,subq_0.c2) AS c4,
|
||||
subq_0.c1 AS c5
|
||||
FROM (
|
||||
SELECT ref_1.address AS c0,
|
||||
ref_1.error AS c1,
|
||||
sample_0.NAME AS c2,
|
||||
sample_2.trftosql AS c3
|
||||
FROM pg_catalog.pg_statio_all_sequences AS ref_0
|
||||
INNER JOIN pg_catalog.pg_hba_file_rules AS ref_1
|
||||
ON ((
|
||||
SELECT pg_catalog.Max(aggnumdirectargs)
|
||||
FROM pg_catalog.pg_aggregate) <= ref_0.blks_hit)
|
||||
INNER JOIN countries AS sample_0 TABLESAMPLE system (6.4)
|
||||
INNER JOIN local_data AS sample_1 TABLESAMPLE bernoulli (8)
|
||||
ON ((
|
||||
true)
|
||||
OR (
|
||||
sample_0.NAME IS NOT NULL))
|
||||
INNER JOIN pg_catalog.pg_transform AS sample_2 TABLESAMPLE bernoulli (1.2)
|
||||
INNER JOIN pg_catalog.pg_language AS ref_2
|
||||
ON ((
|
||||
SELECT shard_cost_function
|
||||
FROM pg_catalog.pg_dist_rebalance_strategy limit 1 offset 1) IS NULL)
|
||||
RIGHT JOIN pg_catalog.pg_index AS sample_3 TABLESAMPLE system (0.3)
|
||||
ON ((
|
||||
cast(NULL AS bpchar) ~<=~ cast(NULL AS bpchar))
|
||||
OR ((
|
||||
EXISTS
|
||||
(
|
||||
SELECT sample_3.indnkeyatts AS c0,
|
||||
sample_2.trflang AS c1,
|
||||
sample_2.trftype AS c2
|
||||
FROM pg_catalog.pg_statistic_ext AS sample_4 TABLESAMPLE bernoulli (8.6)
|
||||
WHERE sample_2.trftype IS NOT NULL))
|
||||
AND (
|
||||
false)))
|
||||
ON (
|
||||
EXISTS
|
||||
(
|
||||
SELECT sample_0.id AS c0,
|
||||
sample_3.indisprimary AS c1
|
||||
FROM orgs AS sample_5 TABLESAMPLE system (5.3)
|
||||
WHERE false))
|
||||
ON (
|
||||
cast(NULL AS float8) >
|
||||
(
|
||||
SELECT pg_catalog.avg(enumsortorder)
|
||||
FROM pg_catalog.pg_enum) )
|
||||
WHERE cast(COALESCE(
|
||||
CASE
|
||||
WHEN ref_1.auth_method ~>=~ ref_1.auth_method THEN cast(NULL AS path)
|
||||
ELSE cast(NULL AS path)
|
||||
END , cast(NULL AS path)) AS path) = cast(NULL AS path)) AS subq_0,
|
||||
lateral
|
||||
(
|
||||
SELECT
|
||||
(
|
||||
SELECT pg_catalog.stddev(total_time)
|
||||
FROM pg_catalog.pg_stat_user_functions) AS c0,
|
||||
subq_0.c1 AS c1,
|
||||
subq_2.c0 AS c2,
|
||||
subq_0.c2 AS c3,
|
||||
subq_0.c0 AS c4,
|
||||
cast(COALESCE(subq_2.c0, subq_2.c0) AS text) AS c5,
|
||||
subq_2.c0 AS c6,
|
||||
subq_2.c1 AS c7,
|
||||
subq_2.c1 AS c8,
|
||||
subq_2.c1 AS c9,
|
||||
subq_0.c3 AS c10,
|
||||
pg_catalog.pg_stat_get_db_temp_files( cast(
|
||||
(
|
||||
SELECT objoid
|
||||
FROM pg_catalog.pg_description limit 1 offset 1) AS oid)) AS c11,
|
||||
subq_0.c3 AS c12,
|
||||
subq_2.c1 AS c13,
|
||||
subq_0.c0 AS c14,
|
||||
subq_0.c3 AS c15,
|
||||
subq_0.c3 AS c16,
|
||||
subq_0.c1 AS c17,
|
||||
subq_0.c2 AS c18
|
||||
FROM (
|
||||
SELECT subq_1.c2 AS c0,
|
||||
subq_0.c3 AS c1
|
||||
FROM information_schema.element_types AS ref_3,
|
||||
lateral
|
||||
(
|
||||
SELECT subq_0.c1 AS c0,
|
||||
sample_6.info AS c1,
|
||||
subq_0.c2 AS c2,
|
||||
subq_0.c3 AS c3,
|
||||
sample_6.user_id AS c5,
|
||||
ref_3.collation_name AS c6
|
||||
FROM orders AS sample_6 TABLESAMPLE system (3.8)
|
||||
WHERE sample_6.price = sample_6.org_id limit 58) AS subq_1
|
||||
WHERE (
|
||||
subq_1.c2 <= subq_0.c2)
|
||||
AND (
|
||||
cast(NULL AS line) ?-| cast(NULL AS line)) limit 59) AS subq_2
|
||||
WHERE cast(COALESCE(pg_catalog.age( cast(
|
||||
(
|
||||
SELECT pg_catalog.max(event_time)
|
||||
FROM events) AS "timestamp")),
|
||||
(
|
||||
SELECT write_lag
|
||||
FROM pg_catalog.pg_stat_replication limit 1 offset 3) ) AS "interval") >
|
||||
(
|
||||
SELECT utc_offset
|
||||
FROM pg_catalog.pg_timezone_names limit 1 offset 4) limit 91) AS subq_3
|
||||
\if :server_version_ge_15
|
||||
WHERE pg_catalog.pg_backup_stop() > cast(NULL AS record) limit 100;
|
||||
\else
|
||||
WHERE pg_catalog.pg_stop_backup() > cast(NULL AS pg_lsn) limit 100;
|
||||
ERROR: cannot push down subquery on the target list
|
||||
DETAIL: Subqueries in the SELECT part of the query can only be pushed down if they happen before aggregates and window functions
|
||||
\endif
|
||||
SET client_min_messages TO WARNING;
|
||||
DROP SCHEMA issue_5248 CASCADE;
|
|
@ -1,17 +1,6 @@
|
|||
--
|
||||
-- LOCAL_SHARD_EXECUTION
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA local_shard_execution;
|
||||
SET search_path TO local_shard_execution;
|
||||
SET citus.shard_count TO 4;
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,17 +1,6 @@
|
|||
--
|
||||
-- LOCAL_SHARD_EXECUTION_REPLICATED
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA local_shard_execution_replicated;
|
||||
SET search_path TO local_shard_execution_replicated;
|
||||
SET citus.shard_count TO 4;
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,10 +1,3 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
-- MERGE command performs a join from data_source to target_table_name
|
||||
DROP SCHEMA IF EXISTS merge_schema CASCADE;
|
||||
NOTICE: schema "merge_schema" does not exist, skipping
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -1,10 +1,3 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
SET search_path TO merge_arbitrary_schema;
|
||||
INSERT INTO target_cj VALUES (1, 'target', 0);
|
||||
INSERT INTO target_cj VALUES (2, 'target', 0);
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -1,10 +1,3 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
DROP SCHEMA IF EXISTS merge_arbitrary_schema CASCADE;
|
||||
CREATE SCHEMA merge_arbitrary_schema;
|
||||
SET search_path TO merge_arbitrary_schema;
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -1,10 +1,3 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
-- We create two sets of source and target tables, one set in Postgres and
|
||||
-- the other in Citus distributed. We run the _exact_ MERGE SQL on both sets
|
||||
-- and compare the final results of the target tables in Postgres and Citus.
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -1,10 +1,3 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
-- We create two sets of source and target tables, one set in Postgres and
|
||||
-- the other in Citus distributed. We run the _exact_ MERGE SQL on both sets
|
||||
-- and compare the final results of the target tables in Postgres and Citus.
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -1,10 +1,3 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
-- We create two sets of source and target tables, one set in Postgres and
|
||||
-- the other in Citus distributed. We run the _exact_ MERGE SQL on both sets
|
||||
-- and compare the final results of the target tables in Postgres and Citus.
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -1,10 +1,3 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
-- MERGE command performs a join from data_source to target_table_name
|
||||
DROP SCHEMA IF EXISTS schema_shard_table1 CASCADE;
|
||||
NOTICE: schema "schema_shard_table1" does not exist, skipping
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -1,10 +1,3 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
||||
\endif
|
||||
-- MERGE command performs a join from data_source to target_table_name
|
||||
DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE;
|
||||
NOTICE: schema "merge_vcore_schema" does not exist, skipping
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
\else
|
||||
\q
|
|
@ -214,13 +214,8 @@ SELECT con.conname
|
|||
\c - - :master_host :master_port
|
||||
ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_key;
|
||||
-- Check "ADD UNIQUE NULLS NOT DISTICT"
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
|
||||
\gset
|
||||
\if :server_version_ge_15
|
||||
ALTER TABLE AT_AddConstNoName.products ADD UNIQUE NULLS NOT DISTINCT (product_no, price);
|
||||
ALTER TABLE AT_AddConstNoName.products DROP CONSTRAINT products_product_no_price_key;
|
||||
\endif
|
||||
-- Check "ADD UNIQUE ... DEFERRABLE"
|
||||
ALTER TABLE AT_AddConstNoName.products ADD UNIQUE(product_no) INCLUDE(price) DEFERRABLE;
|
||||
\c - - :public_worker_1_host :worker_1_port
|
||||
|
|
|
@@ -1,17 +1,6 @@
 --
 -- MULTI_DEPARSE_SHARD_QUERY
 --
--- This test file has an alternative output because of the change in the
--- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
--- The alternative output can be deleted when we drop support for PG14
---
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
- server_version_ge_15
----------------------------------------------------------------------
- t
-(1 row)
-
 CREATE SCHEMA multi_deparse_shard_query;
 SET search_path TO multi_deparse_shard_query;
 SET citus.next_shard_id TO 13100000;
@ -1,423 +0,0 @@
|
|||
--
|
||||
-- MULTI_DEPARSE_SHARD_QUERY
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA multi_deparse_shard_query;
|
||||
SET search_path TO multi_deparse_shard_query;
|
||||
SET citus.next_shard_id TO 13100000;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE FUNCTION deparse_shard_query_test(text)
|
||||
RETURNS VOID
|
||||
AS 'citus'
|
||||
LANGUAGE C STRICT;
|
||||
-- create the first table
|
||||
CREATE TABLE raw_events_1
|
||||
(tenant_id bigint,
|
||||
value_1 int,
|
||||
value_2 int,
|
||||
value_3 float,
|
||||
value_4 bigint,
|
||||
value_5 text,
|
||||
value_6 int DEfAULT 10,
|
||||
value_7 int,
|
||||
event_at date DEfAULT now()
|
||||
);
|
||||
SELECT create_distributed_table('raw_events_1', 'tenant_id', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- create the first table
|
||||
CREATE TABLE raw_events_2
|
||||
(tenant_id bigint,
|
||||
value_1 int,
|
||||
value_2 int,
|
||||
value_3 float,
|
||||
value_4 bigint,
|
||||
value_5 text,
|
||||
value_6 float DEfAULT (random()*100)::float,
|
||||
value_7 int,
|
||||
event_at date DEfAULT now()
|
||||
);
|
||||
SELECT create_distributed_table('raw_events_2', 'tenant_id', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE aggregated_events
|
||||
(tenant_id bigint,
|
||||
sum_value_1 bigint,
|
||||
average_value_2 float,
|
||||
average_value_3 float,
|
||||
sum_value_4 bigint,
|
||||
sum_value_5 float,
|
||||
average_value_6 int,
|
||||
rollup_hour date);
|
||||
SELECT create_distributed_table('aggregated_events', 'tenant_id', 'hash');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- start with very simple examples on a single table
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1
|
||||
SELECT * FROM raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM multi_deparse_shard_query.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(tenant_id, value_4)
|
||||
SELECT
|
||||
tenant_id, value_4
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- now that shuffle columns a bit on a single table
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(value_5, value_2, tenant_id, value_4)
|
||||
SELECT
|
||||
value_2::text, value_5::int, tenant_id, value_4
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- same test on two different tables
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(value_5, value_2, tenant_id, value_4)
|
||||
SELECT
|
||||
value_2::text, value_5::int, tenant_id, value_4
|
||||
FROM
|
||||
raw_events_2;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_2
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- lets do some simple aggregations
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events (tenant_id, rollup_hour, sum_value_1, average_value_3, average_value_6, sum_value_4)
|
||||
SELECT
|
||||
tenant_id, date_trunc(\'hour\', event_at) , sum(value_1), avg(value_3), avg(value_6), sum(value_4)
|
||||
FROM
|
||||
raw_events_1
|
||||
GROUP BY
|
||||
tenant_id, date_trunc(\'hour\', event_at)
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM multi_deparse_shard_query.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- also some subqueries, JOINS with a complicated target lists
|
||||
-- a simple JOIN
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1 (value_3, tenant_id)
|
||||
SELECT
|
||||
raw_events_2.value_3, raw_events_1.tenant_id
|
||||
FROM
|
||||
raw_events_1, raw_events_2
|
||||
WHERE
|
||||
raw_events_1.tenant_id = raw_events_2.tenant_id;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- join with group by
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1 (value_3, tenant_id)
|
||||
SELECT
|
||||
max(raw_events_2.value_3), avg(raw_events_1.value_3)
|
||||
FROM
|
||||
raw_events_1, raw_events_2
|
||||
WHERE
|
||||
raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- a more complicated JOIN
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO aggregated_events (sum_value_4, tenant_id)
|
||||
SELECT
|
||||
max(r1.value_4), r3.tenant_id
|
||||
FROM
|
||||
raw_events_1 r1, raw_events_2 r2, raw_events_1 r3
|
||||
WHERE
|
||||
r1.tenant_id = r2.tenant_id AND r2.tenant_id = r3.tenant_id
|
||||
GROUP BY
|
||||
r1.value_1, r3.tenant_id, r2.event_at
|
||||
ORDER BY
|
||||
r2.event_at DESC;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM multi_deparse_shard_query.raw_events_1 r1, multi_deparse_shard_query.raw_events_2 r2, multi_deparse_shard_query.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- queries with CTEs are supported
|
||||
SELECT deparse_shard_query_test('
|
||||
WITH first_tenant AS (SELECT event_at, value_5, tenant_id FROM raw_events_1)
|
||||
INSERT INTO aggregated_events (rollup_hour, sum_value_5, tenant_id)
|
||||
SELECT
|
||||
event_at, sum(value_5::int), tenant_id
|
||||
FROM
|
||||
raw_events_1
|
||||
GROUP BY
|
||||
event_at, tenant_id;
|
||||
');
|
||||
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM multi_deparse_shard_query.raw_events_1 GROUP BY event_at, tenant_id
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
WITH first_tenant AS (SELECT event_at, value_5, tenant_id FROM raw_events_1)
|
||||
INSERT INTO aggregated_events (sum_value_5, tenant_id)
|
||||
SELECT
|
||||
sum(value_5::int), tenant_id
|
||||
FROM
|
||||
raw_events_1
|
||||
GROUP BY
|
||||
event_at, tenant_id;
|
||||
');
|
||||
INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM multi_deparse_shard_query.raw_events_1 GROUP BY event_at, tenant_id
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO aggregated_events (sum_value_1, sum_value_5, tenant_id)
|
||||
WITH RECURSIVE hierarchy as (
|
||||
SELECT value_1, 1 AS LEVEL, tenant_id
|
||||
FROM raw_events_1
|
||||
WHERE tenant_id = 1
|
||||
UNION
|
||||
SELECT re.value_2, (h.level+1), re.tenant_id
|
||||
FROM hierarchy h JOIN raw_events_1 re
|
||||
ON (h.tenant_id = re.tenant_id AND
|
||||
h.value_1 = re.value_6))
|
||||
SELECT * FROM hierarchy WHERE LEVEL <= 2;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN multi_deparse_shard_query.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2)
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO aggregated_events (sum_value_1)
|
||||
SELECT
|
||||
DISTINCT value_1
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM multi_deparse_shard_query.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- many filters suffled
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events (sum_value_5, sum_value_1, tenant_id)
|
||||
SELECT value_3, value_2, tenant_id
|
||||
FROM raw_events_1
|
||||
WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000);
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM multi_deparse_shard_query.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events (sum_value_5, tenant_id)
|
||||
SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id
|
||||
FROM raw_events_1
|
||||
WHERE event_at = now();
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM multi_deparse_shard_query.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now())
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events (sum_value_5, tenant_id, sum_value_4)
|
||||
SELECT random(), int4eq(1, max(value_1))::int, value_6
|
||||
FROM raw_events_1
|
||||
WHERE event_at = now()
|
||||
GROUP BY event_at, value_7, value_6;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM multi_deparse_shard_query.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO aggregated_events (sum_value_1, tenant_id)
|
||||
SELECT
|
||||
count(DISTINCT CASE
|
||||
WHEN
|
||||
value_1 > 100
|
||||
THEN
|
||||
tenant_id
|
||||
ELSE
|
||||
value_6
|
||||
END) as c,
|
||||
max(tenant_id)
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM multi_deparse_shard_query.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(value_7, value_1, tenant_id)
|
||||
SELECT
|
||||
value_7, value_1, tenant_id
|
||||
FROM
|
||||
(SELECT
|
||||
tenant_id, value_2 as value_7, value_1
|
||||
FROM
|
||||
raw_events_2
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM multi_deparse_shard_query.raw_events_2) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO aggregated_events(sum_value_1, tenant_id, sum_value_5)
|
||||
SELECT
|
||||
sum(value_1), tenant_id, sum(value_5::bigint)
|
||||
FROM
|
||||
(SELECT
|
||||
raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1
|
||||
FROM
|
||||
raw_events_2, raw_events_1
|
||||
WHERE
|
||||
raw_events_1.tenant_id = raw_events_2.tenant_id
|
||||
) as foo
|
||||
GROUP BY
|
||||
tenant_id, date_trunc(\'hour\', event_at)
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM multi_deparse_shard_query.raw_events_2, multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO raw_events_2(tenant_id, value_1, value_2, value_3, value_4)
|
||||
SELECT
|
||||
tenant_id, value_1, value_2, value_3, value_4
|
||||
FROM
|
||||
(SELECT
|
||||
value_2, value_4, tenant_id, value_1, value_3
|
||||
FROM
|
||||
raw_events_1
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT deparse_shard_query_test(E'
|
||||
INSERT INTO raw_events_2(tenant_id, value_1, value_4, value_2, value_3)
|
||||
SELECT
|
||||
*
|
||||
FROM
|
||||
(SELECT
|
||||
value_2, value_4, tenant_id, value_1, value_3
|
||||
FROM
|
||||
raw_events_1
|
||||
) as foo
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- use a column multiple times
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(tenant_id, value_7, value_4)
|
||||
SELECT
|
||||
tenant_id, value_7, value_7
|
||||
FROM
|
||||
raw_events_1
|
||||
ORDER BY
|
||||
value_2, value_1;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 ORDER BY value_2, value_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- test dropped table as well
|
||||
ALTER TABLE raw_events_1 DROP COLUMN value_5;
|
||||
SELECT deparse_shard_query_test('
|
||||
INSERT INTO raw_events_1(tenant_id, value_7, value_4)
|
||||
SELECT
|
||||
tenant_id, value_7, value_4
|
||||
FROM
|
||||
raw_events_1;
|
||||
');
|
||||
INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
|
||||
deparse_shard_query_test
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SET client_min_messages TO ERROR;
|
||||
DROP SCHEMA multi_deparse_shard_query CASCADE;
|
|
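
For orientation, the helper exercised throughout the deleted alternative output above is defined and invoked as follows (lifted from the test itself; the INFO lines in the output show the per-shard deparsed query):

CREATE FUNCTION deparse_shard_query_test(text)
RETURNS VOID
AS 'citus'
LANGUAGE C STRICT;
SELECT deparse_shard_query_test('
INSERT INTO raw_events_1
SELECT * FROM raw_events_1;
');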
@@ -747,14 +747,8 @@ SELECT * FROM multi_extension.print_extension_changes();
 
 -- recreate public schema, and recreate citus_tables in the public schema by default
 CREATE SCHEMA public;
--- In PG15, public schema is owned by pg_database_owner role
--- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
-\gset
-\if :server_version_ge_15
+-- public schema is owned by pg_database_owner role
 ALTER SCHEMA public OWNER TO pg_database_owner;
-\endif
 GRANT ALL ON SCHEMA public TO public;
 ALTER EXTENSION citus UPDATE TO '9.5-1';
 ALTER EXTENSION citus UPDATE TO '10.0-4';
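
The multi_extension hunk above drops the version gate around the ownership fix-up: on PG15+ initdb creates the public schema owned by pg_database_owner, while a schema recreated by hand belongs to the creating role, so the test now restores the upstream default unconditionally. A sketch of the sequence in isolation (assuming a throwaway database):

DROP SCHEMA public CASCADE;
CREATE SCHEMA public;
ALTER SCHEMA public OWNER TO pg_database_owner;
GRANT ALL ON SCHEMA public TO public;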
@@ -1,19 +1,8 @@
 --
 -- MULTI_INSERT_SELECT
 --
--- This test file has an alternative output because of the change in the
--- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
--- The alternative output can be deleted when we drop support for PG14
---
 CREATE SCHEMA multi_insert_select;
 SET search_path = multi_insert_select,public;
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
- server_version_ge_15
----------------------------------------------------------------------
- t
-(1 row)
-
 SET citus.next_shard_id TO 13300000;
 SET citus.next_placement_id TO 13300000;
 -- create co-located tables
[File diff suppressed because it is too large]
@@ -1,17 +1,6 @@
 --
 -- MULTI_INSERT_SELECT_CONFLICT
 --
--- This test file has an alternative output because of the change in the
--- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
--- The alternative output can be deleted when we drop support for PG14
---
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
- server_version_ge_15
----------------------------------------------------------------------
- t
-(1 row)
-
 CREATE SCHEMA on_conflict;
 SET search_path TO on_conflict, public;
 SET citus.next_shard_id TO 1900000;
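
The on_conflict suite whose PG14 alternative output is deleted below drives INSERT ... SELECT ... ON CONFLICT against distributed tables. A minimal sketch of the pattern under test, using a hypothetical table name (create_distributed_table is the regular Citus UDF):

CREATE TABLE conflict_demo (col_1 int PRIMARY KEY, col_2 int);
SELECT create_distributed_table('conflict_demo', 'col_1');
INSERT INTO conflict_demo (col_1, col_2)
SELECT s, s FROM generate_series(1, 10) s
ON CONFLICT (col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;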
@ -1,600 +0,0 @@
|
|||
--
|
||||
-- MULTI_INSERT_SELECT_CONFLICT
|
||||
--
|
||||
-- This test file has an alternative output because of the change in the
|
||||
-- display of SQL-standard function's arguments in INSERT/SELECT in PG15.
|
||||
-- The alternative output can be deleted when we drop support for PG14
|
||||
--
|
||||
SHOW server_version \gset
|
||||
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
|
||||
server_version_ge_15
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
CREATE SCHEMA on_conflict;
|
||||
SET search_path TO on_conflict, public;
|
||||
SET citus.next_shard_id TO 1900000;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE target_table(col_1 int primary key, col_2 int);
|
||||
SELECT create_distributed_table('target_table','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6);
|
||||
CREATE TABLE source_table_1(col_1 int primary key, col_2 int, col_3 int);
|
||||
SELECT create_distributed_table('source_table_1','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5);
|
||||
CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int);
|
||||
SELECT create_distributed_table('source_table_2','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_2 VALUES(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
|
||||
SET client_min_messages to debug1;
|
||||
-- Generate series directly on the coordinator and on conflict do nothing
|
||||
INSERT INTO target_table (col_1, col_2)
|
||||
SELECT
|
||||
s, s
|
||||
FROM
|
||||
generate_series(1,10) s
|
||||
ON CONFLICT DO NOTHING;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Generate series directly on the coordinator and on conflict update the target table
|
||||
INSERT INTO target_table (col_1, col_2)
|
||||
SELECT s, s
|
||||
FROM
|
||||
generate_series(1,10) s
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Since partition columns do not match, pull the data to the coordinator
|
||||
-- and do not change conflicted values
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
ON CONFLICT DO NOTHING;
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
-- Since partition columns do not match, pull the data to the coordinator
|
||||
-- and update the non-partition column. Query is wrapped by CTE to return
|
||||
-- ordered result.
|
||||
WITH inserted_table AS (
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
|
||||
) SELECT * FROM inserted_table ORDER BY 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_2, col_3 FROM on_conflict.source_table_1 ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
(5 rows)
|
||||
|
||||
-- Subquery should be recursively planned due to the limit and do nothing on conflict
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT DO NOTHING;
|
||||
DEBUG: cannot push down this subquery
|
||||
DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Subquery should be recursively planned due to the limit and update on conflict
|
||||
-- Query is wrapped by CTE to return ordered result.
|
||||
WITH inserted_table AS (
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *
|
||||
) SELECT * FROM inserted_table ORDER BY 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = excluded.col_2 RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: cannot push down this subquery
|
||||
DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
3 | 3
|
||||
4 | 4
|
||||
5 | 5
|
||||
(5 rows)
|
||||
|
||||
-- Test with multiple subqueries. Query is wrapped by CTE to return ordered result.
|
||||
WITH inserted_table AS (
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
(SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
LIMIT 5)
|
||||
UNION
|
||||
(SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_2
|
||||
LIMIT 5)
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING *
|
||||
) SELECT * FROM inserted_table ORDER BY 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE inserted_table: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM ((SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1 LIMIT 5) UNION (SELECT source_table_2.col_1, source_table_2.col_2, source_table_2.col_3 FROM on_conflict.source_table_2 LIMIT 5)) foo ON CONFLICT(col_1) DO UPDATE SET col_2 = 0 RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: cannot push down this subquery
|
||||
DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 0
|
||||
2 | 0
|
||||
3 | 0
|
||||
4 | 0
|
||||
5 | 0
|
||||
6 | 0
|
||||
7 | 0
|
||||
8 | 0
|
||||
9 | 0
|
||||
10 | 0
|
||||
(10 rows)
|
||||
|
||||
-- Get the select part from cte and do nothing on conflict
|
||||
WITH cte AS MATERIALIZED (
|
||||
SELECT col_1, col_2 FROM source_table_1
|
||||
)
|
||||
INSERT INTO target_table SELECT * FROM cte ON CONFLICT DO NOTHING;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Get the select part from cte and update on conflict
|
||||
WITH cte AS MATERIALIZED (
|
||||
SELECT col_1, col_2 FROM source_table_1
|
||||
)
|
||||
INSERT INTO target_table SELECT * FROM cte ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2 FROM on_conflict.source_table_1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte.col_1, cte.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 3
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
6 | 0
|
||||
7 | 0
|
||||
8 | 0
|
||||
9 | 0
|
||||
10 | 0
|
||||
(10 rows)
|
||||
|
||||
-- Test with multiple CTEs
|
||||
WITH cte AS(
|
||||
SELECT col_1, col_2 FROM source_table_1
|
||||
), cte_2 AS(
|
||||
SELECT col_1, col_2 FROM source_table_2
|
||||
)
|
||||
INSERT INTO target_table ((SELECT * FROM cte) UNION (SELECT * FROM cte_2)) ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: CTE cte is going to be inlined via distributed planning
|
||||
DEBUG: CTE cte_2 is going to be inlined via distributed planning
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 3
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
6 | 7
|
||||
7 | 8
|
||||
8 | 9
|
||||
9 | 10
|
||||
10 | 11
|
||||
(10 rows)
|
||||
|
||||
WITH inserted_table AS MATERIALIZED (
|
||||
WITH cte AS MATERIALIZED (
|
||||
SELECT col_1, col_2, col_3 FROM source_table_1
|
||||
), cte_2 AS MATERIALIZED (
|
||||
SELECT col_1, col_2 FROM cte
|
||||
)
|
||||
INSERT INTO target_table SELECT * FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1 RETURNING *
|
||||
) SELECT * FROM inserted_table ORDER BY 1;
|
||||
DEBUG: generating subplan XXX_1 for CTE inserted_table: WITH cte AS MATERIALIZED (SELECT source_table_1.col_1, source_table_1.col_2, source_table_1.col_3 FROM on_conflict.source_table_1), cte_2 AS MATERIALIZED (SELECT cte.col_1, cte.col_2 FROM cte) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) inserted_table ORDER BY col_1
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 3
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
(5 rows)
|
||||
|
||||
WITH cte AS MATERIALIZED (
|
||||
WITH basic AS MATERIALIZED (
|
||||
SELECT col_1, col_2 FROM source_table_1
|
||||
)
|
||||
INSERT INTO target_table (SELECT * FROM basic) ON CONFLICT DO NOTHING RETURNING *
|
||||
)
|
||||
UPDATE target_table SET col_2 = 4 WHERE col_1 IN (SELECT col_1 FROM cte);
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: WITH basic AS MATERIALIZED (SELECT source_table_1.col_1, source_table_1.col_2 FROM on_conflict.source_table_1) INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM basic ON CONFLICT DO NOTHING RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE basic: SELECT col_1, col_2 FROM on_conflict.source_table_1
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT basic.col_1, basic.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) basic) citus_insert_select_subquery
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE on_conflict.target_table SET col_2 = 4 WHERE (col_1 OPERATOR(pg_catalog.=) ANY (SELECT cte.col_1 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte))
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
RESET client_min_messages;
|
||||
-- Following query is supported by using repartition join for the insert/select
|
||||
SELECT coordinator_plan($Q$
|
||||
EXPLAIN (costs off)
|
||||
WITH cte AS (
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM
|
||||
source_table_1
|
||||
)
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
source_table_1.col_1,
|
||||
source_table_1.col_2
|
||||
FROM cte, source_table_1
|
||||
WHERE cte.col_1 = source_table_1.col_1 ON CONFLICT DO NOTHING;
|
||||
$Q$);
|
||||
coordinator_plan
|
||||
---------------------------------------------------------------------
|
||||
Custom Scan (Citus INSERT ... SELECT)
|
||||
INSERT/SELECT method: repartition
|
||||
-> Custom Scan (Citus Adaptive)
|
||||
Task Count: 4
|
||||
(4 rows)
|
||||
|
||||
-- Tests with foreign key to reference table
|
||||
CREATE TABLE test_ref_table (key int PRIMARY KEY);
|
||||
SELECT create_reference_table('test_ref_table');
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_ref_table VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
|
||||
ALTER TABLE target_table ADD CONSTRAINT fkey FOREIGN KEY (col_1) REFERENCES test_ref_table(key) ON DELETE CASCADE;
|
||||
BEGIN;
|
||||
TRUNCATE test_ref_table CASCADE;
|
||||
NOTICE: truncate cascades to table "target_table"
|
||||
INSERT INTO
|
||||
target_table
|
||||
SELECT
|
||||
col_2,
|
||||
col_1
|
||||
FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
|
||||
ERROR: insert or update on table "target_table_xxxxxxx" violates foreign key constraint "fkey_xxxxxxx"
|
||||
DETAIL: Key (col_1)=(X) is not present in table "test_ref_table_xxxxxxx".
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
ROLLBACK;
|
||||
BEGIN;
|
||||
DELETE FROM test_ref_table WHERE key > 10;
|
||||
WITH r AS (
|
||||
INSERT INTO
|
||||
target_table
|
||||
SELECT
|
||||
col_2,
|
||||
col_1
|
||||
FROM source_table_1 ON CONFLICT (col_1) DO UPDATE SET col_2 = 1 RETURNING *)
|
||||
SELECT * FROM r ORDER BY col_1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 1
|
||||
3 | 1
|
||||
4 | 1
|
||||
5 | 1
|
||||
(5 rows)
|
||||
|
||||
ROLLBACK;
|
||||
-- Following two queries are supported since we no not modify but only select from
|
||||
-- the target_table after modification on test_ref_table.
|
||||
BEGIN;
|
||||
TRUNCATE test_ref_table CASCADE;
|
||||
NOTICE: truncate cascades to table "target_table"
|
||||
INSERT INTO
|
||||
source_table_1
|
||||
SELECT
|
||||
col_2,
|
||||
col_1
|
||||
FROM target_table ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
|
||||
col_1 | col_2 | col_3
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
ROLLBACK;
|
||||
BEGIN;
|
||||
DELETE FROM test_ref_table;
|
||||
INSERT INTO
|
||||
source_table_1
|
||||
SELECT
|
||||
col_2,
|
||||
col_1
|
||||
FROM target_table ON CONFLICT (col_1) DO UPDATE SET col_2 = 55 RETURNING *;
|
||||
col_1 | col_2 | col_3
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
ROLLBACK;
|
||||
-- INSERT .. SELECT with different column types
|
||||
CREATE TABLE source_table_3(col_1 numeric, col_2 numeric, col_3 numeric);
|
||||
SELECT create_distributed_table('source_table_3','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_3 VALUES(1,11,1),(2,22,2),(3,33,3),(4,44,4),(5,55,5);
|
||||
CREATE TABLE source_table_4(id int, arr_val text[]);
|
||||
SELECT create_distributed_table('source_table_4','id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_4 VALUES(1, '{"abc","cde","efg"}'), (2, '{"xyz","tvu"}');
|
||||
CREATE TABLE target_table_2(id int primary key, arr_val char(10)[]);
|
||||
SELECT create_distributed_table('target_table_2','id');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO target_table_2 VALUES(1, '{"abc","def","gyx"}');
|
||||
SET client_min_messages to debug1;
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM
|
||||
source_table_3
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2;
|
||||
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
|
||||
DETAIL: The data type of the target table's partition column should exactly match the data type of the corresponding simple column reference in the subquery.
|
||||
DEBUG: performing repartitioned INSERT ... SELECT
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 11
|
||||
2 | 22
|
||||
3 | 33
|
||||
4 | 44
|
||||
5 | 55
|
||||
6 | 7
|
||||
7 | 8
|
||||
8 | 9
|
||||
9 | 10
|
||||
10 | 11
|
||||
(10 rows)
|
||||
|
||||
INSERT INTO target_table_2
|
||||
SELECT
|
||||
*
|
||||
FROM
|
||||
source_table_4
|
||||
ON CONFLICT DO NOTHING;
|
||||
SELECT * FROM target_table_2 ORDER BY 1;
|
||||
id | arr_val
|
||||
---------------------------------------------------------------------
|
||||
1 | {"abc ","def ","gyx "}
|
||||
2 | {"xyz ","tvu "}
|
||||
(2 rows)
|
||||
|
||||
RESET client_min_messages;
|
||||
-- Test with shard_replication_factor = 2
|
||||
SET citus.shard_replication_factor to 2;
|
||||
DROP TABLE target_table, source_table_1, source_table_2;
|
||||
CREATE TABLE target_table(col_1 int primary key, col_2 int);
|
||||
SELECT create_distributed_table('target_table','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6);
|
||||
CREATE TABLE source_table_1(col_1 int, col_2 int, col_3 int);
|
||||
SELECT create_distributed_table('source_table_1','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_1 VALUES(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5);
|
||||
CREATE TABLE source_table_2(col_1 int, col_2 int, col_3 int);
|
||||
SELECT create_distributed_table('source_table_2','col_1');
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO source_table_2 VALUES(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
|
||||
SET client_min_messages to debug1;
|
||||
-- Generate series directly on the coordinator and on conflict do nothing
|
||||
INSERT INTO target_table (col_1, col_2)
|
||||
SELECT
|
||||
s, s
|
||||
FROM
|
||||
generate_series(1,10) s
|
||||
ON CONFLICT DO NOTHING;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- Test with multiple subqueries
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
(SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_1
|
||||
LIMIT 5)
|
||||
UNION
|
||||
(SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table_2
|
||||
LIMIT 5)
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = 0;
|
||||
DEBUG: cannot push down this subquery
|
||||
DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_1 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1 LIMIT 5
|
||||
DEBUG: push down of limit count: 5
|
||||
DEBUG: generating subplan XXX_2 for subquery SELECT col_1, col_2, col_3 FROM on_conflict.source_table_2 LIMIT 5
|
||||
DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer) UNION SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) foo
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 0
|
||||
2 | 0
|
||||
3 | 0
|
||||
4 | 0
|
||||
5 | 0
|
||||
6 | 0
|
||||
7 | 0
|
||||
8 | 0
|
||||
9 | 0
|
||||
10 | 0
|
||||
(10 rows)
|
||||
|
||||
WITH cte AS MATERIALIZED(
|
||||
SELECT col_1, col_2, col_3 FROM source_table_1
|
||||
), cte_2 AS MATERIALIZED(
|
||||
SELECT col_1, col_2 FROM cte
|
||||
)
|
||||
INSERT INTO target_table SELECT * FROM cte_2 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: generating subplan XXX_1 for CTE cte: SELECT col_1, col_2, col_3 FROM on_conflict.source_table_1
|
||||
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT col_1, col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2, intermediate_result.col_3 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer, col_3 integer)) cte
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT col_1, col_2 FROM (SELECT cte_2.col_1, cte_2.col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_2) citus_insert_select_subquery
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
SELECT * FROM target_table ORDER BY 1;
|
||||
col_1 | col_2
|
||||
---------------------------------------------------------------------
|
||||
1 | 2
|
||||
2 | 3
|
||||
3 | 4
|
||||
4 | 5
|
||||
5 | 6
|
||||
6 | 0
|
||||
7 | 0
|
||||
8 | 0
|
||||
9 | 0
|
||||
10 | 0
|
||||
(10 rows)
|
||||
|
||||
-- make sure that even if COPY switchover happens
|
||||
-- the results are correct
|
||||
SET citus.copy_switchover_threshold TO 1;
|
||||
TRUNCATE target_table;
|
||||
-- load some data to make sure copy commands switch over connections
|
||||
INSERT INTO target_table SELECT i,0 FROM generate_series(0,500)i;
|
||||
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
-- make sure that SELECT only uses 1 connection 1 node
|
||||
-- yet still COPY commands use 1 connection per co-located
|
||||
-- intermediate result file
|
||||
SET citus.max_adaptive_executor_pool_size TO 1;
|
||||
INSERT INTO target_table SELECT * FROM target_table LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1;
|
||||
DEBUG: cannot push down this subquery
|
||||
DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
|
||||
DEBUG: push down of limit count: 10000
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
SELECT DISTINCT col_2 FROM target_table;
|
||||
col_2
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
WITH cte_1 AS (INSERT INTO target_table SELECT * FROM target_table LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 + 1 RETURNING *)
|
||||
SELECT DISTINCT col_2 FROM cte_1;
|
||||
DEBUG: generating subplan XXX_1 for CTE cte_1: INSERT INTO on_conflict.target_table (col_1, col_2) SELECT col_1, col_2 FROM on_conflict.target_table LIMIT 10000 ON CONFLICT(col_1) DO UPDATE SET col_2 = (excluded.col_2 OPERATOR(pg_catalog.+) 1) RETURNING target_table.col_1, target_table.col_2
|
||||
DEBUG: cannot push down this subquery
|
||||
DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
|
||||
DEBUG: push down of limit count: 10000
|
||||
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT col_2 FROM (SELECT intermediate_result.col_1, intermediate_result.col_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(col_1 integer, col_2 integer)) cte_1
|
||||
DEBUG: Collecting INSERT ... SELECT results on coordinator
|
||||
col_2
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
RESET client_min_messages;
|
||||
DROP SCHEMA on_conflict CASCADE;
|
||||
NOTICE: drop cascades to 8 other objects
|
||||
DETAIL: drop cascades to table test_ref_table
|
||||
drop cascades to table test_ref_table_1900012
|
||||
drop cascades to table source_table_3
|
||||
drop cascades to table source_table_4
|
||||
drop cascades to table target_table_2
|
||||
drop cascades to table target_table
|
||||
drop cascades to table source_table_1
|
||||
drop cascades to table source_table_2
|
|
@@ -1,16 +1,6 @@
 --
 -- MULTI_METADATA_SYNC
 --
--- this test has different output for PG14 compared to PG15
--- In PG15, public schema is owned by pg_database_owner role
--- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
- server_version_ge_15
----------------------------------------------------------------------
- t
-(1 row)
-
 -- Tests for metadata snapshot functions, metadata syncing functions and propagation of
 -- metadata changes to MX tables.
 -- Turn metadata sync off at first
[File diff suppressed because it is too large]
[Some files were not shown because too many files have changed in this diff]