From 483a3a58754984844a7015660e09d5fa2c75202a Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Wed, 22 Jun 2022 16:54:35 +0200
Subject: [PATCH 01/10] PG 15 Compat: Resolve compile issues + shmem requests

Similar to #5897, one more step for running Citus with PG 15.
This PR at least makes Citus run with PG 15. I have not tried
running the tests with PG 15.

Shmem changes are based on
https://github.com/postgres/postgres/commit/4f2400cb3f10aa79f99fba680c198237da28dd38

Compile breaks are mostly due to #6008
---
 .../commands/foreign_data_wrapper.c           |  2 +-
 src/backend/distributed/commands/function.c   |  2 +-
 src/backend/distributed/commands/policy.c     |  8 +--
 src/backend/distributed/commands/sequence.c   |  2 +-
 .../distributed/commands/subscription.c       |  2 +-
 .../connection/connection_configuration.c     |  2 +-
 .../connection/shared_connection_stats.c      | 10 ++--
 .../distributed/executor/query_stats.c        | 20 ++++---
 .../replication/multi_logical_replication.c   |  3 +-
 src/backend/distributed/shared_library_init.c | 58 +++++++++++++++++++
 .../distributed/transaction/backend_data.c    | 11 ++--
 src/backend/distributed/utils/maintenanced.c  | 11 +---
 src/include/distributed/backend_data.h        |  2 +
 src/include/distributed/maintenanced.h        |  2 +
 src/include/distributed/query_stats.h         |  3 +
 .../distributed/shared_connection_stats.h     |  2 +
 16 files changed, 105 insertions(+), 35 deletions(-)

diff --git a/src/backend/distributed/commands/foreign_data_wrapper.c b/src/backend/distributed/commands/foreign_data_wrapper.c
index b0553ecae..9cfd5f10e 100644
--- a/src/backend/distributed/commands/foreign_data_wrapper.c
+++ b/src/backend/distributed/commands/foreign_data_wrapper.c
@@ -83,7 +83,7 @@ PreprocessGrantOnFDWStmt(Node *node, const char *queryString,
 static bool
 NameListHasFDWOwnedByDistributedExtension(List *FDWNames)
 {
-	Value *FDWValue = NULL;
+	String *FDWValue = NULL;
 	foreach_ptr(FDWValue, FDWNames)
 	{
 		/* captures the extension address during lookup */
diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c
index cc189e792..73c014c6a 100644
--- a/src/backend/distributed/commands/function.c
+++ b/src/backend/distributed/commands/function.c
@@ -2038,7 +2038,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt)
 	List *namespaceOidList = NIL;
 
 	/* iterate over all namespace names provided to get their oid's */
-	Value *namespaceValue = NULL;
+	String *namespaceValue = NULL;
 	foreach_ptr(namespaceValue, grantStmt->objects)
 	{
 		char *nspname = strVal(namespaceValue);
diff --git a/src/backend/distributed/commands/policy.c b/src/backend/distributed/commands/policy.c
index e41d33451..2cbf6f6f9 100644
--- a/src/backend/distributed/commands/policy.c
+++ b/src/backend/distributed/commands/policy.c
@@ -612,8 +612,8 @@ RenamePolicyEventExtendNames(RenameStmt *stmt, const char *schemaName, uint64 sh
 void
 DropPolicyEventExtendNames(DropStmt *dropStmt, const char *schemaName, uint64 shardId)
 {
-	Value *relationSchemaNameValue = NULL;
-	Value *relationNameValue = NULL;
+	String *relationSchemaNameValue = NULL;
+	String *relationNameValue = NULL;
 
 	uint32 dropCount = list_length(dropStmt->objects);
 	if (dropCount > 1)
@@ -652,10 +652,10 @@ DropPolicyEventExtendNames(DropStmt *dropStmt, const char *schemaName, uint64 sh
 	/* prefix with schema name if it is not added already */
 	if (relationSchemaNameValue == NULL)
 	{
-		Value *schemaNameValue = makeString(pstrdup(schemaName));
+		String *schemaNameValue = makeString(pstrdup(schemaName));
 		relationNameList = lcons(schemaNameValue, relationNameList);
 	}
 
-	char **relationName = &(relationNameValue->val.str);
+	char **relationName = &(strVal(relationNameValue));
 	AppendShardIdToName(relationName, shardId);
 }
 
diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c
index fcf47deac..4e5cd18de 100644
--- a/src/backend/distributed/commands/sequence.c
+++ b/src/backend/distributed/commands/sequence.c
@@ -825,7 +825,7 @@ FilterDistributedSequences(GrantStmt *stmt)
 {
 	/* iterate over all namespace names provided to get their oid's */
 	List *namespaceOidList = NIL;
-	Value *namespaceValue = NULL;
+	String *namespaceValue = NULL;
 	foreach_ptr(namespaceValue, stmt->objects)
 	{
 		char *nspname = strVal(namespaceValue);
diff --git a/src/backend/distributed/commands/subscription.c b/src/backend/distributed/commands/subscription.c
index 3d32c78af..09508ee8d 100644
--- a/src/backend/distributed/commands/subscription.c
+++ b/src/backend/distributed/commands/subscription.c
@@ -103,7 +103,7 @@ GenerateConninfoWithAuth(char *conninfo)
 		}
 		else if (strcmp(option->keyword, "port") == 0)
 		{
-			port = pg_atoi(option->val, 4, 0);
+			port = pg_strtoint32(option->val);
 		}
 		else if (strcmp(option->keyword, "user") == 0)
 		{
diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c
index 88828d4cb..3e3766a44 100644
--- a/src/backend/distributed/connection/connection_configuration.c
+++ b/src/backend/distributed/connection/connection_configuration.c
@@ -455,7 +455,7 @@ GetEffectiveConnKey(ConnectionHashKey *key)
 		}
 		else if (strcmp(option->keyword, "port") == 0)
 		{
-			effectiveKey->port = pg_atoi(option->val, 4, 0);
+			effectiveKey->port = pg_strtoint32(option->val);
 		}
 		else if (strcmp(option->keyword, "dbname") == 0)
 		{
diff --git a/src/backend/distributed/connection/shared_connection_stats.c b/src/backend/distributed/connection/shared_connection_stats.c
index 4cdd065d7..8602d23c2 100644
--- a/src/backend/distributed/connection/shared_connection_stats.c
+++ b/src/backend/distributed/connection/shared_connection_stats.c
@@ -123,8 +123,6 @@ static void StoreAllRemoteConnectionStats(Tuplestorestate *tupleStore, TupleDesc
 										  tupleDescriptor);
 static void LockConnectionSharedMemory(LWLockMode lockMode);
 static void UnLockConnectionSharedMemory(void);
-static void SharedConnectionStatsShmemInit(void);
-static size_t SharedConnectionStatsShmemSize(void);
 static bool ShouldWaitForConnection(int currentConnectionCount);
 static uint32 SharedConnectionHashHash(const void *key, Size keysize);
 static int SharedConnectionHashCompare(const void *a, const void *b, Size keysize);
@@ -617,11 +615,15 @@ WaitForSharedConnection(void)
 void
 InitializeSharedConnectionStats(void)
 {
+/* on PG 15, we use shmem_request_hook_type */
+#if PG_VERSION_NUM < PG_VERSION_15
+
 	/* allocate shared memory */
 	if (!IsUnderPostmaster)
 	{
 		RequestAddinShmemSpace(SharedConnectionStatsShmemSize());
 	}
+#endif
 
 	prev_shmem_startup_hook = shmem_startup_hook;
 	shmem_startup_hook = SharedConnectionStatsShmemInit;
@@ -632,7 +634,7 @@ InitializeSharedConnectionStats(void)
  * SharedConnectionStatsShmemSize returns the size that should be allocated
  * on the shared memory for shared connection stats.
  */
-static size_t
+size_t
 SharedConnectionStatsShmemSize(void)
 {
 	Size size = 0;
@@ -652,7 +654,7 @@ SharedConnectionStatsShmemSize(void)
  * SharedConnectionStatsShmemInit initializes the shared memory used
  * for keeping track of connection stats across backends.
  */
-static void
+void
 SharedConnectionStatsShmemInit(void)
 {
 	bool alreadyInitialized = false;
diff --git a/src/backend/distributed/executor/query_stats.c b/src/backend/distributed/executor/query_stats.c
index 8921d51ff..26b3ff027 100644
--- a/src/backend/distributed/executor/query_stats.c
+++ b/src/backend/distributed/executor/query_stats.c
@@ -49,7 +49,6 @@
 #define STICKY_DECREASE_FACTOR (0.50)  /* factor for sticky entries */
 #define USAGE_DEALLOC_PERCENT 5        /* free this % of entries at once */
 #define USAGE_INIT (1.0)               /* including initial planning */
-#define STATS_SHARED_MEM_NAME "citus_query_stats"
 
 #define MAX_KEY_LENGTH NAMEDATALEN
 
@@ -124,7 +123,6 @@ PG_FUNCTION_INFO_V1(citus_executor_name);
 
 static char * CitusExecutorName(MultiExecutorType executorType);
 
-static Size CitusQueryStatsSharedMemSize(void);
 
 static void CitusQueryStatsShmemStartup(void);
 static void CitusQueryStatsShmemShutdown(int code, Datum arg);
@@ -143,10 +141,18 @@ static void CitusQueryStatsRemoveExpiredEntries(HTAB *existingQueryIdHash);
 void
 InitializeCitusQueryStats(void)
 {
-	RequestAddinShmemSpace(CitusQueryStatsSharedMemSize());
+/* on PG 15, we use shmem_request_hook_type */
+#if PG_VERSION_NUM < PG_VERSION_15
 
-	elog(LOG, "requesting named LWLockTranch for %s", STATS_SHARED_MEM_NAME);
-	RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1);
+	/* allocate shared memory */
+	if (!IsUnderPostmaster)
+	{
+		RequestAddinShmemSpace(CitusQueryStatsSharedMemSize());
+
+		elog(LOG, "requesting named LWLockTranch for %s", STATS_SHARED_MEM_NAME);
+		RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1);
+	}
+#endif
 
 	/* Install hook */
 	prev_shmem_startup_hook = shmem_startup_hook;
@@ -373,7 +379,7 @@ error:
 /*
  * CitusQueryStatsSharedMemSize calculates and returns shared memory size
  * required to keep query statistics.
  */
-static Size
+Size
 CitusQueryStatsSharedMemSize(void)
 {
 	Assert(StatStatementsMax >= 0);
@@ -947,7 +953,7 @@ GetPGStatStatementsMax(void)
 	 */
 	if (pgssMax)
 	{
-		maxValue = pg_atoi(pgssMax, 4, 0);
+		maxValue = pg_strtoint32(pgssMax);
 	}
 
 	return maxValue;
diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c
index 2a63ad0bd..9b645ead4 100644
--- a/src/backend/distributed/replication/multi_logical_replication.c
+++ b/src/backend/distributed/replication/multi_logical_replication.c
@@ -33,6 +33,7 @@
 #include "catalog/namespace.h"
 #include "catalog/pg_constraint.h"
 #include "distributed/adaptive_executor.h"
+#include "distributed/citus_safe_lib.h"
 #include "distributed/colocation_utils.h"
 #include "distributed/connection_management.h"
 #include "distributed/listutils.h"
@@ -1808,7 +1809,7 @@ TotalRelationSizeForSubscription(MultiConnection *connection, char *command)
 	{
 		char *resultString = PQgetvalue(result, 0, 0);
 
-		remoteTotalSize = pg_strtouint64(resultString, NULL, 10);
+		remoteTotalSize = SafeStringToUint64(resultString);
 	}
 	else
 	{
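PG 15 removed `pg_atoi()` and `pg_strtouint64()`, which is why the hunks above and below switch to `pg_strtoint32()` and Citus's own `SafeStringToUint64()`. A minimal sketch of the strict parse-and-validate behavior those replacements provide, in plain C (illustrative only; the real helpers live in Postgres and Citus and report failures via ereport()):

```c
/* Illustrative stand-in for strict integer parsing; not the Citus helper. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int
parse_int32_strict(const char *str, int32_t *result)
{
	char *end = NULL;

	errno = 0;
	long value = strtol(str, &end, 10);

	/* reject empty strings, trailing garbage, and out-of-range input */
	if (end == str || *end != '\0' || errno == ERANGE ||
		value < INT32_MIN || value > INT32_MAX)
	{
		return -1;
	}

	*result = (int32_t) value;
	return 0;
}

int
main(void)
{
	int32_t port = 0;

	if (parse_int32_strict("5432", &port) == 0)
	{
		printf("parsed port %d\n", port);
	}
	return 0;
}
```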
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index 7af1adfa3..6c44d2127 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -141,6 +141,7 @@ DEFINE_COLUMNAR_PASSTHROUGH_FUNC(test_columnar_storage_write_new_page)
 #define DUMMY_REAL_TIME_EXECUTOR_ENUM_VALUE 9999999
 
 static char *CitusVersion = CITUS_VERSION;
+static char *DeprecatedEmptyString = "";
 
 /* deprecated GUC value that should not be used anywhere outside this file */
 static int ReplicationModel = REPLICATION_MODEL_STREAMING;
@@ -150,8 +151,15 @@ static GucStringAssignHook OldApplicationNameAssignHook = NULL;
 
 static object_access_hook_type PrevObjectAccessHook = NULL;
 
+#if PG_VERSION_NUM >= PG_VERSION_15
+static shmem_request_hook_type prev_shmem_request_hook = NULL;
+#endif
+
 void _PG_init(void);
+#if PG_VERSION_NUM >= PG_VERSION_15
+static void citus_shmem_request(void);
+#endif
 static void CitusObjectAccessHook(ObjectAccessType access, Oid classId, Oid objectId,
 								  int subId, void *arg);
 static void DoInitialCleanup(void);
@@ -368,6 +376,11 @@ _PG_init(void)
 	original_client_auth_hook = ClientAuthentication_hook;
 	ClientAuthentication_hook = CitusAuthHook;
 
+#if PG_VERSION_NUM >= PG_VERSION_15
+	prev_shmem_request_hook = shmem_request_hook;
+	shmem_request_hook = citus_shmem_request;
+#endif
+
 	InitializeMaintenanceDaemon();
 
 	/* initialize coordinated transaction management */
@@ -400,6 +413,7 @@ _PG_init(void)
 	PrevObjectAccessHook = object_access_hook;
 	object_access_hook = CitusObjectAccessHook;
 
+
 	/* ensure columnar module is loaded at the right time */
 	load_file(COLUMNAR_MODULE_NAME, false);
 
@@ -442,6 +456,30 @@ _PG_init(void)
 }
 
 
+#if PG_VERSION_NUM >= PG_VERSION_15
+
+/*
+ * Requests any additional shared memory required for citus.
+ */
+static void
+citus_shmem_request(void)
+{
+	if (prev_shmem_request_hook)
+	{
+		prev_shmem_request_hook();
+	}
+
+	RequestAddinShmemSpace(BackendManagementShmemSize());
+	RequestAddinShmemSpace(SharedConnectionStatsShmemSize());
+	RequestAddinShmemSpace(MaintenanceDaemonShmemSize());
+	RequestAddinShmemSpace(CitusQueryStatsSharedMemSize());
+	RequestNamedLWLockTranche(STATS_SHARED_MEM_NAME, 1);
+}
+
+
+#endif
+
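The hunk above is the heart of the PG 15 change: PostgreSQL commit 4f2400cb3f10 moved all `RequestAddinShmemSpace()` / `RequestNamedLWLockTranche()` calls out of `_PG_init()` and into a dedicated `shmem_request_hook`. A minimal self-contained extension skeleton following the same pattern (the `myext_*` names are illustrative, not from the patch):

```c
/* myext.c -- hypothetical minimal extension mirroring the pattern above */
#include "postgres.h"

#include "fmgr.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

PG_MODULE_MAGIC;

void _PG_init(void);

#if PG_VERSION_NUM >= 150000
static shmem_request_hook_type prev_shmem_request_hook = NULL;
static void myext_shmem_request(void);
#endif
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
static void myext_shmem_startup(void);

void
_PG_init(void)
{
#if PG_VERSION_NUM >= 150000
	/* PG 15+: shmem may only be requested from shmem_request_hook */
	prev_shmem_request_hook = shmem_request_hook;
	shmem_request_hook = myext_shmem_request;
#else
	/* pre-PG 15: request directly while shared_preload_libraries loads */
	RequestAddinShmemSpace(sizeof(int64));
#endif

	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = myext_shmem_startup;
}

#if PG_VERSION_NUM >= 150000
static void
myext_shmem_request(void)
{
	/* always chain to the previous hook, as citus_shmem_request() does */
	if (prev_shmem_request_hook != NULL)
	{
		prev_shmem_request_hook();
	}

	RequestAddinShmemSpace(sizeof(int64));
}
#endif

static void
myext_shmem_startup(void)
{
	bool found = false;

	if (prev_shmem_startup_hook != NULL)
	{
		prev_shmem_startup_hook();
	}

	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
	ShmemInitStruct("myext shared counter", sizeof(int64), &found);
	LWLockRelease(AddinShmemInitLock);
}
```

Note how the startup hook is registered unconditionally on both paths; only the *request* side is version-dependent, which is exactly the split this patch applies across Citus's `Initialize*` functions.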
 /*
  * DoInitialCleanup does cleanup at start time.
  * Currently it:
@@ -1234,6 +1272,26 @@ RegisterCitusConfigVariables(void)
 		GUC_NO_SHOW_ALL,
 		NULL, NULL, NULL);
 
+	/*
+	 * This was a GUC we added on Citus 11.0.1, and replaced with another
+	 * name on 11.0.2 via #5920. However, this GUC is used in the
+	 * citus_shard_indexes_on_worker-11.0.1 script, so it is not easy to
+	 * completely get rid of it. Especially with PG 15+, Postgres verifies
+	 * the existence of the GUCs that are used, so without this
+	 * CREATE EXTENSION fails.
+	 */
+	DefineCustomStringVariable(
+		"citus.hide_shards_from_app_name_prefixes",
+		gettext_noop("Deprecated, use citus.show_shards_for_app_name_prefixes"),
+		NULL,
+		&DeprecatedEmptyString,
+		"",
+		PGC_SUSET,
+		GUC_NO_SHOW_ALL,
+		NULL, NULL, NULL);
+
 	DefineCustomIntVariable(
 		"citus.isolation_test_session_process_id",
 		NULL,
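The hunk above shows a general trick for retiring a GUC whose name has already shipped in an extension script: keep a throwaway variable defined under the old name so the script still parses. A hedged sketch of the same pattern with hypothetical names:

```c
/* Sketch: keep a retired GUC name alive as a harmless placeholder.
 * "myext.old_setting" is hypothetical; the pattern mirrors
 * citus.hide_shards_from_app_name_prefixes above. */
#include "postgres.h"
#include "utils/guc.h"

static char *DeprecatedPlaceholder = NULL;

void
RegisterDeprecatedGucs(void)
{
	DefineCustomStringVariable(
		"myext.old_setting",
		gettext_noop("Deprecated, use myext.new_setting"),
		NULL,                    /* no long description */
		&DeprecatedPlaceholder,
		"",                      /* boot value */
		PGC_SUSET,
		GUC_NO_SHOW_ALL,         /* keep it out of SHOW ALL */
		NULL, NULL, NULL);       /* no check/assign/show hooks */
}
```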
diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c
index 36a09c263..60c42f7ac 100644
--- a/src/backend/distributed/transaction/backend_data.c
+++ b/src/backend/distributed/transaction/backend_data.c
@@ -104,8 +104,6 @@ static BackendData *MyBackendData = NULL;
 
 static CitusBackendType CurrentBackendType = CITUS_BACKEND_NOT_ASSIGNED;
 
-static void BackendManagementShmemInit(void);
-static size_t BackendManagementShmemSize(void);
 static void DetermineCitusBackendType(void);
 
 
@@ -515,12 +513,15 @@ UserHasPermissionToViewStatsOf(Oid currentUserId, Oid backendOwnedId)
 void
 InitializeBackendManagement(void)
 {
+/* on PG 15, we use shmem_request_hook_type */
+#if PG_VERSION_NUM < PG_VERSION_15
+
 	/* allocate shared memory */
 	if (!IsUnderPostmaster)
 	{
 		RequestAddinShmemSpace(BackendManagementShmemSize());
 	}
-
+#endif
 	prev_shmem_startup_hook = shmem_startup_hook;
 	shmem_startup_hook = BackendManagementShmemInit;
 }
@@ -531,7 +532,7 @@ InitializeBackendManagement(void)
  * memory startup hook. The function sets up the necessary shared memory
  * segment for the backend manager.
  */
-static void
+void
 BackendManagementShmemInit(void)
 {
 	bool alreadyInitialized = false;
@@ -599,7 +600,7 @@ BackendManagementShmemInit(void)
  * BackendManagementShmemSize returns the size that should be allocated
  * on the shared memory for backend management.
  */
-static size_t
+size_t
 BackendManagementShmemSize(void)
 {
 	Size size = 0;
diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c
index 0f5d20cb6..de6abef9e 100644
--- a/src/backend/distributed/utils/maintenanced.c
+++ b/src/backend/distributed/utils/maintenanced.c
@@ -117,8 +117,6 @@ static bool IsMaintenanceDaemon = false;
 
 static void MaintenanceDaemonSigTermHandler(SIGNAL_ARGS);
 static void MaintenanceDaemonSigHupHandler(SIGNAL_ARGS);
-static size_t MaintenanceDaemonShmemSize(void);
-static void MaintenanceDaemonShmemInit(void);
 static void MaintenanceDaemonShmemExit(int code, Datum arg);
 static void MaintenanceDaemonErrorContext(void *arg);
 static bool MetadataSyncTriggeredCheckAndReset(MaintenanceDaemonDBData *dbData);
@@ -133,11 +131,6 @@ static void WarnMaintenanceDaemonNotStarted(void);
 void
 InitializeMaintenanceDaemon(void)
 {
-	if (!IsUnderPostmaster)
-	{
-		RequestAddinShmemSpace(MaintenanceDaemonShmemSize());
-	}
-
 	prev_shmem_startup_hook = shmem_startup_hook;
 	shmem_startup_hook = MaintenanceDaemonShmemInit;
 }
@@ -743,7 +736,7 @@ CitusMaintenanceDaemonMain(Datum main_arg)
 
 /*
  * MaintenanceDaemonShmemSize computes how much shared memory is required.
  */
-static size_t
+size_t
 MaintenanceDaemonShmemSize(void)
 {
 	Size size = 0;
@@ -767,7 +760,7 @@ MaintenanceDaemonShmemSize(void)
  * MaintenanceDaemonShmemInit initializes the requested shared memory for the
  * maintenance daemon.
  */
-static void
+void
 MaintenanceDaemonShmemInit(void)
 {
 	bool alreadyInitialized = false;
diff --git a/src/include/distributed/backend_data.h b/src/include/distributed/backend_data.h
index 62fcfede3..0c3b7ee26 100644
--- a/src/include/distributed/backend_data.h
+++ b/src/include/distributed/backend_data.h
@@ -45,6 +45,8 @@ typedef struct BackendData
 } BackendData;
 
 
+extern void BackendManagementShmemInit(void);
+extern size_t BackendManagementShmemSize(void);
 extern void InitializeBackendManagement(void);
 extern int TotalProcCount(void);
 extern void InitializeBackendData(void);
diff --git a/src/include/distributed/maintenanced.h b/src/include/distributed/maintenanced.h
index c5002021d..a09d89085 100644
--- a/src/include/distributed/maintenanced.h
+++ b/src/include/distributed/maintenanced.h
@@ -24,6 +24,8 @@ extern double DistributedDeadlockDetectionTimeoutFactor;
 extern void StopMaintenanceDaemon(Oid databaseId);
 extern void TriggerNodeMetadataSync(Oid databaseId);
 extern void InitializeMaintenanceDaemon(void);
+extern size_t MaintenanceDaemonShmemSize(void);
+extern void MaintenanceDaemonShmemInit(void);
 extern void InitializeMaintenanceDaemonBackend(void);
 
 extern bool LockCitusExtension(void);
diff --git a/src/include/distributed/query_stats.h b/src/include/distributed/query_stats.h
index 3c73d03ab..cc847c42b 100644
--- a/src/include/distributed/query_stats.h
+++ b/src/include/distributed/query_stats.h
@@ -12,6 +12,9 @@
 
 #include "distributed/multi_server_executor.h"
 
+#define STATS_SHARED_MEM_NAME "citus_query_stats"
+
+extern Size CitusQueryStatsSharedMemSize(void);
 extern void InitializeCitusQueryStats(void);
 extern void CitusQueryStatsExecutorsEntry(uint64 queryId, MultiExecutorType executorType,
 										  char *partitionKey);
diff --git a/src/include/distributed/shared_connection_stats.h b/src/include/distributed/shared_connection_stats.h
index 638d42f63..007691e16 100644
--- a/src/include/distributed/shared_connection_stats.h
+++ b/src/include/distributed/shared_connection_stats.h
@@ -25,6 +25,8 @@ extern int MaxClientConnections;
 extern void InitializeSharedConnectionStats(void);
 extern void WaitForSharedConnection(void);
 extern void WakeupWaiterBackendsForSharedConnection(void);
+extern size_t SharedConnectionStatsShmemSize(void);
+extern void SharedConnectionStatsShmemInit(void);
 extern int GetMaxClientConnections(void);
 extern int GetMaxSharedPoolSize(void);
 extern int GetLocalSharedPoolSize(void);

From 5b3537cdff35363461d30b745efaf60790cbe7a7 Mon Sep 17 00:00:00 2001
From: Nitish Upreti
Date: Mon, 18 Jul 2022 02:54:15 -0700
Subject: [PATCH 02/10] Shard Split for Citus (#6029)

* Blocking split setup
* Add missing type
* Missing API from Metadata Sync
* Shard Split e2e code
* Worker Split Copy DestReceiver skeleton
* Basic destreceiver code
* worker_split_copy UDF
* UDF calling
* Split points are text
* Isolate Tenant and Split Shard Unification
* Fixing executor and misc
* Reindent code
* Fixing UDF definitions
* Hello World Local Copy works
* Remote copy hello world works
* Local and Remote binary test
* Fixing text local copy and adding tests
* Hello World shard split works
* Negative tests
* Blocking Split workflow works
* Refactor
* Bug fix
* Reindent
* Cleaning up and adding comments
* Basic test for shard split workflow
* ReIndent
* Circle CI integration
* Removing include causing circle-ci build failure
* Remove SplitCopyDestReceiver and use PartitionedResultDestReceiver
* Add support for citus.enable_binary_protocol
* Reindent
* Fix build break
* Update Test
* Cleanup on catch
* Addressing open comments
* Update downgrade script and quote schema/table in COPY statement
* Fix metadata sync issue. Update regression test
* Isolation test and bug fix
* Add Isolation test, fix foreign constraint deadlock issue
* Misc code review comments
* Test name needing to be quoted
* Refactor code from review comments
* Explaining shardGroupSplitIntervalListList
* Fix upgrade & downgrade
* Fix broken test
* Test fix Round 2
* Fixing bug and modifying test appropriately
* Fully qualify copy udf name. Run Reindent
* Address PR comments
* Fix null handling when creating AuxiliaryStructures
* Ensure local copy is triggered in tests
* Limit max shards that can be created with split
* Test failure fix
* Remove split_mode and use shard_transfer_mode instead
* Fix test failure
* Fix test failure
* Fixing permission issue when splitting non-superuser owned tables
* Fix test expected output
* Remove extra space
* Fix test
* attempt to fix test
* Addressing Marco's PR comment
* Only clean shards created by workflow
* Remove from merge
* Update test
---
 .circleci/config.yml                          |   12 +
 src/backend/distributed/commands/multi_copy.c |    3 +-
 .../distributed_intermediate_results.c        |    4 +-
 .../partitioned_intermediate_results.c        |   16 +-
 .../citus_split_shard_by_split_points.c       |  100 ++
 .../{split_shards.c => isolate_shards.c}      |  130 +-
 .../distributed/operations/shard_split.c      | 1117 +++++++++++++++++
 .../operations/worker_shard_copy.c            |  483 +++++++
 .../operations/worker_split_copy_udf.c        |  264 ++++
 .../distributed/sql/citus--11.0-3--11.1-1.sql |    2 +
 .../sql/downgrades/citus--11.1-1--11.0-3.sql  |    9 +
 .../11.1-1.sql                                |   14 +
 .../latest.sql                                |   14 +
 .../sql/udfs/worker_split_copy/11.1-1.sql     |   22 +
 .../sql/udfs/worker_split_copy/latest.sql     |   22 +
 src/backend/distributed/utils/array_type.c    |   43 +
 src/include/distributed/commands/multi_copy.h |    1 +
 .../distributed/coordinator_protocol.h        |    3 +
 .../distributed/intermediate_results.h        |   17 +
 src/include/distributed/shard_split.h         |   46 +
 src/include/distributed/utils/array_type.h    |    3 +-
 src/include/distributed/worker_shard_copy.h   |   22 +
 src/test/regress/Makefile                     |    6 +-
 .../regress/enterprise_isolation_schedule     |    2 +
 .../citus_split_shard_by_split_points.out     |  459 +++++++
 ...us_split_shard_by_split_points_failure.out |  105 ++
 ...s_split_shard_by_split_points_negative.out |  173 +++
 .../isolation_blocking_shard_split.out        |  951 ++++++++++++++
 ...ing_shard_split_with_fkey_to_reference.out |  301 +++++
 src/test/regress/expected/multi_extension.out |    7 +-
 .../expected/upgrade_list_citus_objects.out   |    5 +-
 .../worker_shard_binary_copy_test.out         |  227 ++++
 .../expected/worker_shard_text_copy_test.out  |  227 ++++
 .../worker_split_binary_copy_test.out         |  263 ++++
 .../expected/worker_split_copy_test.out       |  142 +++
 .../expected/worker_split_text_copy_test.out  |  226 ++++
 .../spec/isolation_blocking_shard_split.spec  |  146 +++
 ...ng_shard_split_with_fkey_to_reference.spec |  104 ++
 src/test/regress/split_schedule               |   15 +
 .../sql/citus_split_shard_by_split_points.sql |  240 ++++
 ...us_split_shard_by_split_points_failure.sql |   80 ++
 ...s_split_shard_by_split_points_negative.sql |  145 +++
 .../sql/worker_split_binary_copy_test.sql     |  211 ++++
 .../regress/sql/worker_split_copy_test.sql    |  110 ++
 .../sql/worker_split_text_copy_test.sql       |  203 +++
 45 files changed, 6543 insertions(+), 152 deletions(-)
 create mode 100644 src/backend/distributed/operations/citus_split_shard_by_split_points.c
 rename src/backend/distributed/operations/{split_shards.c => isolate_shards.c} (84%)
 create mode 100644 src/backend/distributed/operations/shard_split.c
 create mode 100644 src/backend/distributed/operations/worker_shard_copy.c
 create mode 100644 src/backend/distributed/operations/worker_split_copy_udf.c
 create mode 100644 src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/11.1-1.sql
 create mode 100644 src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/latest.sql
 create mode 100644 src/backend/distributed/sql/udfs/worker_split_copy/11.1-1.sql
 create mode 100644 src/backend/distributed/sql/udfs/worker_split_copy/latest.sql
 create mode 100644 src/include/distributed/shard_split.h
 create mode 100644 src/include/distributed/worker_shard_copy.h
 create mode 100644 src/test/regress/expected/citus_split_shard_by_split_points.out
 create mode 100644 src/test/regress/expected/citus_split_shard_by_split_points_failure.out
 create mode 100644 src/test/regress/expected/citus_split_shard_by_split_points_negative.out
 create mode 100644 src/test/regress/expected/isolation_blocking_shard_split.out
 create mode 100644 src/test/regress/expected/isolation_blocking_shard_split_with_fkey_to_reference.out
 create mode 100644 src/test/regress/expected/worker_shard_binary_copy_test.out
 create mode 100644 src/test/regress/expected/worker_shard_text_copy_test.out
 create mode 100644 src/test/regress/expected/worker_split_binary_copy_test.out
 create mode 100644 src/test/regress/expected/worker_split_copy_test.out
 create mode 100644 src/test/regress/expected/worker_split_text_copy_test.out
 create mode 100644 src/test/regress/spec/isolation_blocking_shard_split.spec
 create mode 100644 src/test/regress/spec/isolation_blocking_shard_split_with_fkey_to_reference.spec
 create mode 100644 src/test/regress/split_schedule
 create mode 100644 src/test/regress/sql/citus_split_shard_by_split_points.sql
 create mode 100644 src/test/regress/sql/citus_split_shard_by_split_points_failure.sql
 create mode 100644 src/test/regress/sql/citus_split_shard_by_split_points_negative.sql
 create mode 100644 src/test/regress/sql/worker_split_binary_copy_test.sql
 create mode 100644 src/test/regress/sql/worker_split_copy_test.sql
 create mode 100644 src/test/regress/sql/worker_split_text_copy_test.sql

diff --git a/.circleci/config.yml b/.circleci/config.yml
index aca43f3bd..a88e40094 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -643,7 +643,19 @@ workflows:
           image_tag: '<< pipeline.parameters.pg13_version >>'
           make: check-enterprise-failure
           requires: [build-13]
+      - test-citus:
+          name: 'test-13_check-split'
+          pg_major: 13
+          image_tag: '<< pipeline.parameters.pg13_version >>'
+          make: check-split
+          requires: [build-13]
+      - test-citus:
+          name: 'test-14_check-split'
+          pg_major: 14
+          image_tag: '<< pipeline.parameters.pg14_version >>'
+          make: check-split
+          requires: [build-14]
       - test-citus:
           name: 'test-14_check-enterprise'
           pg_major: 14
diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c
index f56339a26..c19d26729 100644
--- a/src/backend/distributed/commands/multi_copy.c
+++ b/src/backend/distributed/commands/multi_copy.c
@@ -262,7 +262,6 @@ static StringInfo ConstructCopyStatement(CopyStmt *copyStatement, int64 shardId)
 static void SendCopyDataToAll(StringInfo dataBuffer, int64 shardId, List *connectionList);
 static void SendCopyDataToPlacement(StringInfo dataBuffer, int64 shardId,
 									MultiConnection *connection);
-static void ReportCopyError(MultiConnection *connection, PGresult *result);
 static uint32 AvailableColumnCount(TupleDesc tupleDescriptor);
 
 static Oid TypeForColumnName(Oid relationId, TupleDesc tupleDescriptor, char *columnName);
@@ -1201,7 +1200,7 @@ EndRemoteCopy(int64 shardId, List *connectionList)
  * ReportCopyError tries to report a useful error message for the user from
  * the remote COPY error messages.
  */
-static void
+void
 ReportCopyError(MultiConnection *connection, PGresult *result)
 {
 	char *remoteMessage = PQresultErrorField(result, PG_DIAG_MESSAGE_PRIMARY);
diff --git a/src/backend/distributed/executor/distributed_intermediate_results.c b/src/backend/distributed/executor/distributed_intermediate_results.c
index f7d62e157..e7a5830e6 100644
--- a/src/backend/distributed/executor/distributed_intermediate_results.c
+++ b/src/backend/distributed/executor/distributed_intermediate_results.c
@@ -74,8 +74,6 @@ static void PartitioningTupleDestPutTuple(TupleDestination *self, Task *task,
 										  HeapTuple heapTuple, uint64 tupleLibpqSize);
 static TupleDesc PartitioningTupleDestTupleDescForQuery(TupleDestination *self, int
 														queryNumber);
-static ArrayType * CreateArrayFromDatums(Datum *datumArray, bool *nullsArray, int
-										 datumCount, Oid typeId);
 static char * SourceShardPrefix(const char *resultPrefix, uint64 shardId);
 static DistributedResultFragment * TupleToDistributedResultFragment(HeapTuple heapTuple,
 																	TupleDesc tupleDesc,
@@ -372,7 +370,7 @@ ShardMinMaxValueArrays(ShardInterval **shardIntervalArray, int shardCount,
 /*
  * CreateArrayFromDatums creates an array consisting of given values and nulls.
  */
-static ArrayType *
+ArrayType *
 CreateArrayFromDatums(Datum *datumArray, bool *nullsArray, int datumCount, Oid typeId)
 {
 	bool typeByValue = false;
diff --git a/src/backend/distributed/executor/partitioned_intermediate_results.c b/src/backend/distributed/executor/partitioned_intermediate_results.c
index b59538888..e19829ca2 100644
--- a/src/backend/distributed/executor/partitioned_intermediate_results.c
+++ b/src/backend/distributed/executor/partitioned_intermediate_results.c
@@ -95,18 +95,6 @@ typedef struct PartitionedResultDestReceiver
 } PartitionedResultDestReceiver;
 
 static Portal StartPortalForQueryExecution(const char *queryString);
-static CitusTableCacheEntry * QueryTupleShardSearchInfo(ArrayType *minValuesArray,
-														ArrayType *maxValuesArray,
-														char partitionMethod,
-														Var *partitionColumn);
-static DestReceiver * CreatePartitionedResultDestReceiver(int partitionColumnIndex,
-														  int partitionCount,
-														  CitusTableCacheEntry *
-														  shardSearchInfo,
-														  DestReceiver **
-														  partitionedDestReceivers,
-														  bool lazyStartup,
-														  bool allowNullPartitionValues);
 static void PartitionedResultDestReceiverStartup(DestReceiver *dest, int operation,
 												 TupleDesc inputTupleDescriptor);
 static bool PartitionedResultDestReceiverReceive(TupleTableSlot *slot,
@@ -319,7 +307,7 @@ StartPortalForQueryExecution(const char *queryString)
  * information so that FindShardInterval() can find the shard corresponding
  * to a tuple.
 */
-static CitusTableCacheEntry *
+CitusTableCacheEntry *
 QueryTupleShardSearchInfo(ArrayType *minValuesArray, ArrayType *maxValuesArray,
 						  char partitionMethod, Var *partitionColumn)
 {
@@ -408,7 +396,7 @@ QueryTupleShardSearchInfo(ArrayType *minValuesArray, ArrayType *maxValuesArray,
 /*
  * CreatePartitionedResultDestReceiver sets up a partitioned dest receiver.
  */
-static DestReceiver *
+DestReceiver *
 CreatePartitionedResultDestReceiver(int partitionColumnIndex,
 									int partitionCount,
 									CitusTableCacheEntry *shardSearchInfo,
diff --git a/src/backend/distributed/operations/citus_split_shard_by_split_points.c b/src/backend/distributed/operations/citus_split_shard_by_split_points.c
new file mode 100644
index 000000000..11e979900
--- /dev/null
+++ b/src/backend/distributed/operations/citus_split_shard_by_split_points.c
@@ -0,0 +1,100 @@
+/*-------------------------------------------------------------------------
+ *
+ * citus_split_shard_by_split_points.c
+ *
+ * This file contains functions to split a shard.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+#include "catalog/pg_type.h"
+#include "nodes/pg_list.h"
+#include "lib/stringinfo.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "distributed/utils/array_type.h"
+#include "distributed/colocation_utils.h"
+#include "distributed/metadata_cache.h"
+#include "distributed/shardinterval_utils.h"
+#include "distributed/coordinator_protocol.h"
+#include "distributed/connection_management.h"
+#include "distributed/remote_commands.h"
+#include "distributed/shard_split.h"
+
+/* declarations for dynamic loading */
+PG_FUNCTION_INFO_V1(citus_split_shard_by_split_points);
+
+static SplitMode LookupSplitMode(Oid shardTransferModeOid);
+
+/*
+ * citus_split_shard_by_split_points(shard_id bigint, split_points text[], node_ids integer[], shard_transfer_mode citus.shard_transfer_mode)
+ * Split source shard into multiple shards using the given split points.
+ * 'shard_id' is the id of source shard to split.
+ * 'split_points' is an array that represents the split points.
+ * 'node_ids' is an array that represents the placement node ids of the new shards.
+ * 'shard_transfer_mode citus.shard_transfer_mode' is the transfer mode for split.
+ */
+Datum
+citus_split_shard_by_split_points(PG_FUNCTION_ARGS)
+{
+	CheckCitusVersion(ERROR);
+	EnsureCoordinator();
+
+	uint64 shardIdToSplit = DatumGetUInt64(PG_GETARG_DATUM(0));
+
+	ArrayType *splitPointsArrayObject = PG_GETARG_ARRAYTYPE_P(1);
+	List *shardSplitPointsList = TextArrayTypeToIntegerList(splitPointsArrayObject);
+
+	ArrayType *nodeIdsArrayObject = PG_GETARG_ARRAYTYPE_P(2);
+	List *nodeIdsForPlacementList = IntegerArrayTypeToList(nodeIdsArrayObject);
+
+	Oid shardTransferModeOid = PG_GETARG_OID(3);
+	SplitMode shardSplitMode = LookupSplitMode(shardTransferModeOid);
+
+	SplitShard(
+		shardSplitMode,
+		SHARD_SPLIT_API,
+		shardIdToSplit,
+		shardSplitPointsList,
+		nodeIdsForPlacementList);
+
+	PG_RETURN_VOID();
+}
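Based on the UDF signature registered above, an invocation would look roughly like the following. The shard id and node ids here are illustrative; real values come from `pg_dist_shard` and `pg_dist_node`:

```sql
-- Split one shard into three at two hash-range split points.
SELECT citus_split_shard_by_split_points(
    102008,                          -- shard to split (illustrative id)
    ARRAY['-1073741824', '0'],       -- split points, passed as text
    ARRAY[1, 2, 3],                  -- one target node id per resulting shard
    shard_transfer_mode := 'block_writes');
```

Note that the node id array must contain exactly one more element than the split point array, and `block_writes` is the only transfer mode this patch accepts (see LookupSplitMode below).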
+
+
+/*
+ * LookupSplitMode maps the oids of citus.shard_transfer_mode to SplitMode enum.
+ */
+SplitMode
+LookupSplitMode(Oid shardTransferModeOid)
+{
+	SplitMode shardSplitMode = BLOCKING_SPLIT;
+
+	Datum enumLabelDatum = DirectFunctionCall1(enum_out, shardTransferModeOid);
+	char *enumLabel = DatumGetCString(enumLabelDatum);
+
+	/* Extend with other modes as we support them */
+	if (strncmp(enumLabel, "block_writes", NAMEDATALEN) == 0)
+	{
+		shardSplitMode = BLOCKING_SPLIT;
+	}
+	else if (strncmp(enumLabel, "auto", NAMEDATALEN) == 0 ||
+			 strncmp(enumLabel, "force_logical", NAMEDATALEN) == 0)
+	{
+		ereport(ERROR, (errmsg(
+							"Shard transfer mode: '%s' is not supported. Please use 'block_writes' instead.",
+							enumLabel)));
+	}
+	else
+	{
+		/* We will not get here as postgres will validate the enum value. */
+		ereport(ERROR, (errmsg(
+							"Invalid shard transfer mode: '%s'. Expected split mode is 'block_writes'.",
+							enumLabel)));
+	}
+
+	return shardSplitMode;
+}
diff --git a/src/backend/distributed/operations/split_shards.c b/src/backend/distributed/operations/isolate_shards.c
similarity index 84%
rename from src/backend/distributed/operations/split_shards.c
rename to src/backend/distributed/operations/isolate_shards.c
index 680a3fdf3..f78e48cc2 100644
--- a/src/backend/distributed/operations/split_shards.c
+++ b/src/backend/distributed/operations/isolate_shards.c
@@ -32,6 +32,7 @@
 #include "distributed/worker_protocol.h"
 #include "distributed/worker_transaction.h"
 #include "distributed/version_compat.h"
+#include "distributed/shard_split.h"
 #include "nodes/pg_list.h"
 #include "storage/lock.h"
 #include "utils/builtins.h"
@@ -48,7 +49,6 @@ PG_FUNCTION_INFO_V1(worker_hash);
 /* local function forward declarations */
 static uint64 SplitShardByValue(ShardInterval *sourceShard,
 								Datum distributionValueDatum);
-static void ErrorIfCannotSplitShard(ShardInterval *sourceShard);
 static void CreateSplitOffShards(ShardInterval *sourceShard, int hashedValue,
 								 List **splitOffShardList, int *isolatedShardId);
 static List * ShardTemplateList(ShardInterval *sourceShard, int hashedValue,
@@ -62,7 +62,6 @@ static void InsertSplitOffShardMetadata(List *splitOffShardList,
 										List *sourcePlacementList);
 static void CreateForeignConstraints(List *splitOffShardList, List *sourcePlacementList);
 static void ExecuteCommandListOnWorker(char *nodeName, int nodePort, List *commandList);
-static void DropShardList(List *shardIntervalList);
 
 
 /*
@@ -245,7 +244,7 @@ SplitShardByValue(ShardInterval *sourceShard, Datum distributionValueDatum)
 	/* get locks */
 	BlockWritesToShardList(colocatedShardList);
 
-	ErrorIfCannotSplitShard(sourceShard);
+	ErrorIfCannotSplitShard(ISOLATE_TENANT_TO_NEW_SHARD, sourceShard);
 
 	/* get hash function name */
 	CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId);
@@ -372,65 +371,6 @@ ExecuteCommandListOnWorker(char *nodeName, int nodePort, List *commandList)
 }
 
 
-/*
- * ErrorIfCannotSplitShard checks relation kind and invalid shards. It errors
- * out if we are not able to split the given shard.
- */
-static void
-ErrorIfCannotSplitShard(ShardInterval *sourceShard)
-{
-	Oid relationId = sourceShard->relationId;
-	ListCell *colocatedTableCell = NULL;
-	ListCell *colocatedShardCell = NULL;
-
-	/* checks for table ownership and foreign tables */
-	List *colocatedTableList = ColocatedTableList(relationId);
-	foreach(colocatedTableCell, colocatedTableList)
-	{
-		Oid colocatedTableId = lfirst_oid(colocatedTableCell);
-
-		/* check that user has owner rights in all co-located tables */
-		EnsureTableOwner(colocatedTableId);
-
-		char relationKind = get_rel_relkind(colocatedTableId);
-		if (relationKind == RELKIND_FOREIGN_TABLE)
-		{
-			char *relationName = get_rel_name(colocatedTableId);
-			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-							errmsg("cannot isolate tenant because \"%s\" is a "
-								   "foreign table", relationName),
-							errdetail("Isolating shards backed by foreign tables "
-									  "is not supported.")));
-		}
-	}
-
-	/* check shards with inactive placements */
-	List *colocatedShardList = ColocatedShardIntervalList(sourceShard);
-	foreach(colocatedShardCell, colocatedShardList)
-	{
-		ShardInterval *shardInterval = (ShardInterval *) lfirst(colocatedShardCell);
-		uint64 shardId = shardInterval->shardId;
-		ListCell *shardPlacementCell = NULL;
-
-		List *shardPlacementList = ShardPlacementListWithoutOrphanedPlacements(shardId);
-		foreach(shardPlacementCell, shardPlacementList)
-		{
-			ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell);
-			if (placement->shardState != SHARD_STATE_ACTIVE)
-			{
-				char *relationName = get_rel_name(shardInterval->relationId);
-				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-								errmsg("cannot isolate tenant because relation "
-									   "\"%s\" has an inactive shard placement "
-									   "for the shard %lu", relationName, shardId),
-								errhint("Use master_copy_shard_placement UDF to "
-										"repair the inactive shard placement.")));
-			}
-		}
-	}
-}
-
-
 /*
  * CreateSplitOffShards gets a shard and a hashed value to pick the split point.
  * First, it creates templates to create new shards. Then, for every colocated
@@ -754,69 +694,3 @@ InsertSplitOffShardMetadata(List *splitOffShardList, List *sourcePlacementList)
 		SendCommandToWorkersWithMetadata(command);
 	}
 }
-
-
-/*
- * DropShardList drops shards and their metadata from both the coordinator and
- * mx nodes.
- */
-static void
-DropShardList(List *shardIntervalList)
-{
-	ListCell *shardIntervalCell = NULL;
-
-	foreach(shardIntervalCell, shardIntervalList)
-	{
-		ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
-		ListCell *shardPlacementCell = NULL;
-		Oid relationId = shardInterval->relationId;
-		uint64 oldShardId = shardInterval->shardId;
-
-		/* delete metadata from synced nodes */
-		if (ShouldSyncTableMetadata(relationId))
-		{
-			ListCell *commandCell = NULL;
-
-			/* send the commands one by one */
-			List *shardMetadataDeleteCommandList = ShardDeleteCommandList(shardInterval);
-			foreach(commandCell, shardMetadataDeleteCommandList)
-			{
-				char *command = (char *) lfirst(commandCell);
-				SendCommandToWorkersWithMetadata(command);
-			}
-		}
-
-		/* delete shard placements and drop shards */
-		List *shardPlacementList = ActiveShardPlacementList(oldShardId);
-		foreach(shardPlacementCell, shardPlacementList)
-		{
-			ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell);
-			char *workerName = placement->nodeName;
-			uint32 workerPort = placement->nodePort;
-			StringInfo dropQuery = makeStringInfo();
-
-			DeleteShardPlacementRow(placement->placementId);
-
-			/* get shard name */
-			char *qualifiedShardName = ConstructQualifiedShardName(shardInterval);
-
-			char storageType = shardInterval->storageType;
-			if (storageType == SHARD_STORAGE_TABLE)
-			{
-				appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND,
-								 qualifiedShardName);
-			}
-			else if (storageType == SHARD_STORAGE_FOREIGN)
-			{
-				appendStringInfo(dropQuery, DROP_FOREIGN_TABLE_COMMAND,
-								 qualifiedShardName);
-			}
-
-			/* drop old shard */
-			SendCommandToWorker(workerName, workerPort, dropQuery->data);
-		}
-
-		/* delete shard row */
-		DeleteShardRow(oldShardId);
-	}
-}
diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c
new file mode 100644
index 000000000..93231797d
--- /dev/null
+++ b/src/backend/distributed/operations/shard_split.c
@@ -0,0 +1,1117 @@
+/*-------------------------------------------------------------------------
+ *
+ * shard_split.c
+ *
+ * Function definitions for the shard split.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+#include "common/hashfn.h"
+#include "nodes/pg_list.h"
+#include "utils/array.h"
+#include "distributed/utils/array_type.h"
+#include "lib/stringinfo.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "distributed/shared_library_init.h"
+#include "distributed/adaptive_executor.h"
+#include "distributed/colocation_utils.h"
+#include "distributed/metadata_cache.h"
+#include "distributed/shardinterval_utils.h"
+#include "distributed/coordinator_protocol.h"
+#include "distributed/connection_management.h"
+#include "distributed/remote_commands.h"
+#include "distributed/shard_split.h"
+#include "distributed/reference_table_utils.h"
+#include "distributed/multi_partitioning_utils.h"
+#include "distributed/worker_manager.h"
+#include "distributed/worker_transaction.h"
+#include "distributed/shared_library_init.h"
+#include "distributed/pg_dist_shard.h"
+#include "distributed/metadata_sync.h"
+#include "distributed/multi_physical_planner.h"
+#include "distributed/deparse_shard_query.h"
+
+/*
+ * Entry for map that tracks ShardInterval -> Placement Node
+ * created by split workflow.
+ */
+typedef struct ShardCreatedByWorkflowEntry
+{
+	ShardInterval *shardIntervalKey;
+	WorkerNode *workerNodeValue;
+} ShardCreatedByWorkflowEntry;
+
+/* Function declarations */
+static void ErrorIfCannotSplitShardExtended(SplitOperation splitOperation,
+											ShardInterval *shardIntervalToSplit,
+											List *shardSplitPointsList,
+											List *nodeIdsForPlacementList);
+static void CreateAndCopySplitShardsForShardGroup(
+	HTAB *mapOfShardToPlacementCreatedByWorkflow,
+	WorkerNode *sourceShardNode,
+	List *sourceColocatedShardIntervalList,
+	List *shardGroupSplitIntervalListList,
+	List *workersForPlacementList);
+static void CreateSplitShardsForShardGroup(HTAB *mapOfShardToPlacementCreatedByWorkflow,
+										   List *shardGroupSplitIntervalListList,
+										   List *workersForPlacementList);
+static void CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList,
+												   List *workersForPlacementList);
+static void CreateObjectOnPlacement(List *objectCreationCommandList,
+									WorkerNode *workerNode);
+static List * CreateSplitIntervalsForShardGroup(List *sourceColocatedShardList,
+												List *splitPointsForShard);
+static void CreateSplitIntervalsForShard(ShardInterval *sourceShard,
+										 List *splitPointsForShard,
+										 List **shardSplitChildrenIntervalList);
+static void BlockingShardSplit(SplitOperation splitOperation,
+							   ShardInterval *shardIntervalToSplit,
+							   List *shardSplitPointsList,
+							   List *workersForPlacementList);
+static void DoSplitCopy(WorkerNode *sourceShardNode,
+						List *sourceColocatedShardIntervalList,
+						List *shardGroupSplitIntervalListList,
+						List *workersForPlacementList);
+static StringInfo CreateSplitCopyCommand(ShardInterval *sourceShardSplitInterval,
+										 List *splitChildrenShardIntervalList,
+										 List *workersForPlacementList);
+static void InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList,
+											 List *workersForPlacementList);
+static void CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList,
+										List *workersForPlacementList);
+static void TryDropSplitShardsOnFailure(HTAB *mapOfShardToPlacementCreatedByWorkflow);
+static HTAB * CreateEmptyMapForShardsCreatedByWorkflow();
+static Task * CreateTaskForDDLCommandList(List *ddlCommandList, WorkerNode *workerNode);
+
+/* Customize error message strings based on operation type */
+static const char *const SplitOperationName[] =
+{
+	[SHARD_SPLIT_API] = "split",
+	[ISOLATE_TENANT_TO_NEW_SHARD] = "isolate",
+};
+static const char *const SplitTargetName[] =
+{
+	[SHARD_SPLIT_API] = "shard",
+	[ISOLATE_TENANT_TO_NEW_SHARD] = "tenant",
+};
+
+/* Function definitions */
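The two message tables above use C99 designated array initializers so each enum value indexes its own message fragment; the error paths below then compose "cannot split shard" vs. "cannot isolate tenant" from the same format strings. A tiny standalone illustration of the idiom:

```c
#include <stdio.h>

typedef enum SplitOperation
{
	SHARD_SPLIT_API = 0,
	ISOLATE_TENANT_TO_NEW_SHARD
} SplitOperation;

static const char *const OperationName[] = {
	[SHARD_SPLIT_API] = "split",
	[ISOLATE_TENANT_TO_NEW_SHARD] = "isolate",
};

int
main(void)
{
	/* prints "cannot isolate tenant" */
	printf("cannot %s tenant\n", OperationName[ISOLATE_TENANT_TO_NEW_SHARD]);
	return 0;
}
```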
+
+/*
+ * ErrorIfCannotSplitShard checks relation kind and invalid shards. It errors
+ * out if we are not able to split the given shard.
+ */
+void
+ErrorIfCannotSplitShard(SplitOperation splitOperation, ShardInterval *sourceShard)
+{
+	Oid relationId = sourceShard->relationId;
+	ListCell *colocatedTableCell = NULL;
+	ListCell *colocatedShardCell = NULL;
+
+	/* checks for table ownership and foreign tables */
+	List *colocatedTableList = ColocatedTableList(relationId);
+	foreach(colocatedTableCell, colocatedTableList)
+	{
+		Oid colocatedTableId = lfirst_oid(colocatedTableCell);
+
+		/* check that user has owner rights in all co-located tables */
+		EnsureTableOwner(colocatedTableId);
+
+		char relationKind = get_rel_relkind(colocatedTableId);
+		if (relationKind == RELKIND_FOREIGN_TABLE)
+		{
+			char *relationName = get_rel_name(colocatedTableId);
+			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+							errmsg("cannot %s %s because \"%s\" is a "
+								   "foreign table",
+								   SplitOperationName[splitOperation],
+								   SplitTargetName[splitOperation],
+								   relationName),
+							errdetail("Splitting shards backed by foreign tables "
+									  "is not supported.")));
+		}
+
+		/*
+		 * At the moment, we do not support copying a shard if that shard's
+		 * relation is in a colocation group with a partitioned table or partition.
+		 */
+		if (PartitionedTable(colocatedTableId))
+		{
+			char *sourceRelationName = get_rel_name(relationId);
+			char *colocatedRelationName = get_rel_name(colocatedTableId);
+
+			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+							errmsg("cannot %s '%s' because it "
+								   "is a partitioned table",
+								   SplitOperationName[splitOperation],
+								   colocatedRelationName),
+							errdetail("In colocation group of '%s', a partitioned "
+									  "relation exists: '%s'. Citus does not support "
+									  "%s of partitioned tables.",
+									  sourceRelationName,
+									  colocatedRelationName,
+									  SplitOperationName[splitOperation])));
+		}
+	}
+
+	/* check shards with inactive placements */
+	List *colocatedShardList = ColocatedShardIntervalList(sourceShard);
+	foreach(colocatedShardCell, colocatedShardList)
+	{
+		ShardInterval *shardInterval = (ShardInterval *) lfirst(colocatedShardCell);
+		uint64 shardId = shardInterval->shardId;
+		ListCell *shardPlacementCell = NULL;
+
+		List *shardPlacementList = ShardPlacementListWithoutOrphanedPlacements(shardId);
+		foreach(shardPlacementCell, shardPlacementList)
+		{
+			ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell);
+			if (placement->shardState != SHARD_STATE_ACTIVE)
+			{
+				char *relationName = get_rel_name(shardInterval->relationId);
+				ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+								errmsg("cannot %s %s because relation "
+									   "\"%s\" has an inactive shard placement "
+									   "for the shard %lu",
+									   SplitOperationName[splitOperation],
+									   SplitTargetName[splitOperation],
+									   relationName, shardId),
+								errhint("Use master_copy_shard_placement UDF to "
+										"repair the inactive shard placement.")));
+			}
+		}
+	}
+}
+
+
+/*
+ * Extended checks before we decide to split the shard.
+ * When all consumers (Example : ISOLATE_TENANT_TO_NEW_SHARD) directly call 'SplitShard' API,
+ * this method will be merged with 'ErrorIfCannotSplitShard' above.
+ */
+static void
+ErrorIfCannotSplitShardExtended(SplitOperation splitOperation,
+								ShardInterval *shardIntervalToSplit,
+								List *shardSplitPointsList,
+								List *nodeIdsForPlacementList)
+{
+	CitusTableCacheEntry *cachedTableEntry = GetCitusTableCacheEntry(
+		shardIntervalToSplit->relationId);
+
+	/* Perform checks common to both blocking and non-blocking Split API here. */
+	if (!IsCitusTableTypeCacheEntry(cachedTableEntry, HASH_DISTRIBUTED))
+	{
+		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						errmsg("Cannot %s %s as operation "
+							   "is only supported for hash distributed tables.",
+							   SplitOperationName[splitOperation],
+							   SplitTargetName[splitOperation])));
+	}
+
+	if (extern_IsColumnarTableAmTable(shardIntervalToSplit->relationId))
+	{
+		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						errmsg("Cannot %s %s as operation "
+							   "is not supported for Columnar tables.",
+							   SplitOperationName[splitOperation],
+							   SplitTargetName[splitOperation])));
+	}
+
+	uint32 relationReplicationFactor = TableShardReplicationFactor(
+		shardIntervalToSplit->relationId);
+	if (relationReplicationFactor > 1)
+	{
+		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+						errmsg(
+							"Operation %s not supported for %s as replication factor '%u' "
+							"is greater than 1.",
+							SplitOperationName[splitOperation],
+							SplitTargetName[splitOperation],
+							relationReplicationFactor)));
+	}
+
+	int splitPointsCount = list_length(shardSplitPointsList);
+	int nodeIdsCount = list_length(nodeIdsForPlacementList);
+	int shardsCount = splitPointsCount + 1;
+	if (nodeIdsCount != shardsCount)
+	{
+		ereport(ERROR,
+				(errcode(ERRCODE_SYNTAX_ERROR),
+				 errmsg(
+					 "Number of worker node ids should be one greater than the "
+					 "number of split points. NodeId count is '%d' and "
+					 "SplitPoint count is '%d'.",
+					 nodeIdsCount,
+					 splitPointsCount)));
+	}
+
+	if (shardsCount > MAX_SHARD_COUNT)
+	{
+		ereport(ERROR, (errmsg(
+							"Resulting shard count '%d' with split is greater than the "
+							"max shard count '%d' limit.",
+							shardsCount, MAX_SHARD_COUNT)));
+	}
+
+	Assert(shardIntervalToSplit->minValueExists);
+	Assert(shardIntervalToSplit->maxValueExists);
+
+	/* We already verified table is Hash Distributed. We know (minValue, maxValue) are integers. */
+	int32 minValue = DatumGetInt32(shardIntervalToSplit->minValue);
+	int32 maxValue = DatumGetInt32(shardIntervalToSplit->maxValue);
+
+	/* Fail if the shard interval cannot be split any further, i.e. the (min, max) range has collapsed to a single value. */
+	if (minValue == maxValue)
+	{
+		ereport(ERROR,
+				(errcode(ERRCODE_SYNTAX_ERROR),
+				 errmsg(
+					 "Cannot split shard id \"%lu\" as min/max range are equal: ('%d', '%d').",
+					 shardIntervalToSplit->shardId,
+					 minValue,
+					 maxValue)));
+	}
+
+	NullableDatum lastShardSplitPoint = { 0, true /*isnull*/ };
+	Datum shardSplitPoint;
+	foreach_int(shardSplitPoint, shardSplitPointsList)
+	{
+		int32 shardSplitPointValue = DatumGetInt32(shardSplitPoint);
+
+		/*
+		 * 1) All split points should lie within the shard interval range.
+		 * 2) Given that split points are inclusive, you cannot specify the max value of a range as a split point.
+		 * Example: Shard 81060002 range is from (0,1073741823). '1073741823' as split point is invalid.
+		 * '1073741822' is correct and will split shard to: (0, 1073741822) and (1073741823, 1073741823).
+		 */
+		if (shardSplitPointValue < minValue || shardSplitPointValue > maxValue)
+		{
+			ereport(ERROR,
+					(errcode(ERRCODE_SYNTAX_ERROR),
+					 errmsg(
+						 "Split point %d is outside the min/max range(%d, %d) for shard id %lu.",
+						 shardSplitPointValue,
+						 DatumGetInt32(shardIntervalToSplit->minValue),
+						 DatumGetInt32(shardIntervalToSplit->maxValue),
+						 shardIntervalToSplit->shardId)));
+		}
+		else if (maxValue == shardSplitPointValue)
+		{
+			int32 validSplitPoint = shardIntervalToSplit->maxValue - 1;
+			ereport(ERROR,
+					(errcode(ERRCODE_SYNTAX_ERROR),
+					 errmsg(
+						 "Invalid split point %d, as split points should be inclusive. Please use %d instead.",
+						 maxValue,
+						 validSplitPoint)));
+		}
+
+		/* Split points should be in strictly increasing order */
+		int32 lastShardSplitPointValue = DatumGetInt32(lastShardSplitPoint.value);
+		if (!lastShardSplitPoint.isnull && shardSplitPointValue <=
+			lastShardSplitPointValue)
+		{
+			ereport(ERROR,
+					(errcode(ERRCODE_SYNTAX_ERROR),
+					 errmsg(
+						 "Invalid Split Points '%d' followed by '%d'. "
+						 "All split points should be strictly increasing.",
+						 lastShardSplitPointValue,
+						 shardSplitPointValue)));
+		}
+
+		lastShardSplitPoint = (NullableDatum) {
+			shardSplitPoint, false
+		};
+	}
+}
+
+
+/*
+ * SplitShard API to split a given shard (or shard group) based on specified split points
+ * to a set of destination nodes.
+ * 'splitMode' : Mode of split operation.
+ * 'splitOperation' : Customer operation that triggered split.
+ * 'shardInterval' : Source shard interval to be split.
+ * 'shardSplitPointsList' : Split Points list for the source 'shardInterval'.
+ * 'nodeIdsForPlacementList' : Placement list corresponding to split children.
+ */
+void
+SplitShard(SplitMode splitMode,
+		   SplitOperation splitOperation,
+		   uint64 shardIdToSplit,
+		   List *shardSplitPointsList,
+		   List *nodeIdsForPlacementList)
+{
+	if (XactModificationLevel > XACT_MODIFICATION_NONE)
+	{
+		ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+						errmsg("cannot %s %s after other modifications "
+							   "in the same transaction.",
+							   SplitOperationName[splitOperation],
+							   SplitTargetName[splitOperation])));
+	}
+
+	ShardInterval *shardIntervalToSplit = LoadShardInterval(shardIdToSplit);
+	List *colocatedTableList = ColocatedTableList(shardIntervalToSplit->relationId);
+
+	/* sort the tables to avoid deadlocks */
+	colocatedTableList = SortList(colocatedTableList, CompareOids);
+	Oid colocatedTableId = InvalidOid;
+	foreach_oid(colocatedTableId, colocatedTableList)
+	{
+		/*
+		 * Block concurrent DDL / TRUNCATE commands on the relation. Similarly,
+		 * block concurrent citus_move_shard_placement() / isolate_tenant_to_new_shard()
+		 * on any shard of the same relation.
+		 */
+		LockRelationOid(colocatedTableId, ShareUpdateExclusiveLock);
+	}
+
+	ErrorIfCannotSplitShard(SHARD_SPLIT_API, shardIntervalToSplit);
+	ErrorIfCannotSplitShardExtended(
+		SHARD_SPLIT_API,
+		shardIntervalToSplit,
+		shardSplitPointsList,
+		nodeIdsForPlacementList);
+
+	List *workersForPlacementList = NIL;
+	Datum nodeId;
+	foreach_int(nodeId, nodeIdsForPlacementList)
+	{
+		uint32 nodeIdValue = DatumGetUInt32(nodeId);
+		WorkerNode *workerNode = LookupNodeByNodeId(nodeIdValue);
+
+		/* Node ids in Citus are unsigned and fall in the range [1, 4294967295]. */
+		if (nodeIdValue < 1 || workerNode == NULL)
+		{
+			ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+							errmsg("Invalid Node Id '%u'.", nodeIdValue)));
+		}
+
+		workersForPlacementList =
+			lappend(workersForPlacementList, (void *) workerNode);
+	}
+
+	if (splitMode == BLOCKING_SPLIT)
+	{
+		EnsureReferenceTablesExistOnAllNodesExtended(TRANSFER_MODE_BLOCK_WRITES);
+		BlockingShardSplit(
+			splitOperation,
+			shardIntervalToSplit,
+			shardSplitPointsList,
+			workersForPlacementList);
+	}
+	else
+	{
+		/* we only support blocking shard split in this code path for now. */
+		ereport(ERROR, (errmsg("Invalid split mode value %d.", splitMode)));
+	}
+}
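The validation above pins down the split semantics: split points are inclusive upper bounds, so N split points produce N+1 children and the last child ends at the shard's own max value. A standalone sketch of that arithmetic (not the patch's CreateSplitIntervalsForShard, which operates on ShardInterval nodes):

```c
/* Sketch: derive child (min, max) hash ranges from inclusive split points. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* same example as the comment above: shard range (0, 1073741823) */
	int32_t shardMin = 0;
	int32_t shardMax = 1073741823;
	int32_t splitPoints[] = { 1073741822 };
	int splitPointCount = 1;

	int32_t currentMin = shardMin;
	for (int i = 0; i <= splitPointCount; i++)
	{
		/* the last child extends to the shard's own max value */
		int32_t currentMax = (i < splitPointCount) ? splitPoints[i] : shardMax;

		printf("child %d: (%d, %d)\n", i, currentMin, currentMax);
		currentMin = currentMax + 1;
	}
	return 0;
}
```

Running this prints `child 0: (0, 1073741822)` and `child 1: (1073741823, 1073741823)`, matching the worked example in the validation comment.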
+
+
+/*
+ * ShardIntervalHashCode computes the hash code for a shard from the
+ * placement's shard id.
+ */
+static uint32
+ShardIntervalHashCode(const void *key, Size keySize)
+{
+	const ShardInterval *shardInterval = (const ShardInterval *) key;
+	const uint64 *shardId = &(shardInterval->shardId);
+
+	/* standard hash function outlined in Effective Java, Item 8 */
+	uint32 result = 17;
+	result = 37 * result + tag_hash(shardId, sizeof(uint64));
+
+	return result;
+}
+
+
+/*
+ * ShardIntervalHashCompare compares two shard intervals using shard id.
+ */
+static int
+ShardIntervalHashCompare(const void *lhsKey, const void *rhsKey, Size keySize)
+{
+	const ShardInterval *intervalLhs = (const ShardInterval *) lhsKey;
+	const ShardInterval *intervalRhs = (const ShardInterval *) rhsKey;
+
+	int shardIdCompare = 0;
+
+	/* first, compare by shard id */
+	if (intervalLhs->shardId < intervalRhs->shardId)
+	{
+		shardIdCompare = -1;
+	}
+	else if (intervalLhs->shardId > intervalRhs->shardId)
+	{
+		shardIdCompare = 1;
+	}
+
+	return shardIdCompare;
+}
+
+
+/* Create an empty map that tracks ShardInterval -> Placement Node as created by workflow */
+static HTAB *
+CreateEmptyMapForShardsCreatedByWorkflow()
+{
+	HASHCTL info = { 0 };
+	info.keysize = sizeof(ShardInterval);
+	info.entrysize = sizeof(ShardCreatedByWorkflowEntry);
+	info.hash = ShardIntervalHashCode;
+	info.match = ShardIntervalHashCompare;
+	info.hcxt = CurrentMemoryContext;
+
+	/* we don't have value field as it's a set */
+	info.entrysize = info.keysize;
+
+	uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+
+	HTAB *splitChildrenCreatedByWorkflow = hash_create("Shard id to Node Placement Map",
+													   32, &info, hashFlags);
+	return splitChildrenCreatedByWorkflow;
+}
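CreateEmptyMapForShardsCreatedByWorkflow() above follows the standard dynahash recipe: fill a HASHCTL, point `hash`/`match` at callbacks, and pass the matching HASH_* flags. A minimal sketch of the same API with a simple fixed-size key and the built-in `tag_hash` (illustrative, not the patch's map):

```c
#include "postgres.h"

#include "common/hashfn.h"
#include "utils/hsearch.h"

typedef struct DemoShardEntry
{
	uint64 shardId;          /* hash key; must be the first field */
	int placementCount;
} DemoShardEntry;

static HTAB *
CreateDemoShardMap(void)
{
	HASHCTL info = { 0 };

	info.keysize = sizeof(uint64);
	info.entrysize = sizeof(DemoShardEntry);
	info.hash = tag_hash;            /* built-in hash for fixed-size keys */
	info.hcxt = CurrentMemoryContext;

	return hash_create("demo shard map", 32, &info,
					   HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
}

static void
RememberShard(HTAB *map, uint64 shardId)
{
	bool found = false;
	DemoShardEntry *entry =
		(DemoShardEntry *) hash_search(map, &shardId, HASH_ENTER, &found);

	if (!found)
	{
		/* hash_search only copies the key; initialize the rest ourselves */
		entry->placementCount = 0;
	}
	entry->placementCount++;
}
```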
+         */
+        CreateAndCopySplitShardsForShardGroup(
+            mapOfShardToPlacementCreatedByWorkflow,
+            sourceShardToCopyNode,
+            sourceColocatedShardIntervalList,
+            shardGroupSplitIntervalListList,
+            workersForPlacementList);
+
+        /*
+         * Drop old shards and delete related metadata. Have to do that before
+         * creating the new shard metadata, because there are cross-checks
+         * preventing inconsistent metadata (like overlapping shards).
+         */
+        DropShardList(sourceColocatedShardIntervalList);
+
+        /* Insert new shard and placement metadata */
+        InsertSplitChildrenShardMetadata(shardGroupSplitIntervalListList,
+                                         workersForPlacementList);
+
+        /*
+         * Create foreign keys, if any exist, after the metadata changes happening in
+         * DropShardList() and InsertSplitChildrenShardMetadata() because the foreign
+         * key creation depends on the new metadata.
+         */
+        CreateForeignKeyConstraints(shardGroupSplitIntervalListList,
+                                    workersForPlacementList);
+    }
+    PG_CATCH();
+    {
+        /* Do a best effort cleanup of shards created on workers in the above block */
+        TryDropSplitShardsOnFailure(mapOfShardToPlacementCreatedByWorkflow);
+
+        PG_RE_THROW();
+    }
+    PG_END_TRY();
+
+    CitusInvalidateRelcacheByRelid(DistShardRelationId());
+}
+
+
+/* Create ShardGroup split children on a list of corresponding workers. */
+static void
+CreateSplitShardsForShardGroup(HTAB *mapOfShardToPlacementCreatedByWorkflow,
+                               List *shardGroupSplitIntervalListList,
+                               List *workersForPlacementList)
+{
+    /*
+     * Iterate over all the shards in the shard group.
+     */
+    List *shardIntervalList = NIL;
+    foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList)
+    {
+        ShardInterval *shardInterval = NULL;
+        WorkerNode *workerPlacementNode = NULL;
+
+        /*
+         * Iterate on split shards DDL command list for a given shard
+         * and create them on corresponding workerPlacementNode.
+         */
+        forboth_ptr(shardInterval, shardIntervalList, workerPlacementNode,
+                    workersForPlacementList)
+        {
+            /* Populate list of commands necessary to create shard interval on destination */
+            List *splitShardCreationCommandList = GetPreLoadTableCreationCommands(
+                shardInterval->relationId,
+                false, /* includeSequenceDefaults */
+                NULL /* auto add columnar options for cstore tables */);
+            splitShardCreationCommandList = WorkerApplyShardDDLCommandList(
+                splitShardCreationCommandList,
+                shardInterval->shardId);
+
+            /* Create new split child shard on the specified placement list */
+            CreateObjectOnPlacement(splitShardCreationCommandList, workerPlacementNode);
+
+            ShardCreatedByWorkflowEntry entry;
+            entry.shardIntervalKey = shardInterval;
+            entry.workerNodeValue = workerPlacementNode;
+            bool found = false;
+            hash_search(mapOfShardToPlacementCreatedByWorkflow, &entry, HASH_ENTER,
+                        &found);
+            Assert(!found);
+        }
+    }
+}
+
+
+/* Create a DDL task with corresponding task list on given worker node */
+static Task *
+CreateTaskForDDLCommandList(List *ddlCommandList, WorkerNode *workerNode)
+{
+    Task *ddlTask = CitusMakeNode(Task);
+    ddlTask->taskType = DDL_TASK;
+    ddlTask->replicationModel = REPLICATION_MODEL_INVALID;
+    SetTaskQueryStringList(ddlTask, ddlCommandList);
+
+    ShardPlacement *taskPlacement = CitusMakeNode(ShardPlacement);
+    SetPlacementNodeMetadata(taskPlacement, workerNode);
+    ddlTask->taskPlacementList = list_make1(taskPlacement);
+
+    return ddlTask;
+}
+
+
+/* Create ShardGroup auxiliary structures (indexes, stats, replica identities, triggers)
+ * on a list of corresponding workers.
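+ * The DDL for these structures comes from GetPostLoadTableCreationCommands()
+ * and is executed as one task list outside a transaction; a shard without
+ * any auxiliary structures simply contributes no task.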
+ */
+static void
+CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList,
+                                       List *workersForPlacementList)
+{
+    List *shardIntervalList = NIL;
+    List *ddlTaskExecList = NIL;
+
+    /*
+     * Iterate over all the shards in the shard group.
+     */
+    foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList)
+    {
+        ShardInterval *shardInterval = NULL;
+        WorkerNode *workerPlacementNode = NULL;
+
+        /*
+         * Iterate on split shard interval list for given shard and create tasks
+         * for every single split shard in a shard group.
+         */
+        forboth_ptr(shardInterval, shardIntervalList, workerPlacementNode,
+                    workersForPlacementList)
+        {
+            List *ddlCommandList = GetPostLoadTableCreationCommands(
+                shardInterval->relationId,
+                true /* includeIndexes */,
+                true /* includeReplicaIdentity */);
+            ddlCommandList = WorkerApplyShardDDLCommandList(
+                ddlCommandList,
+                shardInterval->shardId);
+
+            /*
+             * A task is expected to be instantiated with a non-null 'ddlCommandList'.
+             * The list can be empty, if no auxiliary structures are present.
+             */
+            if (ddlCommandList != NULL)
+            {
+                Task *ddlTask = CreateTaskForDDLCommandList(ddlCommandList,
+                                                            workerPlacementNode);
+
+                ddlTaskExecList = lappend(ddlTaskExecList, ddlTask);
+            }
+        }
+    }
+
+    ExecuteTaskListOutsideTransaction(
+        ROW_MODIFY_NONE,
+        ddlTaskExecList,
+        MaxAdaptiveExecutorPoolSize,
+        NULL /* jobIdList (ignored by API implementation) */);
+}
+
+
+/*
+ * Create ShardGroup split children, perform copy and create auxiliary structures
+ * on a list of corresponding workers.
+ */
+static void
+CreateAndCopySplitShardsForShardGroup(HTAB *mapOfShardToPlacementCreatedByWorkflow,
+                                      WorkerNode *sourceShardNode,
+                                      List *sourceColocatedShardIntervalList,
+                                      List *shardGroupSplitIntervalListList,
+                                      List *workersForPlacementList)
+{
+    CreateSplitShardsForShardGroup(mapOfShardToPlacementCreatedByWorkflow,
+                                   shardGroupSplitIntervalListList,
+                                   workersForPlacementList);
+
+    DoSplitCopy(sourceShardNode, sourceColocatedShardIntervalList,
+                shardGroupSplitIntervalListList, workersForPlacementList);
+
+    /* Create auxiliary structures (indexes, stats, replica identities, triggers) */
+    CreateAuxiliaryStructuresForShardGroup(shardGroupSplitIntervalListList,
+                                           workersForPlacementList);
+}
+
+
+/*
+ * Perform Split Copy from source shard(s) to split children.
+ * 'sourceShardNode' : Source shard worker node.
+ * 'sourceColocatedShardIntervalList' : List of source shard intervals from shard group.
+ * 'shardGroupSplitIntervalListList' : List of shard intervals for split children.
+ * 'workersForPlacementList' : List of workers for split children placement.
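+ *
+ * Each colocated source shard gets one READ_TASK that invokes the
+ * worker_split_copy() UDF on the source node; the tasks are then executed
+ * outside a transaction.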
+ */
+static void
+DoSplitCopy(WorkerNode *sourceShardNode, List *sourceColocatedShardIntervalList,
+            List *shardGroupSplitIntervalListList, List *destinationWorkerNodesList)
+{
+    ShardInterval *sourceShardIntervalToCopy = NULL;
+    List *splitShardIntervalList = NIL;
+
+    int taskId = 0;
+    List *splitCopyTaskList = NIL;
+    forboth_ptr(sourceShardIntervalToCopy, sourceColocatedShardIntervalList,
+                splitShardIntervalList, shardGroupSplitIntervalListList)
+    {
+        StringInfo splitCopyUdfCommand = CreateSplitCopyCommand(sourceShardIntervalToCopy,
+                                                                splitShardIntervalList,
+                                                                destinationWorkerNodesList);
+
+        Task *splitCopyTask = CreateBasicTask(
+            sourceShardIntervalToCopy->shardId, /* jobId */
+            taskId,
+            READ_TASK,
+            splitCopyUdfCommand->data);
+
+        ShardPlacement *taskPlacement = CitusMakeNode(ShardPlacement);
+        SetPlacementNodeMetadata(taskPlacement, sourceShardNode);
+
+        splitCopyTask->taskPlacementList = list_make1(taskPlacement);
+
+        splitCopyTaskList = lappend(splitCopyTaskList, splitCopyTask);
+        taskId++;
+    }
+
+    ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, splitCopyTaskList,
+                                      MaxAdaptiveExecutorPoolSize,
+                                      NULL /* jobIdList (ignored by API implementation) */);
+}
+
+
+/*
+ * Create Copy command for a given source shard to be copied to its corresponding split children.
+ * 'sourceShardSplitInterval' : Source shard interval to be copied.
+ * 'splitChildrenShardIntervalList' : List of shard intervals for split children.
+ * 'destinationWorkerNodesList' : List of workers for split children placement.
+ * Here is an example of a 2 way split copy :
+ * SELECT * from worker_split_copy(
+ *  81060000, -- source shard id to split copy
+ *  ARRAY[
+ *       -- split copy info for split child 1
+ *       ROW(81060015, -- destination shard id
+ *           -2147483648, -- split range begin
+ *           1073741823, -- split range end
+ *           10 -- worker node id)::pg_catalog.split_copy_info,
+ *       -- split copy info for split child 2
+ *       ROW(81060016, -- destination shard id
+ *           1073741824, -- split range begin
+ *           2147483647, -- split range end
+ *           11 -- worker node id)::pg_catalog.split_copy_info
+ *      ]
+ *  );
+ */
+static StringInfo
+CreateSplitCopyCommand(ShardInterval *sourceShardSplitInterval,
+                       List *splitChildrenShardIntervalList,
+                       List *destinationWorkerNodesList)
+{
+    StringInfo splitCopyInfoArray = makeStringInfo();
+    appendStringInfo(splitCopyInfoArray, "ARRAY[");
+
+    ShardInterval *splitChildShardInterval = NULL;
+    bool addComma = false;
+    WorkerNode *destinationWorkerNode = NULL;
+    forboth_ptr(splitChildShardInterval, splitChildrenShardIntervalList,
+                destinationWorkerNode, destinationWorkerNodesList)
+    {
+        if (addComma)
+        {
+            appendStringInfo(splitCopyInfoArray, ",");
+        }
+
+        StringInfo splitCopyInfoRow = makeStringInfo();
+        appendStringInfo(splitCopyInfoRow,
+                         "ROW(%lu, %d, %d, %u)::pg_catalog.split_copy_info",
+                         splitChildShardInterval->shardId,
+                         DatumGetInt32(splitChildShardInterval->minValue),
+                         DatumGetInt32(splitChildShardInterval->maxValue),
+                         destinationWorkerNode->nodeId);
+        appendStringInfo(splitCopyInfoArray, "%s", splitCopyInfoRow->data);
+
+        addComma = true;
+    }
+    appendStringInfo(splitCopyInfoArray, "]");
+
+    StringInfo splitCopyUdf = makeStringInfo();
+    appendStringInfo(splitCopyUdf, "SELECT pg_catalog.worker_split_copy(%lu, %s);",
+                     sourceShardSplitInterval->shardId,
+                     splitCopyInfoArray->data);
+
+    return splitCopyUdf;
+}
+
+
+/*
+ * Create an object on a worker node.
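+ * The command list runs as the current user outside a transaction, so a
+ * failure partway through can leave objects behind; TryDropSplitShardsOnFailure
+ * later removes those in a best effort manner.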
+ */
+static void
+CreateObjectOnPlacement(List *objectCreationCommandList,
+                        WorkerNode *workerPlacementNode)
+{
+    char *currentUser = CurrentUserName();
+    SendCommandListToWorkerOutsideTransaction(workerPlacementNode->workerName,
+                                              workerPlacementNode->workerPort,
+                                              currentUser,
+                                              objectCreationCommandList);
+}
+
+
+/*
+ * Create split children intervals for a shardgroup given a list of split points.
+ * Example:
+ * 'sourceColocatedShardIntervalList': Colocated shard S1[-2147483648, 2147483647] & S2[-2147483648, 2147483647]
+ * 'splitPointsForShard': [0] (2 way split)
+ * 'shardGroupSplitIntervalListList':
+ *  [
+ *      [ S1_1(-2147483648, 0), S1_2(1, 2147483647) ], // Split Interval List for S1.
+ *      [ S2_1(-2147483648, 0), S2_2(1, 2147483647) ]  // Split Interval List for S2.
+ *  ]
+ */
+static List *
+CreateSplitIntervalsForShardGroup(List *sourceColocatedShardIntervalList,
+                                  List *splitPointsForShard)
+{
+    List *shardGroupSplitIntervalListList = NIL;
+
+    ShardInterval *shardToSplitInterval = NULL;
+    foreach_ptr(shardToSplitInterval, sourceColocatedShardIntervalList)
+    {
+        List *shardSplitIntervalList = NIL;
+        CreateSplitIntervalsForShard(shardToSplitInterval, splitPointsForShard,
+                                     &shardSplitIntervalList);
+
+        shardGroupSplitIntervalListList = lappend(shardGroupSplitIntervalListList,
+                                                  shardSplitIntervalList);
+    }
+
+    return shardGroupSplitIntervalListList;
+}
+
+
+/*
+ * Create split children intervals given a source shard and a list of split points.
+ * Example: splitting a source shard with range [0, 100] at SplitPoints (15, 30) gives us:
+ * [(0, 15) (16, 30) (31, 100)]
+ */
+static void
+CreateSplitIntervalsForShard(ShardInterval *sourceShard,
+                             List *splitPointsForShard,
+                             List **shardSplitChildrenIntervalList)
+{
+    /* For 'N' split points, we will have N+1 shard intervals created. */
+    int shardIntervalCount = list_length(splitPointsForShard) + 1;
+    ListCell *splitPointCell = list_head(splitPointsForShard);
+    int32 splitParentMaxValue = DatumGetInt32(sourceShard->maxValue);
+
+    int32 currentSplitChildMinValue = DatumGetInt32(sourceShard->minValue);
+    for (int index = 0; index < shardIntervalCount; index++)
+    {
+        ShardInterval *splitChildShardInterval = CopyShardInterval(sourceShard);
+        splitChildShardInterval->shardIndex = -1;
+        splitChildShardInterval->shardId = GetNextShardId();
+
+        splitChildShardInterval->minValueExists = true;
+        splitChildShardInterval->minValue = currentSplitChildMinValue;
+        splitChildShardInterval->maxValueExists = true;
+
+        /*
+         * The length of splitPointsForShard is one less than 'shardIntervalCount',
+         * so we need to account for 'splitPointCell' being NULL in the last iteration.
+         */
+        if (splitPointCell)
+        {
+            splitChildShardInterval->maxValue = DatumGetInt32((Datum) lfirst(
+                                                                  splitPointCell));
+            splitPointCell = lnext(splitPointsForShard, splitPointCell);
+        }
+        else
+        {
+            splitChildShardInterval->maxValue = splitParentMaxValue;
+        }
+
+        currentSplitChildMinValue = splitChildShardInterval->maxValue + 1;
+        *shardSplitChildrenIntervalList = lappend(*shardSplitChildrenIntervalList,
+                                                  splitChildShardInterval);
+    }
+}
+
+
+/*
+ * Insert new shard and placement metadata.
+ * Sync the Metadata with all nodes if enabled.
+ */
+static void
+InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList,
+                                 List *workersForPlacementList)
+{
+    List *shardIntervalList = NIL;
+    List *syncedShardList = NIL;
+
+    /*
+     * Iterate over all the shards in the shard group.
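+     * For example, a child covering the hash range [-2147483648, -1] is
+     * recorded with shardminvalue '-2147483648' and shardmaxvalue '-1'
+     * (ranges are stored as text), plus one placement row on its worker.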
+ */ + foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + { + /* + * Iterate on split shards list for a given shard and insert metadata. + */ + ShardInterval *shardInterval = NULL; + WorkerNode *workerPlacementNode = NULL; + forboth_ptr(shardInterval, shardIntervalList, workerPlacementNode, + workersForPlacementList) + { + InsertShardRow( + shardInterval->relationId, + shardInterval->shardId, + shardInterval->storageType, + IntegerToText(DatumGetInt32(shardInterval->minValue)), + IntegerToText(DatumGetInt32(shardInterval->maxValue))); + + InsertShardPlacementRow( + shardInterval->shardId, + INVALID_PLACEMENT_ID, /* triggers generation of new id */ + SHARD_STATE_ACTIVE, + 0, /* shard length (zero for HashDistributed Table) */ + workerPlacementNode->groupId); + + if (ShouldSyncTableMetadata(shardInterval->relationId)) + { + syncedShardList = lappend(syncedShardList, shardInterval); + } + } + } + + /* send commands to synced nodes one by one */ + List *splitOffShardMetadataCommandList = ShardListInsertCommand(syncedShardList); + char *command = NULL; + foreach_ptr(command, splitOffShardMetadataCommandList) + { + SendCommandToWorkersWithMetadata(command); + } +} + + +/* + * Create foreign key constraints on the split children shards. + */ +static void +CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList, + List *workersForPlacementList) +{ + /* Create constraints between shards */ + List *shardIntervalList = NIL; + + /* + * Iterate over all the shards in the shard group. + */ + foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + { + ShardInterval *shardInterval = NULL; + WorkerNode *workerPlacementNode = NULL; + + /* + * Iterate on split shards list for a given shard and create constraints. + */ + forboth_ptr(shardInterval, shardIntervalList, workerPlacementNode, + workersForPlacementList) + { + List *shardForeignConstraintCommandList = NIL; + List *referenceTableForeignConstraintList = NIL; + + CopyShardForeignConstraintCommandListGrouped(shardInterval, + & + shardForeignConstraintCommandList, + & + referenceTableForeignConstraintList); + + List *constraintCommandList = NIL; + constraintCommandList = list_concat(constraintCommandList, + shardForeignConstraintCommandList); + constraintCommandList = list_concat(constraintCommandList, + referenceTableForeignConstraintList); + + char *constraintCommand = NULL; + foreach_ptr(constraintCommand, constraintCommandList) + { + SendCommandToWorker( + workerPlacementNode->workerName, + workerPlacementNode->workerPort, + constraintCommand); + } + } + } +} + + +/* + * DropShardList drops shards and their metadata from both the coordinator and + * mx nodes. 
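+ * For metadata-synced tables the delete commands are first sent to the
+ * other nodes; after that, each placement's shard table is dropped on its
+ * worker and the shard and placement rows are removed from the catalogs.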
+ */
+void
+DropShardList(List *shardIntervalList)
+{
+    ListCell *shardIntervalCell = NULL;
+
+    foreach(shardIntervalCell, shardIntervalList)
+    {
+        ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
+        ListCell *shardPlacementCell = NULL;
+        Oid relationId = shardInterval->relationId;
+        uint64 oldShardId = shardInterval->shardId;
+
+        /* delete metadata from synced nodes */
+        if (ShouldSyncTableMetadata(relationId))
+        {
+            ListCell *commandCell = NULL;
+
+            /* send the commands one by one (calls citus_internal_delete_shard_metadata internally) */
+            List *shardMetadataDeleteCommandList = ShardDeleteCommandList(shardInterval);
+            foreach(commandCell, shardMetadataDeleteCommandList)
+            {
+                char *command = (char *) lfirst(commandCell);
+                SendCommandToWorkersWithMetadata(command);
+            }
+        }
+
+        /* delete shard placements and drop shards */
+        List *shardPlacementList = ActiveShardPlacementList(oldShardId);
+        foreach(shardPlacementCell, shardPlacementList)
+        {
+            ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell);
+            char *workerName = placement->nodeName;
+            uint32 workerPort = placement->nodePort;
+            StringInfo dropQuery = makeStringInfo();
+
+            DeleteShardPlacementRow(placement->placementId);
+
+            /* get shard name */
+            char *qualifiedShardName = ConstructQualifiedShardName(shardInterval);
+
+            char storageType = shardInterval->storageType;
+            if (storageType == SHARD_STORAGE_TABLE)
+            {
+                appendStringInfo(dropQuery, DROP_REGULAR_TABLE_COMMAND,
+                                 qualifiedShardName);
+            }
+            else if (storageType == SHARD_STORAGE_FOREIGN)
+            {
+                appendStringInfo(dropQuery, DROP_FOREIGN_TABLE_COMMAND,
+                                 qualifiedShardName);
+            }
+
+            /* drop old shard */
+            SendCommandToWorker(workerName, workerPort, dropQuery->data);
+        }
+
+        /* delete shard row */
+        DeleteShardRow(oldShardId);
+    }
+}
+
+
+/*
+ * In case of failure, TryDropSplitShardsOnFailure drops, in a best effort manner,
+ * the split children shards that were created on worker nodes during the workflow.
+ */
+static void
+TryDropSplitShardsOnFailure(HTAB *mapOfShardToPlacementCreatedByWorkflow)
+{
+    HASH_SEQ_STATUS status;
+    ShardCreatedByWorkflowEntry *entry;
+
+    hash_seq_init(&status, mapOfShardToPlacementCreatedByWorkflow);
+    while ((entry = (ShardCreatedByWorkflowEntry *) hash_seq_search(&status)) != 0)
+    {
+        ShardInterval *shardInterval = entry->shardIntervalKey;
+        WorkerNode *workerPlacementNode = entry->workerNodeValue;
+
+        char *qualifiedShardName = ConstructQualifiedShardName(shardInterval);
+        StringInfo dropShardQuery = makeStringInfo();
+
+        /* Caller enforces that foreign tables cannot be split (use DROP_REGULAR_TABLE_COMMAND) */
+        appendStringInfo(dropShardQuery, DROP_REGULAR_TABLE_COMMAND,
+                         qualifiedShardName);
+
+        int connectionFlags = FOR_DDL;
+        connectionFlags |= OUTSIDE_TRANSACTION;
+        MultiConnection *connection = GetNodeUserDatabaseConnection(
+            connectionFlags,
+            workerPlacementNode->workerName,
+            workerPlacementNode->workerPort,
+            CurrentUserName(),
+            NULL /* databaseName */);
+
+        /*
+         * Perform a drop in best effort manner.
+         * The shard may or may not exist and the connection could have died.
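+         * ExecuteOptionalRemoteCommand reports such failures as warnings
+         * instead of raising an error, which is what keeps this cleanup
+         * best-effort.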
+         */
+        ExecuteOptionalRemoteCommand(
+            connection,
+            dropShardQuery->data,
+            NULL /* pgResult */);
+    }
+}
diff --git a/src/backend/distributed/operations/worker_shard_copy.c b/src/backend/distributed/operations/worker_shard_copy.c
new file mode 100644
index 000000000..9239caffb
--- /dev/null
+++ b/src/backend/distributed/operations/worker_shard_copy.c
@@ -0,0 +1,483 @@
+/*-------------------------------------------------------------------------
+ *
+ * worker_shard_copy.c
+ *	 Functions for copying a shard to the destination.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+#include "libpq-fe.h"
+#include "commands/copy.h"
+#include "nodes/makefuncs.h"
+#include "parser/parse_relation.h"
+#include "utils/lsyscache.h"
+#include "utils/builtins.h"
+#include "distributed/remote_commands.h"
+#include "distributed/worker_shard_copy.h"
+#include "distributed/commands/multi_copy.h"
+#include "distributed/local_multi_copy.h"
+#include "distributed/worker_manager.h"
+#include "distributed/connection_management.h"
+#include "distributed/relation_utils.h"
+#include "distributed/version_compat.h"
+#include "distributed/local_executor.h"
+
+/*
+ * LocalCopyBuffer is used in copy callback to return the copied rows.
+ * The reason this is a global variable is that we cannot pass an additional
+ * argument to the copy callback.
+ */
+static StringInfo LocalCopyBuffer;
+
+typedef struct ShardCopyDestReceiver
+{
+	/* public DestReceiver interface */
+	DestReceiver pub;
+
+	/* Destination Relation Name */
+	List *destinationShardFullyQualifiedName;
+
+	/* descriptor of the tuples that are sent to the worker */
+	TupleDesc tupleDescriptor;
+
+	/* state on how to copy out data types */
+	CopyOutState copyOutState;
+	FmgrInfo *columnOutputFunctions;
+
+	/* number of tuples sent */
+	int64 tuplesSent;
+
+	/* destination node id */
+	uint32_t destinationNodeId;
+
+	/* local copy if destination shard in same node */
+	bool useLocalCopy;
+
+	/* EState for per-tuple memory allocation */
+	EState *executorState;
+
+	/*
+	 * Connection for destination shard (NULL if useLocalCopy is true)
+	 */
+	MultiConnection *connection;
+} ShardCopyDestReceiver;
+
+static bool ShardCopyDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest);
+static void ShardCopyDestReceiverStartup(DestReceiver *dest, int operation,
+										 TupleDesc inputTupleDescriptor);
+static void ShardCopyDestReceiverShutdown(DestReceiver *destReceiver);
+static void ShardCopyDestReceiverDestroy(DestReceiver *destReceiver);
+static bool CanUseLocalCopy(uint32_t destinationNodeId);
+static StringInfo ConstructShardCopyStatement(List *destinationShardFullyQualifiedName,
+											  bool useBinaryFormat);
+static void WriteLocalTuple(TupleTableSlot *slot, ShardCopyDestReceiver *copyDest);
+static int ReadFromLocalBufferCallback(void *outBuf, int minRead, int maxRead);
+static void LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState
+							 localCopyOutState);
+static void ConnectToRemoteAndStartCopy(ShardCopyDestReceiver *copyDest);
+
+static bool
+CanUseLocalCopy(uint32_t destinationNodeId)
+{
+	/* If the destination node is the same as the source node, use local copy */
+	return GetLocalNodeId() == (int32) destinationNodeId;
+}
+
+
+/* Connect to the node with the destination shard and trigger copy start.
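+ * The copy is started by sending the COPY ... FROM STDIN statement built by
+ * ConstructShardCopyStatement(); the destination must answer with
+ * PGRES_COPY_IN before any data is streamed.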
+ */
+static void
+ConnectToRemoteAndStartCopy(ShardCopyDestReceiver *copyDest)
+{
+	int connectionFlags = OUTSIDE_TRANSACTION;
+	char *currentUser = CurrentUserName();
+	WorkerNode *workerNode = FindNodeWithNodeId(copyDest->destinationNodeId,
+												false /* missingOk */);
+	copyDest->connection = GetNodeUserDatabaseConnection(connectionFlags,
+														 workerNode->workerName,
+														 workerNode->workerPort,
+														 currentUser,
+														 NULL /* database (current) */);
+	ClaimConnectionExclusively(copyDest->connection);
+
+	StringInfo copyStatement = ConstructShardCopyStatement(
+		copyDest->destinationShardFullyQualifiedName,
+		copyDest->copyOutState->binary);
+
+	if (!SendRemoteCommand(copyDest->connection, copyStatement->data))
+	{
+		ReportConnectionError(copyDest->connection, ERROR);
+	}
+
+	PGresult *result = GetRemoteCommandResult(copyDest->connection,
+											  true /* raiseInterrupts */);
+	if (PQresultStatus(result) != PGRES_COPY_IN)
+	{
+		ReportResultError(copyDest->connection, result, ERROR);
+	}
+
+	PQclear(result);
+}
+
+
+/*
+ * CreateShardCopyDestReceiver creates a DestReceiver that copies into
+ * a destinationShardFullyQualifiedName on destinationNodeId.
+ */
+DestReceiver *
+CreateShardCopyDestReceiver(EState *executorState,
+							List *destinationShardFullyQualifiedName,
+							uint32_t destinationNodeId)
+{
+	ShardCopyDestReceiver *copyDest = (ShardCopyDestReceiver *) palloc0(
+		sizeof(ShardCopyDestReceiver));
+
+	/* set up the DestReceiver function pointers */
+	copyDest->pub.receiveSlot = ShardCopyDestReceiverReceive;
+	copyDest->pub.rStartup = ShardCopyDestReceiverStartup;
+	copyDest->pub.rShutdown = ShardCopyDestReceiverShutdown;
+	copyDest->pub.rDestroy = ShardCopyDestReceiverDestroy;
+	copyDest->pub.mydest = DestCopyOut;
+	copyDest->executorState = executorState;
+
+	copyDest->destinationNodeId = destinationNodeId;
+	copyDest->destinationShardFullyQualifiedName = destinationShardFullyQualifiedName;
+	copyDest->tuplesSent = 0;
+	copyDest->connection = NULL;
+	copyDest->useLocalCopy = CanUseLocalCopy(destinationNodeId);
+
+	return (DestReceiver *) copyDest;
+}
+
+
+/*
+ * ShardCopyDestReceiverReceive implements the receiveSlot function of
+ * ShardCopyDestReceiver. It takes a TupleTableSlot and sends the contents to
+ * the appropriate destination node.
+ */
+static bool
+ShardCopyDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
+{
+	ShardCopyDestReceiver *copyDest = (ShardCopyDestReceiver *) dest;
+
+	/*
+	 * Switch to a per-tuple memory context. When used in the
+	 * context of Split Copy, this is a no-op as the switch is already done.
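+	 *
+	 * Note that the remote connection is established lazily on the first
+	 * tuple, so a receiver that never gets a tuple never connects.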
+	 */
+	EState *executorState = copyDest->executorState;
+	MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState);
+	MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext);
+
+	/* If remote copy, connect lazily and initiate copy */
+	if (copyDest->tuplesSent == 0 && (!copyDest->useLocalCopy))
+	{
+		ConnectToRemoteAndStartCopy(copyDest);
+	}
+
+	slot_getallattrs(slot);
+	Datum *columnValues = slot->tts_values;
+	bool *columnNulls = slot->tts_isnull;
+
+	CopyOutState copyOutState = copyDest->copyOutState;
+	if (copyDest->useLocalCopy)
+	{
+		WriteLocalTuple(slot, copyDest);
+		if (copyOutState->fe_msgbuf->len > LocalCopyFlushThresholdByte)
+		{
+			LocalCopyToShard(copyDest, copyOutState);
+		}
+	}
+	else
+	{
+		resetStringInfo(copyOutState->fe_msgbuf);
+		if (copyDest->copyOutState->binary && copyDest->tuplesSent == 0)
+		{
+			AppendCopyBinaryHeaders(copyDest->copyOutState);
+		}
+
+		AppendCopyRowData(columnValues,
+						  columnNulls,
+						  copyDest->tupleDescriptor,
+						  copyOutState,
+						  copyDest->columnOutputFunctions,
+						  NULL /* columnCoercionPaths */);
+		if (!PutRemoteCopyData(copyDest->connection, copyOutState->fe_msgbuf->data,
+							   copyOutState->fe_msgbuf->len))
+		{
+			char *destinationShardSchemaName = linitial(
+				copyDest->destinationShardFullyQualifiedName);
+			char *destinationShardRelationName = lsecond(
+				copyDest->destinationShardFullyQualifiedName);
+
+			char *errorMessage = PQerrorMessage(copyDest->connection->pgConn);
+			ereport(ERROR, (errcode(ERRCODE_IO_ERROR),
+							errmsg("Failed to COPY to shard %s.%s : %s",
+								   destinationShardSchemaName,
+								   destinationShardRelationName,
+								   errorMessage),
+							errdetail("failed to send %d bytes %s on node %u",
+									  copyOutState->fe_msgbuf->len,
+									  copyOutState->fe_msgbuf->data,
+									  copyDest->destinationNodeId)));
+		}
+	}
+
+	MemoryContextSwitchTo(oldContext);
+	ResetPerTupleExprContext(executorState);
+
+	copyDest->tuplesSent++;
+	return true;
+}
+
+
+/*
+ * ShardCopyDestReceiverStartup implements the rStartup interface of ShardCopyDestReceiver.
+ */
+static void
+ShardCopyDestReceiverStartup(DestReceiver *dest, int operation, TupleDesc
+							 inputTupleDescriptor)
+{
+	ShardCopyDestReceiver *copyDest = (ShardCopyDestReceiver *) dest;
+	copyDest->tupleDescriptor = inputTupleDescriptor;
+	copyDest->tuplesSent = 0;
+
+	const char *delimiterCharacter = "\t";
+	const char *nullPrintCharacter = "\\N";
+
+	/* define how tuples will be serialised */
+	CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
+	copyOutState->binary = EnableBinaryProtocol && CanUseBinaryCopyFormat(
+		inputTupleDescriptor);
+	copyOutState->null_print = (char *) nullPrintCharacter;
+	copyOutState->null_print_client = (char *) nullPrintCharacter;
+	copyOutState->fe_msgbuf = makeStringInfo();
+	copyOutState->delim = (char *) delimiterCharacter;
+	copyOutState->rowcontext = GetPerTupleMemoryContext(copyDest->executorState);
+	copyDest->columnOutputFunctions = ColumnOutputFunctions(inputTupleDescriptor,
+															copyOutState->binary);
+	copyDest->copyOutState = copyOutState;
+}
+
+
+/*
+ * ShardCopyDestReceiverShutdown implements the rShutdown interface of
+ * ShardCopyDestReceiver. It ends all open COPY operations, copying any pending
+ * data in the buffer.
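+ *
+ * For the remote case this appends the binary footer when needed, sends the
+ * COPY end marker, and then checks the final command result for errors.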
+ */
+static void
+ShardCopyDestReceiverShutdown(DestReceiver *dest)
+{
+	ShardCopyDestReceiver *copyDest = (ShardCopyDestReceiver *) dest;
+
+	if (copyDest->useLocalCopy)
+	{
+		if (copyDest->copyOutState != NULL &&
+			copyDest->copyOutState->fe_msgbuf->len > 0)
+		{
+			/* end the COPY input */
+			LocalCopyToShard(copyDest, copyDest->copyOutState);
+		}
+	}
+	else if (copyDest->connection != NULL)
+	{
+		resetStringInfo(copyDest->copyOutState->fe_msgbuf);
+		if (copyDest->copyOutState->binary)
+		{
+			AppendCopyBinaryFooters(copyDest->copyOutState);
+		}
+
+		/* end the COPY input */
+		if (!PutRemoteCopyEnd(copyDest->connection, NULL /* errormsg */))
+		{
+			char *destinationShardSchemaName = linitial(
+				copyDest->destinationShardFullyQualifiedName);
+			char *destinationShardRelationName = lsecond(
+				copyDest->destinationShardFullyQualifiedName);
+
+			ereport(ERROR, (errcode(ERRCODE_IO_ERROR),
+							errmsg("Failed to COPY to destination shard %s.%s",
+								   destinationShardSchemaName,
+								   destinationShardRelationName),
+							errdetail("failed to send %d bytes %s on node %u",
+									  copyDest->copyOutState->fe_msgbuf->len,
+									  copyDest->copyOutState->fe_msgbuf->data,
+									  copyDest->destinationNodeId)));
+		}
+
+		/* check whether there were any COPY errors */
+		PGresult *result = GetRemoteCommandResult(copyDest->connection,
+												  true /* raiseInterrupts */);
+		if (PQresultStatus(result) != PGRES_COMMAND_OK)
+		{
+			ReportCopyError(copyDest->connection, result);
+		}
+
+		PQclear(result);
+		ForgetResults(copyDest->connection);
+		CloseConnection(copyDest->connection);
+	}
+}
+
+
+/*
+ * ShardCopyDestReceiverDestroy frees the DestReceiver.
+ */
+static void
+ShardCopyDestReceiverDestroy(DestReceiver *dest)
+{
+	ShardCopyDestReceiver *copyDest = (ShardCopyDestReceiver *) dest;
+
+	if (copyDest->copyOutState)
+	{
+		pfree(copyDest->copyOutState);
+	}
+
+	if (copyDest->columnOutputFunctions)
+	{
+		pfree(copyDest->columnOutputFunctions);
+	}
+
+	pfree(copyDest);
+}
+
+
+/*
+ * ConstructShardCopyStatement constructs the text of a COPY statement
+ * for copying into the destination shard.
+ */
+static StringInfo
+ConstructShardCopyStatement(List *destinationShardFullyQualifiedName, bool
+							useBinaryFormat)
+{
+	char *destinationShardSchemaName = linitial(destinationShardFullyQualifiedName);
+	char *destinationShardRelationName = lsecond(destinationShardFullyQualifiedName);
+
+	StringInfo command = makeStringInfo();
+	appendStringInfo(command, "COPY %s.%s FROM STDIN",
+					 quote_identifier(destinationShardSchemaName), quote_identifier(
+						 destinationShardRelationName));
+
+	if (useBinaryFormat)
+	{
+		appendStringInfo(command, " WITH (format binary);");
+	}
+	else
+	{
+		appendStringInfo(command, ";");
+	}
+
+	return command;
+}
+
+
+/* Write Tuple to Local Shard.
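+ * Rows are serialised into fe_msgbuf; the buffer is flushed into the shard
+ * by LocalCopyToShard() once it grows past LocalCopyFlushThresholdByte, and
+ * any remainder is flushed at shutdown.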
*/ +static void +WriteLocalTuple(TupleTableSlot *slot, ShardCopyDestReceiver *copyDest) +{ + CopyOutState localCopyOutState = copyDest->copyOutState; + + /* + * Since we are doing a local copy, the following statements should + * use local execution to see the changes + */ + SetLocalExecutionStatus(LOCAL_EXECUTION_REQUIRED); + + bool isBinaryCopy = localCopyOutState->binary; + bool shouldAddBinaryHeaders = (isBinaryCopy && localCopyOutState->fe_msgbuf->len == + 0); + if (shouldAddBinaryHeaders) + { + AppendCopyBinaryHeaders(localCopyOutState); + } + + Datum *columnValues = slot->tts_values; + bool *columnNulls = slot->tts_isnull; + FmgrInfo *columnOutputFunctions = copyDest->columnOutputFunctions; + + AppendCopyRowData(columnValues, columnNulls, copyDest->tupleDescriptor, + localCopyOutState, columnOutputFunctions, + NULL /* columnCoercionPaths */); +} + + +/* + * LocalCopyToShard performs local copy for the given destination shard. + */ +static void +LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState localCopyOutState) +{ + bool isBinaryCopy = localCopyOutState->binary; + if (isBinaryCopy) + { + AppendCopyBinaryFooters(localCopyOutState); + } + + /* + * Set the buffer as a global variable to allow ReadFromLocalBufferCallback + * to read from it. We cannot pass additional arguments to + * ReadFromLocalBufferCallback. + */ + LocalCopyBuffer = localCopyOutState->fe_msgbuf; + + char *destinationShardSchemaName = linitial( + copyDest->destinationShardFullyQualifiedName); + char *destinationShardRelationName = lsecond( + copyDest->destinationShardFullyQualifiedName); + + Oid destinationSchemaOid = get_namespace_oid(destinationShardSchemaName, + false /* missing_ok */); + Oid destinationShardOid = get_relname_relid(destinationShardRelationName, + destinationSchemaOid); + + DefElem *binaryFormatOption = NULL; + if (isBinaryCopy) + { + binaryFormatOption = makeDefElem("format", (Node *) makeString("binary"), -1); + } + + Relation shard = table_open(destinationShardOid, RowExclusiveLock); + ParseState *pState = make_parsestate(NULL /* parentParseState */); + (void) addRangeTableEntryForRelation(pState, shard, AccessShareLock, + NULL /* alias */, false /* inh */, + false /* inFromCl */); + + List *options = (isBinaryCopy) ? list_make1(binaryFormatOption) : NULL; + CopyFromState cstate = BeginCopyFrom_compat(pState, shard, + NULL /* whereClause */, + NULL /* fileName */, + false /* is_program */, + ReadFromLocalBufferCallback, + NULL /* attlist (NULL is all columns) */, + options); + CopyFrom(cstate); + EndCopyFrom(cstate); + resetStringInfo(localCopyOutState->fe_msgbuf); + + table_close(shard, NoLock); + free_parsestate(pState); +} + + +/* + * ReadFromLocalBufferCallback is the copy callback. + * It always tries to copy maxRead bytes. 
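+ * For example, with len = 10, cursor = 4 and maxRead = 8 the callback
+ * copies min(10 - 4, 8) = 6 bytes into outBuf and advances the cursor to 10.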
+ */ +static int +ReadFromLocalBufferCallback(void *outBuf, int minRead, int maxRead) +{ + int bytesRead = 0; + int avail = LocalCopyBuffer->len - LocalCopyBuffer->cursor; + int bytesToRead = Min(avail, maxRead); + if (bytesToRead > 0) + { + memcpy_s(outBuf, bytesToRead, + &LocalCopyBuffer->data[LocalCopyBuffer->cursor], bytesToRead); + } + bytesRead += bytesToRead; + LocalCopyBuffer->cursor += bytesToRead; + + return bytesRead; +} diff --git a/src/backend/distributed/operations/worker_split_copy_udf.c b/src/backend/distributed/operations/worker_split_copy_udf.c new file mode 100644 index 000000000..2b33654f9 --- /dev/null +++ b/src/backend/distributed/operations/worker_split_copy_udf.c @@ -0,0 +1,264 @@ +/*------------------------------------------------------------------------- + * + * worker_split_copy_udf.c + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "pg_version_compat.h" +#include "utils/lsyscache.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "distributed/utils/array_type.h" +#include "distributed/listutils.h" +#include "distributed/multi_executor.h" +#include "distributed/worker_shard_copy.h" +#include "distributed/intermediate_results.h" +#include "distributed/citus_ruleutils.h" + +PG_FUNCTION_INFO_V1(worker_split_copy); + +typedef struct SplitCopyInfo +{ + uint64 destinationShardId; /* destination shard id */ + Datum destinationShardMinHashValue; /* min hash value of destination shard */ + Datum destinationShardMaxHashValue; /* max hash value of destination shard */ + uint32_t destinationShardNodeId; /* node where split child shard is to be placed */ +} SplitCopyInfo; + +static void ParseSplitCopyInfoDatum(Datum splitCopyInfoDatum, + SplitCopyInfo **splitCopyInfo); +static DestReceiver ** CreateShardCopyDestReceivers(EState *estate, + ShardInterval * + shardIntervalToSplitCopy, + List *splitCopyInfoList); +static DestReceiver * CreatePartitionedSplitCopyDestReceiver(EState *executor, + ShardInterval * + shardIntervalToSplitCopy, + List *splitCopyInfoList); +static void BuildMinMaxRangeArrays(List *splitCopyInfoList, ArrayType **minValueArray, + ArrayType **maxValueArray); + +/* + * worker_split_copy(source_shard_id bigint, splitCopyInfo pg_catalog.split_copy_info[]) + * UDF to split copy shard to list of destination shards. + * 'source_shard_id' : Source ShardId to split copy. 
+ * 'splitCopyInfos' : Array of Split Copy Info (destination_shard's id, min/max ranges and node_id)
+ */
+Datum
+worker_split_copy(PG_FUNCTION_ARGS)
+{
+	uint64 shardIdToSplitCopy = DatumGetUInt64(PG_GETARG_DATUM(0));
+	ShardInterval *shardIntervalToSplitCopy = LoadShardInterval(shardIdToSplitCopy);
+
+	ArrayType *splitCopyInfoArrayObject = PG_GETARG_ARRAYTYPE_P(1);
+	bool arrayHasNull = ARR_HASNULL(splitCopyInfoArrayObject);
+	if (arrayHasNull)
+	{
+		ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+						errmsg(
+							"pg_catalog.split_copy_info array cannot contain null values")));
+	}
+
+	const int slice_ndim = 0;
+	ArrayMetaState *mState = NULL;
+	ArrayIterator copyInfo_iterator = array_create_iterator(splitCopyInfoArrayObject,
+															slice_ndim,
+															mState);
+	Datum copyInfoDatum = 0;
+	bool isnull = false;
+	List *splitCopyInfoList = NIL;
+	while (array_iterate(copyInfo_iterator, &copyInfoDatum, &isnull))
+	{
+		SplitCopyInfo *splitCopyInfo = NULL;
+		ParseSplitCopyInfoDatum(copyInfoDatum, &splitCopyInfo);
+
+		splitCopyInfoList = lappend(splitCopyInfoList, splitCopyInfo);
+	}
+
+	EState *executor = CreateExecutorState();
+	DestReceiver *splitCopyDestReceiver = CreatePartitionedSplitCopyDestReceiver(executor,
+																				 shardIntervalToSplitCopy,
+																				 splitCopyInfoList);
+
+	Oid sourceShardToCopySchemaOId = get_rel_namespace(
+		shardIntervalToSplitCopy->relationId);
+	char *sourceShardToCopySchemaName = get_namespace_name(sourceShardToCopySchemaOId);
+	char *sourceShardToCopyName = get_rel_name(shardIntervalToSplitCopy->relationId);
+	AppendShardIdToName(&sourceShardToCopyName, shardIdToSplitCopy);
+	char *sourceShardToCopyQualifiedName = quote_qualified_identifier(
+		sourceShardToCopySchemaName,
+		sourceShardToCopyName);
+
+	StringInfo selectShardQueryForCopy = makeStringInfo();
+	appendStringInfo(selectShardQueryForCopy,
+					 "SELECT * FROM %s;", sourceShardToCopyQualifiedName);
+
+	ParamListInfo params = NULL;
+	ExecuteQueryStringIntoDestReceiver(selectShardQueryForCopy->data, params,
+									   (DestReceiver *) splitCopyDestReceiver);
+
+	FreeExecutorState(executor);
+
+	PG_RETURN_VOID();
+}
+
+
+/* Parse a single SplitCopyInfo tuple */
+static void
+ParseSplitCopyInfoDatum(Datum splitCopyInfoDatum, SplitCopyInfo **splitCopyInfo)
+{
+	HeapTupleHeader dataTuple = DatumGetHeapTupleHeader(splitCopyInfoDatum);
+
+	SplitCopyInfo *copyInfo = palloc0(sizeof(SplitCopyInfo));
+
+	bool isnull = false;
+	Datum destinationShardIdDatum = GetAttributeByName(dataTuple, "destination_shard_id",
+													   &isnull);
+	if (isnull)
+	{
+		ereport(ERROR, (errmsg(
+							"destination_shard_id for pg_catalog.split_copy_info cannot be null.")));
+	}
+	copyInfo->destinationShardId = DatumGetUInt64(destinationShardIdDatum);
+
+	Datum minValueDatum = GetAttributeByName(dataTuple, "destination_shard_min_value",
+											 &isnull);
+	if (isnull)
+	{
+		ereport(ERROR, (errmsg(
+							"destination_shard_min_value for pg_catalog.split_copy_info cannot be null.")));
+	}
+	copyInfo->destinationShardMinHashValue = minValueDatum;
+
+	Datum maxValueDatum = GetAttributeByName(dataTuple, "destination_shard_max_value",
+											 &isnull);
+	if (isnull)
+	{
+		ereport(ERROR, (errmsg(
+							"destination_shard_max_value for pg_catalog.split_copy_info cannot be null.")));
+	}
+	copyInfo->destinationShardMaxHashValue = maxValueDatum;
+
+	Datum nodeIdDatum = GetAttributeByName(dataTuple, "destination_shard_node_id",
+										   &isnull);
+	if (isnull)
+	{
+		ereport(ERROR, (errmsg(
+							"destination_shard_node_id for pg_catalog.split_copy_info cannot be null.")));
+	}
+	copyInfo->destinationShardNodeId =
+		DatumGetInt32(nodeIdDatum);
+
+	*splitCopyInfo = copyInfo;
+}
+
+
+/* Build 'min/max' hash range arrays for PartitionedResultDestReceiver */
+static void
+BuildMinMaxRangeArrays(List *splitCopyInfoList, ArrayType **minValueArray,
+					   ArrayType **maxValueArray)
+{
+	int partitionCount = list_length(splitCopyInfoList);
+
+	Datum *minValues = palloc0(partitionCount * sizeof(Datum));
+	bool *minValueNulls = palloc0(partitionCount * sizeof(bool));
+	Datum *maxValues = palloc0(partitionCount * sizeof(Datum));
+	bool *maxValueNulls = palloc0(partitionCount * sizeof(bool));
+
+	SplitCopyInfo *splitCopyInfo = NULL;
+	int index = 0;
+	foreach_ptr(splitCopyInfo, splitCopyInfoList)
+	{
+		minValues[index] = splitCopyInfo->destinationShardMinHashValue;
+		maxValues[index] = splitCopyInfo->destinationShardMaxHashValue;
+
+		/* Caller enforces that min/max values will be not-null */
+		minValueNulls[index] = false;
+		maxValueNulls[index] = false;
+		index++;
+	}
+
+	*minValueArray = CreateArrayFromDatums(minValues, minValueNulls, partitionCount,
+										   TEXTOID);
+	*maxValueArray = CreateArrayFromDatums(maxValues, maxValueNulls, partitionCount,
+										   TEXTOID);
+}
+
+
+/*
+ * Create the underlying ShardCopyDestReceivers for PartitionedResultDestReceiver.
+ * Each ShardCopyDestReceiver is responsible for copying the tuples from the source
+ * shard that fall within its min/max range to the specified destination shard.
+ */
+static DestReceiver **
+CreateShardCopyDestReceivers(EState *estate, ShardInterval *shardIntervalToSplitCopy,
+							 List *splitCopyInfoList)
+{
+	DestReceiver **shardCopyDests = palloc0(splitCopyInfoList->length *
+											sizeof(DestReceiver *));
+
+	SplitCopyInfo *splitCopyInfo = NULL;
+	int index = 0;
+	char *sourceShardNamePrefix = get_rel_name(shardIntervalToSplitCopy->relationId);
+	foreach_ptr(splitCopyInfo, splitCopyInfoList)
+	{
+		Oid destinationShardSchemaOid = get_rel_namespace(
+			shardIntervalToSplitCopy->relationId);
+		char *destinationShardSchemaName = get_namespace_name(destinationShardSchemaOid);
+		char *destinationShardNameCopy = pstrdup(sourceShardNamePrefix);
+		AppendShardIdToName(&destinationShardNameCopy, splitCopyInfo->destinationShardId);
+
+		DestReceiver *shardCopyDest = CreateShardCopyDestReceiver(
+			estate,
+			list_make2(destinationShardSchemaName, destinationShardNameCopy),
+			splitCopyInfo->destinationShardNodeId);
+
+		shardCopyDests[index] = shardCopyDest;
+		index++;
+	}
+
+	return shardCopyDests;
+}
+
+
+/* Create PartitionedSplitCopyDestReceiver along with the underlying ShardCopyDestReceivers */
+static DestReceiver *
+CreatePartitionedSplitCopyDestReceiver(EState *estate,
+									   ShardInterval *shardIntervalToSplitCopy,
+									   List *splitCopyInfoList)
+{
+	/* Create the underlying ShardCopyDestReceivers */
+	DestReceiver **shardCopyDestReceivers = CreateShardCopyDestReceivers(
+		estate,
+		shardIntervalToSplitCopy,
+		splitCopyInfoList);
+
+	/* construct an artificial CitusTableCacheEntry for routing tuples to the appropriate ShardCopyReceiver */
+	ArrayType *minValuesArray = NULL;
+	ArrayType *maxValuesArray = NULL;
+	BuildMinMaxRangeArrays(splitCopyInfoList, &minValuesArray, &maxValuesArray);
+	CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(
+		shardIntervalToSplitCopy->relationId);
+	char partitionMethod = cacheEntry->partitionMethod;
+	Var *partitionColumn = cacheEntry->partitionColumn;
+
+	CitusTableCacheEntry *shardSearchInfo =
+		QueryTupleShardSearchInfo(minValuesArray, maxValuesArray,
+								  partitionMethod, partitionColumn);
+
+	/* Construct PartitionedResultDestReceiver from cache and underlying
+	 * ShardCopyDestReceivers */
+	int partitionColumnIndex = partitionColumn->varattno - 1;
+	int partitionCount = splitCopyInfoList->length;
+	DestReceiver *splitCopyDestReceiver = CreatePartitionedResultDestReceiver(
+		partitionColumnIndex,
+		partitionCount,
+		shardSearchInfo,
+		shardCopyDestReceivers,
+		true /* lazyStartup */,
+		false /* allowNullPartitionColumnValues */);
+
+	return splitCopyDestReceiver;
+}
diff --git a/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql b/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql
index 625a62ca7..a9c9108a0 100644
--- a/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql
+++ b/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql
@@ -65,3 +65,5 @@ DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_
                                                      OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
                                                      OUT global_pid int8);
 #include "udfs/get_all_active_transactions/11.1-1.sql"
+#include "udfs/citus_split_shard_by_split_points/11.1-1.sql"
+#include "udfs/worker_split_copy/11.1-1.sql"
diff --git a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql
index 5c0390061..f9b2f19d5 100644
--- a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql
+++ b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql
@@ -63,6 +63,15 @@ ALTER EXTENSION citus ADD FUNCTION citus_internal.upgrade_columnar_storage;
 ALTER EXTENSION citus ADD FUNCTION citus_internal.downgrade_columnar_storage;
 ALTER EXTENSION citus ADD FUNCTION citus_internal.columnar_ensure_am_depends_catalog;
+DROP FUNCTION pg_catalog.citus_split_shard_by_split_points(
+    shard_id bigint,
+    split_points text[],
+    node_ids integer[],
+    shard_transfer_mode citus.shard_transfer_mode);
+DROP FUNCTION pg_catalog.worker_split_copy(
+    source_shard_id bigint,
+    splitCopyInfos pg_catalog.split_copy_info[]);
+DROP TYPE pg_catalog.split_copy_info;
 DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
                                                      OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
diff --git a/src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/11.1-1.sql b/src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/11.1-1.sql
new file mode 100644
index 000000000..36624c40e
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/11.1-1.sql
@@ -0,0 +1,14 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_split_shard_by_split_points(
+    shard_id bigint,
+    split_points text[],
+    -- A 'nodeId' is a uint32 in Citus, with range [1, 4294967295], but postgres does not have unsigned type support.
+    -- Use integer (consistent with other previously defined UDFs that take nodeId as integer) as for all practical purposes it is big enough.
+    node_ids integer[],
+    -- Three modes to be implemented: block_writes, force_logical and auto.
+    -- Currently, the default / only supported mode is block_writes.
+    shard_transfer_mode citus.shard_transfer_mode default 'block_writes')
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_split_shard_by_split_points$$;
+COMMENT ON FUNCTION pg_catalog.citus_split_shard_by_split_points(shard_id bigint, split_points text[], node_ids integer[], citus.shard_transfer_mode)
+    IS 'split a shard using split mode.';
diff --git a/src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/latest.sql b/src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/latest.sql
new file mode 100644
index 000000000..36624c40e
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/latest.sql
@@ -0,0 +1,14 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_split_shard_by_split_points(
+    shard_id bigint,
+    split_points text[],
+    -- A 'nodeId' is a uint32 in Citus, with range [1, 4294967295], but postgres does not have unsigned type support.
+    -- Use integer (consistent with other previously defined UDFs that take nodeId as integer) as for all practical purposes it is big enough.
+    node_ids integer[],
+    -- Three modes to be implemented: block_writes, force_logical and auto.
+    -- Currently, the default / only supported mode is block_writes.
+    shard_transfer_mode citus.shard_transfer_mode default 'block_writes')
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_split_shard_by_split_points$$;
+COMMENT ON FUNCTION pg_catalog.citus_split_shard_by_split_points(shard_id bigint, split_points text[], node_ids integer[], citus.shard_transfer_mode)
+    IS 'split a shard using split mode.';
diff --git a/src/backend/distributed/sql/udfs/worker_split_copy/11.1-1.sql b/src/backend/distributed/sql/udfs/worker_split_copy/11.1-1.sql
new file mode 100644
index 000000000..0ecad4a07
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/worker_split_copy/11.1-1.sql
@@ -0,0 +1,22 @@
+-- We want to create the type in pg_catalog but doing that leads to an error:
+--   ERROR:  permission denied to create "pg_catalog.split_copy_info"
+--   DETAIL:  System catalog modifications are currently disallowed.
+-- As a workaround, we create the type in the citus schema and then later move it to pg_catalog.
+DROP TYPE IF EXISTS citus.split_copy_info;
+CREATE TYPE citus.split_copy_info AS (
+    destination_shard_id bigint,
+    destination_shard_min_value text,
+    destination_shard_max_value text,
+    -- A 'nodeId' is a uint32 in Citus, with range [1, 4294967295], but postgres does not have unsigned type support.
+    -- Use integer (consistent with other previously defined UDFs that take nodeId as integer) as for all practical purposes it is big enough.
+    destination_shard_node_id integer);
+ALTER TYPE citus.split_copy_info SET SCHEMA pg_catalog;
+
+CREATE OR REPLACE FUNCTION pg_catalog.worker_split_copy(
+    source_shard_id bigint,
+    splitCopyInfos pg_catalog.split_copy_info[])
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$worker_split_copy$$;
+COMMENT ON FUNCTION pg_catalog.worker_split_copy(source_shard_id bigint, splitCopyInfos pg_catalog.split_copy_info[])
+    IS 'Perform split copy for shard';
diff --git a/src/backend/distributed/sql/udfs/worker_split_copy/latest.sql b/src/backend/distributed/sql/udfs/worker_split_copy/latest.sql
new file mode 100644
index 000000000..0ecad4a07
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/worker_split_copy/latest.sql
@@ -0,0 +1,22 @@
+-- We want to create the type in pg_catalog but doing that leads to an error:
+--   ERROR:  permission denied to create "pg_catalog.split_copy_info"
+--   DETAIL:  System catalog modifications are currently disallowed.
+-- As a workaround, we create the type in the citus schema and then later move it to pg_catalog.
+DROP TYPE IF EXISTS citus.split_copy_info;
+CREATE TYPE citus.split_copy_info AS (
+    destination_shard_id bigint,
+    destination_shard_min_value text,
+    destination_shard_max_value text,
+    -- A 'nodeId' is a uint32 in Citus, with range [1, 4294967295], but postgres does not have unsigned type support.
+    -- Use integer (consistent with other previously defined UDFs that take nodeId as integer) as for all practical purposes it is big enough.
+    destination_shard_node_id integer);
+ALTER TYPE citus.split_copy_info SET SCHEMA pg_catalog;
+
+CREATE OR REPLACE FUNCTION pg_catalog.worker_split_copy(
+    source_shard_id bigint,
+    splitCopyInfos pg_catalog.split_copy_info[])
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$worker_split_copy$$;
+COMMENT ON FUNCTION pg_catalog.worker_split_copy(source_shard_id bigint, splitCopyInfos pg_catalog.split_copy_info[])
+    IS 'Perform split copy for shard';
diff --git a/src/backend/distributed/utils/array_type.c b/src/backend/distributed/utils/array_type.c
index eed2e3fdf..b214d2ee7 100644
--- a/src/backend/distributed/utils/array_type.c
+++ b/src/backend/distributed/utils/array_type.c
@@ -12,8 +12,12 @@
 #include "postgres.h"
 #include "miscadmin.h"
+#include "pg_version_compat.h"
+#include "catalog/pg_type.h"
+#include "nodes/pg_list.h"
 #include "distributed/utils/array_type.h"
 #include "utils/array.h"
+#include "utils/builtins.h"
 #include "utils/lsyscache.h"
@@ -96,3 +100,42 @@ DatumArrayToArrayType(Datum *datumArray, int datumCount, Oid datumTypeId)
 	return arrayObject;
 }
+
+
+/*
+ * IntegerArrayTypeToList converts an integer ArrayType to a list of ints.
+ */
+List *
+IntegerArrayTypeToList(ArrayType *arrayObject)
+{
+	List *list = NULL;
+	Datum *datumObjectArray = DeconstructArrayObject(arrayObject);
+	int arrayObjectCount = ArrayObjectCount(arrayObject);
+
+	for (int index = 0; index < arrayObjectCount; index++)
+	{
+		list = lappend_int(list, DatumGetInt32(datumObjectArray[index]));
+	}
+
+	return list;
+}
+
+
+/*
+ * TextArrayTypeToIntegerList converts a text ArrayType of integer strings
+ * to a list of ints.
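+ * For example, '{-2147483648,1073741823}'::text[] becomes the int list
+ * (-2147483648, 1073741823), each element parsed with pg_strtoint32.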
+ */ +extern List * +TextArrayTypeToIntegerList(ArrayType *arrayObject) +{ + List *list = NULL; + Datum *datumObjectArray = DeconstructArrayObject(arrayObject); + int arrayObjectCount = ArrayObjectCount(arrayObject); + + for (int index = 0; index < arrayObjectCount; index++) + { + char *intAsStr = text_to_cstring(DatumGetTextP(datumObjectArray[index])); + list = lappend_int(list, pg_strtoint32(intAsStr)); + } + + return list; +} diff --git a/src/include/distributed/commands/multi_copy.h b/src/include/distributed/commands/multi_copy.h index 1c5d3e176..8e055a6b7 100644 --- a/src/include/distributed/commands/multi_copy.h +++ b/src/include/distributed/commands/multi_copy.h @@ -182,6 +182,7 @@ extern void CheckCopyPermissions(CopyStmt *copyStatement); extern bool IsCopyResultStmt(CopyStmt *copyStatement); extern void ConversionPathForTypes(Oid inputType, Oid destType, CopyCoercionData *result); extern Datum CoerceColumnValue(Datum inputValue, CopyCoercionData *coercionPath); +extern void ReportCopyError(MultiConnection *connection, PGresult *result); #endif /* MULTI_COPY_H */ diff --git a/src/include/distributed/coordinator_protocol.h b/src/include/distributed/coordinator_protocol.h index 9d687997c..501f5c233 100644 --- a/src/include/distributed/coordinator_protocol.h +++ b/src/include/distributed/coordinator_protocol.h @@ -283,6 +283,9 @@ extern int MasterDropAllShards(Oid relationId, char *schemaName, char *relationN extern Datum master_create_worker_shards(PG_FUNCTION_ARGS); extern Datum isolate_tenant_to_new_shard(PG_FUNCTION_ARGS); +/* function declarations for shard split functionality */ +extern Datum citus_split_shard_by_split_points(PG_FUNCTION_ARGS); + /* function declarations for shard repair functionality */ extern Datum master_copy_shard_placement(PG_FUNCTION_ARGS); diff --git a/src/include/distributed/intermediate_results.h b/src/include/distributed/intermediate_results.h index 791ebdbe7..63eca5ad1 100644 --- a/src/include/distributed/intermediate_results.h +++ b/src/include/distributed/intermediate_results.h @@ -69,12 +69,26 @@ typedef struct NodeToNodeFragmentsTransfer List *fragmentList; } NodeToNodeFragmentsTransfer; +/* Forward Declarations */ +struct CitusTableCacheEntry; /* intermediate_results.c */ extern DestReceiver * CreateRemoteFileDestReceiver(const char *resultId, EState *executorState, List *initialNodeList, bool writeLocalFile); +extern DestReceiver * CreatePartitionedResultDestReceiver(int partitionColumnIndex, + int partitionCount, + CitusTableCacheEntry * + shardSearchInfo, + DestReceiver ** + partitionedDestReceivers, + bool lazyStartup, + bool allowNullPartitionValues); +extern CitusTableCacheEntry * QueryTupleShardSearchInfo(ArrayType *minValuesArray, + ArrayType *maxValuesArray, + char partitionMethod, + Var *partitionColumn); extern void WriteToLocalFile(StringInfo copyData, FileCompat *fileCompat); extern uint64 RemoteFileDestReceiverBytesSent(DestReceiver *destReceiver); extern void SendQueryResultViaCopy(const char *resultId); @@ -83,6 +97,9 @@ extern void RemoveIntermediateResultsDirectories(void); extern int64 IntermediateResultSize(const char *resultId); extern char * QueryResultFileName(const char *resultId); extern char * CreateIntermediateResultsDirectory(void); +extern ArrayType * CreateArrayFromDatums(Datum *datumArray, bool *nullsArray, int + datumCount, Oid typeId); + /* distributed_intermediate_results.c */ extern List ** RedistributeTaskListResults(const char *resultIdPrefix, diff --git a/src/include/distributed/shard_split.h 
b/src/include/distributed/shard_split.h new file mode 100644 index 000000000..790e3d612 --- /dev/null +++ b/src/include/distributed/shard_split.h @@ -0,0 +1,46 @@ +/*------------------------------------------------------------------------- + * + * shard_split.h + * + * API for shard splits. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#ifndef SHARDSPLIT_H_ +#define SHARDSPLIT_H_ + +/* Split Modes supported by Shard Split API */ +typedef enum SplitMode +{ + BLOCKING_SPLIT = 0 +} SplitMode; + +/* + * User Scenario calling Split Shard API. + * The 'SplitOperation' type is used to customize info/error messages based on user scenario. + */ +typedef enum SplitOperation +{ + SHARD_SPLIT_API = 0, + ISOLATE_TENANT_TO_NEW_SHARD +} SplitOperation; + +/* + * SplitShard API to split a given shard (or shard group) using split mode and + * specified split points to a set of destination nodes. + */ +extern void SplitShard(SplitMode splitMode, + SplitOperation splitOperation, + uint64 shardIdToSplit, + List *shardSplitPointsList, + List *nodeIdsForPlacementList); + +/* TODO(niupre): Make all these APIs private when all consumers (Example : ISOLATE_TENANT_TO_NEW_SHARD) directly call 'SplitShard' API. */ +extern void ErrorIfCannotSplitShard(SplitOperation splitOperation, + ShardInterval *sourceShard); +extern void DropShardList(List *shardIntervalList); + +#endif /* SHARDSPLIT_H_ */ diff --git a/src/include/distributed/utils/array_type.h b/src/include/distributed/utils/array_type.h index cb03aafed..4599b8a9f 100644 --- a/src/include/distributed/utils/array_type.h +++ b/src/include/distributed/utils/array_type.h @@ -20,6 +20,7 @@ extern Datum * DeconstructArrayObject(ArrayType *arrayObject); extern int32 ArrayObjectCount(ArrayType *arrayObject); extern ArrayType * DatumArrayToArrayType(Datum *datumArray, int datumCount, Oid datumTypeId); - +extern List * IntegerArrayTypeToList(ArrayType *arrayObject); +extern List * TextArrayTypeToIntegerList(ArrayType *arrayObject); #endif /* CITUS_ARRAY_TYPE_H */ diff --git a/src/include/distributed/worker_shard_copy.h b/src/include/distributed/worker_shard_copy.h new file mode 100644 index 000000000..2ab2775f9 --- /dev/null +++ b/src/include/distributed/worker_shard_copy.h @@ -0,0 +1,22 @@ +/*------------------------------------------------------------------------- + * + * worker_shard_copy.c + * Copy data to destination shard in a push approach. + * + * Copyright (c) Citus Data, Inc. + * + * + *------------------------------------------------------------------------- + */ + +#ifndef WORKER_SHARD_COPY_H_ +#define WORKER_SHARD_COPY_H_ + +/* GUC, determining whether Binary Copy is enabled */ +extern bool EnableBinaryProtocol; + +extern DestReceiver * CreateShardCopyDestReceiver(EState *executorState, + List *destinationShardFullyQualifiedName, + uint32_t destinationNodeId); + +#endif /* WORKER_SHARD_COPY_H_ */ diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile index 3847e67ea..6007d2508 100644 --- a/src/test/regress/Makefile +++ b/src/test/regress/Makefile @@ -43,7 +43,7 @@ output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $ # intermediate, for muscle memory backward compatibility. 
 check: check-full check-enterprise-full
 # check-full triggers all tests that ought to be run routinely
-check-full: check-multi check-multi-mx check-multi-1 check-operations check-follower-cluster check-isolation check-failure
+check-full: check-multi check-multi-mx check-multi-1 check-operations check-follower-cluster check-isolation check-failure check-split

 # check-enterprise-full triggers all enterprise specific tests
 check-enterprise-full: check-enterprise check-enterprise-isolation check-enterprise-failure
@@ -216,6 +216,10 @@ check-columnar-isolation: all $(isolation_test_files)
 	$(pg_regress_multi_check) --load-extension=citus --isolationtester \
 	-- $(MULTI_REGRESS_OPTS) --inputdir=$(citus_abs_srcdir)/build --schedule=$(citus_abs_srcdir)/columnar_isolation_schedule $(EXTRA_TESTS)

+check-split: all
+	$(pg_regress_multi_check) --load-extension=citus \
+	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/split_schedule $(EXTRA_TESTS)
+
 check-failure: all
 	$(pg_regress_multi_check) --load-extension=citus --mitmproxy \
 	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/failure_schedule $(EXTRA_TESTS)
diff --git a/src/test/regress/enterprise_isolation_schedule b/src/test/regress/enterprise_isolation_schedule
index 19eddce72..77134c3b2 100644
--- a/src/test/regress/enterprise_isolation_schedule
+++ b/src/test/regress/enterprise_isolation_schedule
@@ -13,3 +13,5 @@ test: isolation_ref2ref_foreign_keys_enterprise
 test: isolation_pg_send_cancellation
 test: isolation_shard_move_vs_start_metadata_sync
 test: isolation_tenant_isolation
+test: isolation_blocking_shard_split
+test: isolation_blocking_shard_split_with_fkey_to_reference
diff --git a/src/test/regress/expected/citus_split_shard_by_split_points.out b/src/test/regress/expected/citus_split_shard_by_split_points.out
new file mode 100644
index 000000000..743996160
--- /dev/null
+++ b/src/test/regress/expected/citus_split_shard_by_split_points.out
@@ -0,0 +1,459 @@
+/*
+Citus Shard Split Test. The test is modeled on 'shard_move_constraints'.
+Here is a high-level overview of the test plan:
+ 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
+ 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
+ 3. Create Foreign key constraints between the two co-located distributed tables.
+ 4. Load data into the three tables.
+ 5. Move one of the shards for 'sensors' to test ShardMove -> Split.
+ 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
+ 7. Move one of the split shards to test Split -> ShardMove.
+ 8. Split an already-split shard a second time on a different schema.
+*/
+CREATE SCHEMA "citus_split_test_schema";
+CREATE ROLE test_split_role WITH LOGIN;
+GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema" TO test_split_role;
+SET ROLE test_split_role;
+SET search_path TO "citus_split_test_schema";
+SET citus.next_shard_id TO 8981000;
+SET citus.next_placement_id TO 8610000;
+SET citus.shard_count TO 2;
+SET citus.shard_replication_factor TO 1;
+-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc.
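+-- For reference: steps 6 and 8 of the plan above call the split UDF, whose
+-- shape (inferred from the concrete calls later in this file) is, as a sketch:
+--   SELECT pg_catalog.citus_split_shard_by_split_points(
+--       8981000,                               -- shard id to split
+--       ARRAY['-1073741824'],                  -- split points, text-encoded
+--       ARRAY[:worker_1_node, :worker_2_node], -- node id per resulting shard
+--       'block_writes');                       -- transfer mode used throughout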
+CREATE TABLE sensors( + measureid integer, + eventdatetime date, + measure_data jsonb, + meaure_quantity decimal(15, 2), + measure_status char(1), + measure_comment varchar(44), + PRIMARY KEY (measureid, eventdatetime, measure_data)); +CREATE INDEX index_on_sensors ON sensors(lower(measureid::text)); +ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000; +CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status); +CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors; +SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. +-- BEGIN: Create co-located distributed and reference tables. +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; +SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE table_with_index_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key); +ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER table_with_index_rep_identity USING uqx; +SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- END: Create co-located distributed and reference tables. +-- BEGIN : Create Foreign key constraints. +ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- END : Create Foreign key constraints. +-- BEGIN : Load data into tables. +INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i; +SELECT COUNT(*) FROM sensors; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT COUNT(*) FROM reference_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT COUNT(*) FROM colocated_dist_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +-- END: Load data into tables. +-- BEGIN : Display current state. 
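+-- With citus.shard_count = 2, the int32 hash space [-2147483648, 2147483647]
+-- is divided evenly, so each co-located table below should show one shard
+-- covering [-2147483648, -1] and one covering [0, 2147483647].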
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) + ORDER BY logicalrelid, shardminvalue::BIGINT; + shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport +--------------------------------------------------------------------- + 8981000 | sensors | -2147483648 | -1 | localhost | 57637 + 8981001 | sensors | 0 | 2147483647 | localhost | 57638 + 8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637 + 8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638 + 8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637 + 8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638 +(6 rows) + +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_8981000 | fkey_table_to_dist_8981000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981003(measureid) +(1 row) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8981000 | CREATE INDEX hash_index_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981000 | CREATE INDEX index_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (lower((measureid)::text)) + sensors_8981000 | CREATE INDEX index_with_include_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981000 | CREATE UNIQUE INDEX sensors_pkey_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (measureid, eventdatetime, measure_data) +(4 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + table_with_index_rep_identity_8981005 | CREATE UNIQUE INDEX uqx_8981005 ON citus_split_test_schema.table_with_index_rep_identity_8981005 USING btree (key) +(1 row) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + stats_on_sensors + stats_on_sensors_8981000 +(2 rows) + +\c - - - :worker_2_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = 
fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_8981001 | fkey_table_to_dist_8981001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981004(measureid) +(1 row) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8981001 | CREATE INDEX hash_index_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981001 | CREATE INDEX index_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (lower((measureid)::text)) + sensors_8981001 | CREATE INDEX index_with_include_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981001 | CREATE UNIQUE INDEX sensors_pkey_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (measureid, eventdatetime, measure_data) +(4 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + table_with_index_rep_identity_8981006 | CREATE UNIQUE INDEX uqx_8981006 ON citus_split_test_schema.table_with_index_rep_identity_8981006 USING btree (key) +(1 row) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + stats_on_sensors + stats_on_sensors_8981001 +(2 rows) + +-- END : Display current state +-- BEGIN : Move one shard before we split it. +\c - postgres - :master_port +SET ROLE test_split_role; +SET search_path TO "citus_split_test_schema"; +SET citus.next_shard_id TO 8981007; +SET citus.defer_drop_after_shard_move TO OFF; +SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- END : Move one shard before we split it. +-- BEGIN : Set node id variables +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END : Set node id variables +-- BEGIN : Split two shards : One with move and One without move. +-- Perform 2 way split +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981000, + ARRAY['-1073741824'], + ARRAY[:worker_1_node, :worker_2_node], + 'block_writes'); + citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +-- Perform 3 way split +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981001, + ARRAY['536870911', '1610612735'], + ARRAY[:worker_1_node, :worker_1_node, :worker_2_node], + 'block_writes'); + citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +-- END : Split two shards : One with move and One without move. +-- BEGIN : Move a shard post split. 
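+-- The 2-way split above turned shard 8981000, which covered [-2147483648, -1],
+-- into 8981007 covering [-2147483648, -1073741824] and 8981008 covering
+-- [-1073741823, -1]: a split point belongs to the left-hand shard, and the
+-- right-hand shard starts at split point + 1. We now move split shard 8981007.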
+SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- END : Move a shard post split. +-- BEGIN : Display current state. +SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) + ORDER BY logicalrelid, shardminvalue::BIGINT; + shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport +--------------------------------------------------------------------- + 8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638 + 8981008 | sensors | -1073741823 | -1 | localhost | 57638 + 8981013 | sensors | 0 | 536870911 | localhost | 57637 + 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 + 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 + 8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638 + 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 + 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 + 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 + 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 + 8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638 + 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 + 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 + 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 + 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 +(15 rows) + +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_8981013 | fkey_table_to_dist_8981013 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981016(measureid) + sensors_8981014 | fkey_table_to_dist_8981014 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981017(measureid) +(2 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8981013 | CREATE INDEX hash_index_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981013 | CREATE INDEX index_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (lower((measureid)::text)) + sensors_8981013 | CREATE INDEX index_with_include_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, 
eventdatetime, measure_status) + sensors_8981013 | CREATE UNIQUE INDEX sensors_pkey_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (measureid, eventdatetime, measure_data) + sensors_8981014 | CREATE INDEX hash_index_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981014 | CREATE INDEX index_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (lower((measureid)::text)) + sensors_8981014 | CREATE INDEX index_with_include_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981014 | CREATE UNIQUE INDEX sensors_pkey_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (measureid, eventdatetime, measure_data) +(8 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + table_with_index_rep_identity_8981019 | CREATE UNIQUE INDEX uqx_8981019 ON citus_split_test_schema.table_with_index_rep_identity_8981019 USING btree (key) + table_with_index_rep_identity_8981020 | CREATE UNIQUE INDEX uqx_8981020 ON citus_split_test_schema.table_with_index_rep_identity_8981020 USING btree (key) +(2 rows) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + stats_on_sensors + stats_on_sensors_8981013 + stats_on_sensors_8981014 +(3 rows) + +\c - - - :worker_2_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_8981007 | fkey_table_to_dist_8981007 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981009(measureid) + sensors_8981008 | fkey_table_to_dist_8981008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981010(measureid) + sensors_8981015 | fkey_table_to_dist_8981015 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981018(measureid) +(3 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8981007 | CREATE INDEX hash_index_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981007 | CREATE INDEX index_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (lower((measureid)::text)) + sensors_8981007 | CREATE INDEX index_with_include_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981007 | CREATE UNIQUE INDEX sensors_pkey_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (measureid, eventdatetime, measure_data) + sensors_8981008 | CREATE INDEX hash_index_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING hash 
(((measure_data -> 'IsFailed'::text))) + sensors_8981008 | CREATE INDEX index_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (lower((measureid)::text)) + sensors_8981008 | CREATE INDEX index_with_include_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981008 | CREATE UNIQUE INDEX sensors_pkey_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (measureid, eventdatetime, measure_data) + sensors_8981015 | CREATE INDEX hash_index_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981015 | CREATE INDEX index_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (lower((measureid)::text)) + sensors_8981015 | CREATE INDEX index_with_include_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981015 | CREATE UNIQUE INDEX sensors_pkey_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (measureid, eventdatetime, measure_data) +(12 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + table_with_index_rep_identity_8981011 | CREATE UNIQUE INDEX uqx_8981011 ON citus_split_test_schema.table_with_index_rep_identity_8981011 USING btree (key) + table_with_index_rep_identity_8981012 | CREATE UNIQUE INDEX uqx_8981012 ON citus_split_test_schema.table_with_index_rep_identity_8981012 USING btree (key) + table_with_index_rep_identity_8981021 | CREATE UNIQUE INDEX uqx_8981021 ON citus_split_test_schema.table_with_index_rep_identity_8981021 USING btree (key) +(3 rows) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + stats_on_sensors + stats_on_sensors_8981007 + stats_on_sensors_8981008 + stats_on_sensors_8981015 +(4 rows) + +-- END : Display current state +-- BEGIN: Should be able to change/drop constraints +\c - postgres - :master_port +SET ROLE test_split_role; +SET search_path TO "citus_split_test_schema"; +ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed; +ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200; +DROP STATISTICS stats_on_sensors; +DROP INDEX index_on_sensors_renamed; +ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist; +-- END: Should be able to change/drop constraints +-- BEGIN: Split second time on another schema +SET search_path TO public; +SET citus.next_shard_id TO 8981031; +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981007, + ARRAY['-2100000000'], + ARRAY[:worker_1_node, :worker_2_node], + 'block_writes'); + citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +SET search_path TO "citus_split_test_schema"; +SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid 
= cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) + ORDER BY logicalrelid, shardminvalue::BIGINT; + shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport +--------------------------------------------------------------------- + 8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637 + 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638 + 8981008 | sensors | -1073741823 | -1 | localhost | 57638 + 8981013 | sensors | 0 | 536870911 | localhost | 57637 + 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 + 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 + 8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637 + 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638 + 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 + 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 + 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 + 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 + 8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637 + 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638 + 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 + 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 + 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 + 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 +(18 rows) + +-- END: Split second time on another schema +-- BEGIN: Validate Data Count +SELECT COUNT(*) FROM sensors; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT COUNT(*) FROM reference_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT COUNT(*) FROM colocated_dist_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +-- END: Validate Data Count +--BEGIN : Cleanup +\c - postgres - :master_port +DROP SCHEMA "citus_split_test_schema" CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table citus_split_test_schema.sensors +drop cascades to table citus_split_test_schema.reference_table +drop cascades to table citus_split_test_schema.colocated_dist_table +drop cascades to table citus_split_test_schema.table_with_index_rep_identity +--END : Cleanup diff --git a/src/test/regress/expected/citus_split_shard_by_split_points_failure.out b/src/test/regress/expected/citus_split_shard_by_split_points_failure.out new file mode 100644 index 000000000..4ea61e03c --- /dev/null +++ b/src/test/regress/expected/citus_split_shard_by_split_points_failure.out @@ -0,0 +1,105 @@ +CREATE SCHEMA "citus_split_failure_test_schema"; +SET search_path TO "citus_split_failure_test_schema"; +SET citus.shard_count TO 1; +SET citus.next_shard_id TO 890000; +SET citus.shard_replication_factor TO 1; +-- BEGIN: Create table to split +CREATE TABLE sensors( + measureid integer, + eventdatetime date); +CREATE TABLE sensors_colocated( + measureid integer, + eventdatetime2 date); +SELECT create_distributed_table('sensors', 'measureid'); + create_distributed_table 
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_distributed_table('sensors_colocated', 'measureid', colocate_with:='sensors');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- END: Create table to split
+-- BEGIN : Switch to the worker and create some split shards in advance, so the workflow fails.
+\c - - - :worker_1_port
+SET search_path TO "citus_split_failure_test_schema";
+-- Don't create sensors_8981001; the workflow will create and clean it.
+-- Create the rest of the shards so that the workflow fails but does not clean them.
+CREATE TABLE sensors_8981002(
+    measureid integer,
+    eventdatetime date);
+CREATE TABLE sensors_colocated_8981003(
+    measureid integer,
+    eventdatetime date);
+CREATE TABLE sensors_colocated_8981004(
+    measureid integer,
+    eventdatetime date);
+-- A random table which should not be deleted.
+CREATE TABLE sensors_nodelete(
+    measureid integer,
+    eventdatetime date);
+-- List the tables on the worker.
+SET search_path TO "citus_split_failure_test_schema";
+SET citus.show_shards_for_app_name_prefixes = '*';
+SELECT tbl.relname
+    FROM pg_catalog.pg_class tbl
+    WHERE tbl.relname like 'sensors%'
+    ORDER BY 1;
+ relname
+---------------------------------------------------------------------
+ sensors
+ sensors_890000
+ sensors_8981002
+ sensors_colocated
+ sensors_colocated_890001
+ sensors_colocated_8981003
+ sensors_colocated_8981004
+ sensors_nodelete
+(8 rows)
+
+-- END : Switch to the worker and create some split shards in advance, so the workflow fails.
+-- BEGIN : Set node id variables
+\c - postgres - :master_port
+SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+-- END : Set node id variables
+-- BEGIN : Split Shard, which is expected to fail.
+SET citus.next_shard_id TO 8981001;
+SELECT pg_catalog.citus_split_shard_by_split_points(
+    890000,
+    ARRAY['-1073741824'],
+    ARRAY[:worker_1_node, :worker_1_node],
+    'block_writes');
ERROR: relation "sensors_8981002" already exists
CONTEXT: while executing command on localhost:xxxxx
+-- END : Split Shard, which is expected to fail.
+-- BEGIN : Ensure only the workflow-created shard was cleaned from the worker
+\c - - - :worker_1_port
+SET search_path TO "citus_split_failure_test_schema";
+SET citus.show_shards_for_app_name_prefixes = '*';
+SELECT tbl.relname
+    FROM pg_catalog.pg_class tbl
+    WHERE tbl.relname like 'sensors%'
+    ORDER BY 1;
+ relname
+---------------------------------------------------------------------
+ sensors
+ sensors_890000
+ sensors_8981002
+ sensors_colocated
+ sensors_colocated_890001
+ sensors_colocated_8981003
+ sensors_colocated_8981004
+ sensors_nodelete
+(8 rows)
+
+-- END : Ensure only the workflow-created shard was cleaned from the worker
+--BEGIN : Cleanup
+\c - postgres - :master_port
+DROP SCHEMA "citus_split_failure_test_schema" CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table citus_split_failure_test_schema.sensors
drop cascades to table citus_split_failure_test_schema.sensors_colocated
+--END : Cleanup
diff --git a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out
new file mode 100644
index 000000000..5986fa74b
--- /dev/null
+++ b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out
@@ -0,0 +1,173 @@
+-- Negative test cases for citus_split_shard_by_split_points UDF.
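+-- The UDF takes a shard id, an array of text-encoded split points, an array of
+-- destination node ids (one per resulting shard), and an optional
+-- shard_transfer_mode (only 'block_writes' is accepted, as the errors below
+-- show). For contrast with the failing cases, a well-formed call against this
+-- file's fixtures would look like the following sketch:
+--   SELECT citus_split_shard_by_split_points(
+--       49761300, ARRAY['-1073741826'],
+--       ARRAY[:worker_1_node, :worker_2_node], 'block_writes');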
+CREATE SCHEMA citus_split_shard_by_split_points_negative;
+SET search_path TO citus_split_shard_by_split_points_negative;
+SET citus.shard_count TO 4;
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 60761300;
+CREATE TABLE range_paritioned_table_to_split(rid bigserial PRIMARY KEY, value char);
+SELECT create_distributed_table('range_paritioned_table_to_split', 'rid', 'range');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- Shards are not created automatically for range distributed tables.
+SELECT master_create_empty_shard('range_paritioned_table_to_split');
+ master_create_empty_shard
+---------------------------------------------------------------------
+                  60761300
+(1 row)
+
+SET citus.next_shard_id TO 49761300;
+CREATE TABLE table_to_split (id bigserial PRIMARY KEY, value char);
+-- Shard1 | -2147483648 | -1073741825
+-- Shard2 | -1073741824 | -1
+-- Shard3 | 0           | 1073741823
+-- Shard4 | 1073741824  | 2147483647
+SELECT create_distributed_table('table_to_split','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
+-- UDF fails for any shard_transfer_mode other than block_writes.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[101, 201],
+    'auto');
ERROR: Shard Tranfer mode: 'auto' is not supported. Please use 'block_writes' instead.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[101, 201],
+    'force_logical');
ERROR: Shard Tranfer mode: 'force_logical' is not supported. Please use 'block_writes' instead.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[101, 201],
+    'gibberish');
ERROR: invalid input value for enum citus.shard_transfer_mode: "gibberish"
+-- UDF fails for range partitioned tables.
+SELECT citus_split_shard_by_split_points(
+    60761300,
+    ARRAY['-1073741826'],
+    ARRAY[:worker_1_node, :worker_2_node]);
ERROR: Cannot split shard as operation is only supported for hash distributed tables.
+-- UDF fails if the number of placement nodes does not exceed the number of split points by one.
+-- Example: One split point defines a two-way split (2 worker nodes needed).
+SELECT citus_split_shard_by_split_points(
+    49761300,
+    -- 2 split points define a 3-way split, but we only specify 2 placement nodes.
+    ARRAY['-1073741826', '-107374182'],
+    ARRAY[:worker_1_node, :worker_2_node]); -- 2 worker nodes.
ERROR: Number of worker node ids should be one greater split points. NodeId count is '2' and SplitPoint count is '2'.
+-- UDF fails if the split points specified are not within the range of the shard being split.
+SELECT citus_split_shard_by_split_points(
+    49761300, -- Shard range is from (-2147483648, -1073741825)
+    ARRAY['0'], -- The split point we specified is 0, which is outside this range.
+    ARRAY[:worker_1_node, :worker_2_node]);
ERROR: Split point 0 is outside the min/max range(-2147483648, -1073741825) for shard id 49761300.
+-- UDF fails if split points are not strictly increasing.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50', '35'],
+    ARRAY[:worker_1_node, :worker_2_node, :worker_1_node]);
ERROR: Invalid Split Points '50' followed by '35'. All split points should be strictly increasing.
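+-- Duplicate split points are rejected by the same strictly-increasing rule.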
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50', '50'],
+    ARRAY[:worker_1_node, :worker_2_node, :worker_1_node]);
ERROR: Invalid Split Points '50' followed by '50'. All split points should be strictly increasing.
+-- UDF fails if node ids are < 1 or invalid.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[0, :worker_2_node]);
ERROR: Invalid Node Id '0'.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[101, 201]);
ERROR: Invalid Node Id '101'.
+-- UDF fails if a split point specified is equal to the max value in the range.
+-- Example: Shard 49761300 ranges from (-2147483648, -1073741825):
+-- '-1073741825' as a split point is invalid;
+-- '-1073741826' is valid and splits into (-2147483648, -1073741826) and (-1073741825, -1073741825).
+SELECT citus_split_shard_by_split_points(
+    49761300, -- Shard range is from (-2147483648, -1073741825)
+    ARRAY['-1073741825'], -- Split point equals shard's max value.
+    ARRAY[:worker_1_node, :worker_2_node]);
ERROR: Invalid split point -1073741825, as split points should be inclusive. Please use -1073741826 instead.
+-- UDF fails if the resulting shard count from the split is greater than MAX_SHARD_COUNT (64000).
+-- 64000 split points define a 64001-way split (64001 worker nodes needed).
+WITH shard_ranges AS (SELECT ((-2147483648 + indx))::text as split_points, :worker_1_node as node_ids FROM generate_series(1,64000) indx )
+SELECT citus_split_shard_by_split_points(
+    49761300,
+    array_agg(split_points),
+    array_agg(node_ids) || :worker_1_node) -- placement node list should exceed split points by one.
+FROM shard_ranges;
ERROR: Resulting shard count '64001' with split is greater than max shard count '64000' limit.
+-- UDF fails when the source shard cannot be split further, i.e., min and max of its range are equal.
+-- Create a shard whose range cannot be split further.
+SELECT isolate_tenant_to_new_shard('table_to_split', 1);
+ isolate_tenant_to_new_shard
+---------------------------------------------------------------------
+                    49761305
+(1 row)
+
+SELECT citus_split_shard_by_split_points(
+    49761305,
+    ARRAY['-1073741826'],
+    ARRAY[:worker_1_node, :worker_2_node]);
ERROR: Cannot split shard id "49761305" as min/max range are equal: ('-1905060026', '-1905060026').
+-- Create a distributed table with replication factor > 1.
+SET citus.shard_replication_factor TO 2;
+SET citus.next_shard_id TO 51261400;
+CREATE TABLE table_to_split_replication_factor_2 (id bigserial PRIMARY KEY, value char);
+SELECT create_distributed_table('table_to_split_replication_factor_2','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- UDF fails for replication factor > 1.
+SELECT citus_split_shard_by_split_points(
+    51261400,
+    ARRAY['-1073741826'],
+    ARRAY[:worker_1_node, :worker_2_node]);
ERROR: Operation split not supported for shard as replication factor '2' is greater than 1.
+-- Create a distributed table with columnar storage.
+SET citus.next_shard_id TO 51271400;
+CREATE TABLE table_to_split_columnar (id bigserial PRIMARY KEY, value char) USING columnar;
+SELECT create_distributed_table('table_to_split_columnar','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- UDF fails for columnar tables.
+SELECT citus_split_shard_by_split_points( + 51271400, + ARRAY['-1073741826'], + ARRAY[:worker_1_node, :worker_2_node]); +ERROR: Cannot split shard as operation is not supported for Columnar tables. +-- Create distributed table which is partitioned. +SET citus.next_shard_id TO 51271900; +CREATE TABLE table_to_split_partitioned(id integer, dt date) PARTITION BY RANGE(dt); +SELECT create_distributed_table('table_to_split_partitioned','id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- UDF fails for partitioned table. +SELECT citus_split_shard_by_split_points( + 51271900, + ARRAY['-1073741826'], + ARRAY[:worker_1_node, :worker_2_node]); +ERROR: cannot split of 'table_to_split_partitioned', because it is a partitioned table +DETAIL: In colocation group of 'table_to_split_partitioned', a partitioned relation exists: 'table_to_split_partitioned'. Citus does not support split of partitioned tables. diff --git a/src/test/regress/expected/isolation_blocking_shard_split.out b/src/test/regress/expected/isolation_blocking_shard_split.out new file mode 100644 index 000000000..02a23174e --- /dev/null +++ b/src/test/regress/expected/isolation_blocking_shard_split.out @@ -0,0 +1,951 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE to_split_table SET value = 111 WHERE id = 123456789; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 1 + 57638|1500004|t | 0 +(3 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM to_split_table WHERE id = 123456789; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 1 + 57638|1500004|t | 0 +(3 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. 
+ TRUNCATE to_split_table; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +step s2-commit: + COMMIT; + +step s1-insert: <... completed> +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-copy: + COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE to_split_table SET value = 111 WHERE id = 123456789; + +step s2-commit: + COMMIT; + +step s1-update: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 1 + 57638|1500004|t | 0 +(3 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. 
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM to_split_table WHERE id = 123456789; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 1 + 57638|1500004|t | 0 +(3 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +step s2-commit: + COMMIT; + +step s1-insert: <... 
completed> +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-copy: + COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-load-cache s1-insert s1-begin s1-blocking-shard-split s2-blocking-shard-split s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. 
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500001, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +step s1-commit: + COMMIT; + +step s2-blocking-shard-split: <... completed> +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500003|t | 0 + 57637|1500005|t | 1 + 57638|1500004|t | 0 + 57638|1500006|t | 0 +(4 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-insert s1-begin s1-blocking-shard-split s2-blocking-shard-split s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500001, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'block_writes'); + +step s1-commit: + COMMIT; + +step s2-blocking-shard-split: <... 
completed>
+citus_split_shard_by_split_points
+---------------------------------------------------------------------
+
+(1 row)
+
+step s2-print-cluster:
+ -- row count per shard
+ SELECT
+ nodeport, shardid, success, result
+ FROM
+ run_command_on_placements('to_split_table', 'select count(*) from %s')
+ ORDER BY
+ nodeport, shardid;
+ -- rows
+ SELECT id, value FROM to_split_table ORDER BY id, value;
+
+nodeport|shardid|success|result
+---------------------------------------------------------------------
+ 57637|1500003|t | 0
+ 57637|1500005|t | 1
+ 57638|1500004|t | 0
+ 57638|1500006|t | 0
+(4 rows)
+
+ id|value
+---------------------------------------------------------------------
+123456789| 1
+(1 row)
+
+
+starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster s2-print-index-count
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+step s1-load-cache:
+ -- Indirect way to load cache.
+ TRUNCATE to_split_table;
+
+step s1-begin:
+ BEGIN;
+ -- the tests are written with the logic where single shard SELECTs
+ -- do not open transaction blocks
+ SET citus.select_opens_transaction_block TO false;
+
+step s1-select:
+ SELECT count(*) FROM to_split_table WHERE id = 123456789;
+
+count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+step s2-begin:
+ BEGIN;
+
+step s2-blocking-shard-split:
+ SELECT pg_catalog.citus_split_shard_by_split_points(
+ 1500002,
+ ARRAY['1073741824'],
+ ARRAY[1, 2],
+ 'block_writes');
+
+citus_split_shard_by_split_points
+---------------------------------------------------------------------
+
+(1 row)
+
+step s1-ddl:
+ CREATE INDEX test_table_index ON to_split_table(id);
+
+step s2-commit:
+ COMMIT;
+
+step s1-ddl: <...
completed>
+step s1-commit:
+ COMMIT;
+
+step s2-print-cluster:
+ -- row count per shard
+ SELECT
+ nodeport, shardid, success, result
+ FROM
+ run_command_on_placements('to_split_table', 'select count(*) from %s')
+ ORDER BY
+ nodeport, shardid;
+ -- rows
+ SELECT id, value FROM to_split_table ORDER BY id, value;
+
+nodeport|shardid|success|result
+---------------------------------------------------------------------
+ 57637|1500001|t | 0
+ 57637|1500003|t | 0
+ 57638|1500004|t | 0
+(3 rows)
+
+id|value
+---------------------------------------------------------------------
+(0 rows)
+
+step s2-print-index-count:
+ SELECT
+ nodeport, success, result
+ FROM
+ run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
+ ORDER BY
+ nodeport;
+
+nodeport|success|result
+---------------------------------------------------------------------
+ 57637|t | 1
+ 57637|t | 1
+ 57638|t | 1
+(3 rows)
+
+
+starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster s2-print-index-count
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+step s1-begin:
+ BEGIN;
+ -- the tests are written with the logic where single shard SELECTs
+ -- do not open transaction blocks
+ SET citus.select_opens_transaction_block TO false;
+
+step s1-select:
+ SELECT count(*) FROM to_split_table WHERE id = 123456789;
+
+count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+step s2-begin:
+ BEGIN;
+
+step s2-blocking-shard-split:
+ SELECT pg_catalog.citus_split_shard_by_split_points(
+ 1500002,
+ ARRAY['1073741824'],
+ ARRAY[1, 2],
+ 'block_writes');
+
+citus_split_shard_by_split_points
+---------------------------------------------------------------------
+
+(1 row)
+
+step s1-ddl:
+ CREATE INDEX test_table_index ON to_split_table(id);
+
+step s2-commit:
+ COMMIT;
+
+step s1-ddl: <...
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + +step s2-print-index-count: + SELECT + nodeport, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 1 + 57637|t | 1 + 57638|t | 1 +(3 rows) + diff --git a/src/test/regress/expected/isolation_blocking_shard_split_with_fkey_to_reference.out b/src/test/regress/expected/isolation_blocking_shard_split_with_fkey_to_reference.out new file mode 100644 index 000000000..9a6ed53eb --- /dev/null +++ b/src/test/regress/expected/isolation_blocking_shard_split_with_fkey_to_reference.out @@ -0,0 +1,301 @@ +Parsed test spec with 2 sessions + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM reference_table WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE reference_table SET value = 5 WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + INSERT INTO reference_table VALUES (5, 10); + +step s2-commit: + COMMIT; + +step s1-insert: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-copy: + COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'block_writes'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-ddl: + CREATE INDEX reference_table_index ON reference_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 33cab2776..34aa1c2cc 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1080,7 +1080,7 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 11.1-1 ALTER EXTENSION citus UPDATE TO '11.1-1'; SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object + previous_object | current_object --------------------------------------------------------------------- access method columnar | function alter_columnar_table_reset(regclass,boolean,boolean,boolean,boolean) void | @@ -1103,7 +1103,10 @@ SELECT * FROM multi_extension.print_extension_changes(); table columnar.chunk_group | table columnar.options | table columnar.stripe | -(21 rows) + | function citus_split_shard_by_split_points(bigint,text[],integer[],citus.shard_transfer_mode) void + | function worker_split_copy(bigint,split_copy_info[]) void + | type split_copy_info +(24 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index e7e601f3f..98fe422a4 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -107,6 +107,7 @@ ORDER BY 1; function citus_shard_indexes_on_worker() function 
citus_shard_sizes() function citus_shards_on_worker() + function citus_split_shard_by_split_points(bigint,text[],integer[],citus.shard_transfer_mode) function citus_stat_activity() function citus_stat_statements() function citus_stat_statements_reset() @@ -233,6 +234,7 @@ ORDER BY 1; function worker_partitioned_table_size(regclass) function worker_record_sequence_dependency(regclass,regclass,name) function worker_save_query_explain_analyze(text,jsonb) + function worker_split_copy(bigint,split_copy_info[]) schema citus schema citus_internal sequence pg_dist_colocationid_seq @@ -256,6 +258,7 @@ ORDER BY 1; type citus.shard_transfer_mode type citus_copy_format type noderole + type split_copy_info view citus_dist_stat_activity view citus_lock_waits view citus_schema.citus_tables @@ -266,5 +269,5 @@ ORDER BY 1; view citus_stat_statements view pg_dist_shard_placement view time_partitions -(250 rows) +(253 rows) diff --git a/src/test/regress/expected/worker_shard_binary_copy_test.out b/src/test/regress/expected/worker_shard_binary_copy_test.out new file mode 100644 index 000000000..fc9b2cd86 --- /dev/null +++ b/src/test/regress/expected/worker_shard_binary_copy_test.out @@ -0,0 +1,227 @@ +CREATE SCHEMA worker_shard_binary_copy_test; +SET search_path TO worker_shard_binary_copy_test; +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 81060000; +-- BEGIN: Create distributed table and insert data. +CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('shard_to_split_copy', 'l_orderkey'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\COPY shard_to_split_copy FROM STDIN WITH DELIMITER '|' +-- END: Create distributed table and insert data. +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. 
+\c - - - :worker_1_port +CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy_81060015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy_81060016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy. +-- BEGIN: Switch to Worker2, Create target shards in worker for remote 2-way split copy. +\c - - - :worker_2_port +CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy_81060015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_shard_binary_copy_test.shard_to_split_copy_81060016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker2, Create target shards in worker for remote 2-way split copy. +-- BEGIN: List row count for source shard and targets shard in Worker1. +\c - - - :worker_1_port +SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060000; + count +--------------------------------------------------------------------- + 22 +(1 row) + +SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060015; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060016; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- END: List row count for source shard and targets shard in Worker1. +-- BEGIN: List row count for target shard in Worker2. 
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060015;
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060016;
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+-- END: List row count for targets shard in Worker2.
+-- BEGIN: Set worker_1_node and worker_2_node
+\c - - - :worker_1_port
+SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
+-- END: Set worker_1_node and worker_2_node
+-- BEGIN: Trigger 2-way local shard split copy.
+-- Ensure we will perform binary copy.
+SET citus.enable_binary_protocol = TRUE;
+SELECT * from worker_split_copy(
+ 81060000, -- source shard id to copy
+ ARRAY[
+ -- split copy info for split children 1
+ ROW(81060015, -- destination shard id
+ -2147483648, -- split range begin
+ 1073741823, --split range end
+ :worker_1_node)::pg_catalog.split_copy_info,
+ -- split copy info for split children 2
+ ROW(81060016, --destination shard id
+ 1073741824, --split range begin
+ 2147483647, --split range end
+ :worker_1_node)::pg_catalog.split_copy_info
+ ]
+ );
+ worker_split_copy
+---------------------------------------------------------------------
+
+(1 row)
+
+-- END: Trigger 2-way local shard split copy.
+-- BEGIN: Trigger 2-way remote shard split copy.
+SELECT * from worker_split_copy(
+ 81060000, -- source shard id to copy
+ ARRAY[
+ -- split copy info for split children 1
+ ROW(81060015, -- destination shard id
+ -2147483648, -- split range begin
+ 1073741823, --split range end
+ :worker_2_node)::pg_catalog.split_copy_info,
+ -- split copy info for split children 2
+ ROW(81060016, --destination shard id
+ 1073741824, --split range begin
+ 2147483647, --split range end
+ :worker_2_node)::pg_catalog.split_copy_info
+ ]
+ );
+ worker_split_copy
+---------------------------------------------------------------------
+
+(1 row)
+
+-- END: Trigger 2-way remote shard split copy.
+-- BEGIN: List updated row count for local targets shard.
+SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060015;
+ count
+---------------------------------------------------------------------
+ 21
+(1 row)
+
+SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060016;
+ count
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+-- END: List updated row count for local targets shard.
+-- BEGIN: List updated row count for remote targets shard.
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060015;
+ count
+---------------------------------------------------------------------
+ 21
+(1 row)
+
+SELECT COUNT(*) FROM worker_shard_binary_copy_test.shard_to_split_copy_81060016;
+ count
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+-- END: List updated row count for remote targets shard.
+-- BEGIN: CLEANUP.
+\c - - - :master_port
+SET client_min_messages TO WARNING;
+DROP SCHEMA worker_shard_binary_copy_test CASCADE;
+-- END: CLEANUP.
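A note on the 21/1 distribution reported above: worker_split_copy routes each
tuple of the source shard into the child whose hash range covers the hashed
distribution column. The counts can be predicted from the source shard with a
query along the following lines (an illustrative sketch only; it assumes
Citus's existing worker_hash() helper, which is not touched by this patch):

SELECT
    CASE
        WHEN worker_hash(l_orderkey) BETWEEN -2147483648 AND 1073741823
            THEN 81060015  -- first split child's hash range
        ELSE 81060016      -- second split child's hash range
    END AS destination_shard,
    count(*)
FROM worker_shard_binary_copy_test.shard_to_split_copy_81060000
GROUP BY 1
ORDER BY 1;

Run on worker 1, this should report 21 rows destined for shard 81060015 and
1 row for shard 81060016, matching the post-copy counts listed above.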
diff --git a/src/test/regress/expected/worker_shard_text_copy_test.out b/src/test/regress/expected/worker_shard_text_copy_test.out new file mode 100644 index 000000000..52b26cbb9 --- /dev/null +++ b/src/test/regress/expected/worker_shard_text_copy_test.out @@ -0,0 +1,227 @@ +CREATE SCHEMA worker_shard_text_copy_test; +SET search_path TO worker_shard_text_copy_test; +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 81070000; +-- BEGIN: Create distributed table and insert data. +CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('shard_to_split_copy', 'l_orderkey'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\COPY shard_to_split_copy FROM STDIN WITH DELIMITER '|' +-- END: Create distributed table and insert data. +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. +\c - - - :worker_1_port +CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy_81070015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy_81070016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy. +-- BEGIN: Switch to Worker2, Create target shards in worker for remote 2-way split copy. 
+\c - - - :worker_2_port +CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy_81070015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_shard_text_copy_test.shard_to_split_copy_81070016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker2, Create target shards in worker for remote 2-way split copy. +-- BEGIN: List row count for source shard and targets shard in Worker1. +\c - - - :worker_1_port +SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070000; + count +--------------------------------------------------------------------- + 22 +(1 row) + +SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070015; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070016; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- END: List row count for source shard and targets shard in Worker1. +-- BEGIN: List row count for target shard in Worker2. +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070015; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070016; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- END: List row count for targets shard in Worker2. +-- BEGIN: Set worker_1_node and worker_2_node +\c - - - :worker_1_port +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END: Set worker_1_node and worker_2_node +-- BEGIN: Trigger 2-way local shard split copy. +-- Ensure we will perform text copy. +SET citus.enable_binary_protocol = FALSE; +SELECT * from worker_split_copy( + 81070000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81070015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_1_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81070016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_1_node)::pg_catalog.split_copy_info + ] + ); + worker_split_copy +--------------------------------------------------------------------- + +(1 row) + +-- END: Trigger 2-way local shard split copy. 
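+-- Illustrative aside (editorial sketch, not generated test output): each
+-- element passed to worker_split_copy above is an ordinary composite value,
+-- so it can be inspected with row expansion before use; expansion prints
+-- every field of pg_catalog.split_copy_info without needing to know the
+-- field names. For example:
+--   SELECT (ROW(81070015, -2147483648, 1073741823,
+--               :worker_1_node)::pg_catalog.split_copy_info).*;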
+-- BEGIN: Trigger 2-way remote shard split copy.
+SELECT * from worker_split_copy(
+ 81070000, -- source shard id to copy
+ ARRAY[
+ -- split copy info for split children 1
+ ROW(81070015, -- destination shard id
+ -2147483648, -- split range begin
+ 1073741823, --split range end
+ :worker_2_node)::pg_catalog.split_copy_info,
+ -- split copy info for split children 2
+ ROW(81070016, --destination shard id
+ 1073741824, --split range begin
+ 2147483647, --split range end
+ :worker_2_node)::pg_catalog.split_copy_info
+ ]
+ );
+ worker_split_copy
+---------------------------------------------------------------------
+
+(1 row)
+
+-- END: Trigger 2-way remote shard split copy.
+-- BEGIN: List updated row count for local targets shard.
+SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070015;
+ count
+---------------------------------------------------------------------
+ 21
+(1 row)
+
+SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070016;
+ count
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+-- END: List updated row count for local targets shard.
+-- BEGIN: List updated row count for remote targets shard.
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070015;
+ count
+---------------------------------------------------------------------
+ 21
+(1 row)
+
+SELECT COUNT(*) FROM worker_shard_text_copy_test.shard_to_split_copy_81070016;
+ count
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+-- END: List updated row count for remote targets shard.
+-- BEGIN: CLEANUP.
+\c - - - :master_port
+SET client_min_messages TO WARNING;
+DROP SCHEMA worker_shard_text_copy_test CASCADE;
+-- END: CLEANUP.
diff --git a/src/test/regress/expected/worker_split_binary_copy_test.out b/src/test/regress/expected/worker_split_binary_copy_test.out
new file mode 100644
index 000000000..07dacbdb1
--- /dev/null
+++ b/src/test/regress/expected/worker_split_binary_copy_test.out
@@ -0,0 +1,263 @@
+CREATE SCHEMA worker_split_binary_copy_test;
+SET search_path TO worker_split_binary_copy_test;
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 81060000;
+-- Remove extra nodes added, otherwise GetLocalNodeId() does not behave correctly.
+SELECT citus_remove_node('localhost', 8887);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_remove_node('localhost', 9995);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_remove_node('localhost', 9992);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_remove_node('localhost', 9998);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_remove_node('localhost', 9997);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_remove_node('localhost', 8888);
+ citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
+-- BEGIN: Create distributed table and insert data.
+CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('shard_to_split_copy', 'l_orderkey'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\COPY shard_to_split_copy FROM STDIN WITH DELIMITER '|' +-- END: Create distributed table and insert data. +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. +\c - - - :worker_1_port +CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy_81060015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy_81060016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy. +-- BEGIN: Switch to Worker2, Create target shards in worker for remote 2-way split copy. 
+\c - - - :worker_2_port +CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy_81060015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy_81060016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker2, Create target shards in worker for remote 2-way split copy. +-- BEGIN: List row count for source shard and targets shard in Worker1. +\c - - - :worker_1_port +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060000; + count +--------------------------------------------------------------------- + 22 +(1 row) + +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060015; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060016; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- END: List row count for source shard and targets shard in Worker1. +-- BEGIN: List row count for target shard in Worker2. +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060015; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060016; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- END: List row count for targets shard in Worker2. +-- BEGIN: Set worker_1_node and worker_2_node +\c - - - :worker_1_port +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END: Set worker_1_node and worker_2_node +-- BEGIN: Trigger 2-way local shard split copy. +-- Ensure we will perform binary copy. 
+SET citus.enable_binary_protocol = true; +SELECT * from worker_split_copy( + 81060000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81060015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_1_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81060016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_1_node)::pg_catalog.split_copy_info + ] + ); + worker_split_copy +--------------------------------------------------------------------- + +(1 row) + +-- END: Trigger 2-way local shard split copy. +-- BEGIN: Trigger 2-way remote shard split copy. +SELECT * from worker_split_copy( + 81060000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81060015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_2_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81060016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_2_node)::pg_catalog.split_copy_info + ] + ); + worker_split_copy +--------------------------------------------------------------------- + +(1 row) + +-- END: Trigger 2-way remote shard split copy. +-- BEGIN: List updated row count for local targets shard. +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060015; + count +--------------------------------------------------------------------- + 21 +(1 row) + +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060016; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- END: List updated row count for local targets shard. +-- BEGIN: List updated row count for remote targets shard. +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060015; + count +--------------------------------------------------------------------- + 21 +(1 row) + +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060016; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- END: List updated row count for remote targets shard. +-- BEGIN: CLEANUP. +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA worker_split_binary_copy_test CASCADE; +-- END: CLEANUP. diff --git a/src/test/regress/expected/worker_split_copy_test.out b/src/test/regress/expected/worker_split_copy_test.out new file mode 100644 index 000000000..c17ef5aa4 --- /dev/null +++ b/src/test/regress/expected/worker_split_copy_test.out @@ -0,0 +1,142 @@ +CREATE SCHEMA worker_split_copy_test; +SET search_path TO worker_split_copy_test; +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 81070000; +-- BEGIN: Create distributed table and insert data. +CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table"(id int primary key, value char); +SELECT create_distributed_table('"test !/ \n _""dist_123_table"', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO "test !/ \n _""dist_123_table" (id, value) (SELECT g.id, 'N' FROM generate_series(1, 1000) AS g(id)); +-- END: Create distributed table and insert data. +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. 
+\c - - - :worker_1_port
+CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070015"(id int primary key, value char);
+CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070016"(id int primary key, value char);
+-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy.
+-- BEGIN: List row count for source shard and targets shard in Worker1.
+\c - - - :worker_1_port
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070000";
+ count
+---------------------------------------------------------------------
+ 510
+(1 row)
+
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070001";
+ count
+---------------------------------------------------------------------
+ 490
+(1 row)
+
+-- END: List row count for source shard and targets shard in Worker1.
+-- BEGIN: Set worker_1_node and worker_2_node
+\c - - - :worker_1_port
+SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
+-- END: Set worker_1_node and worker_2_node
+-- BEGIN: Test Negative scenario
+SELECT * from worker_split_copy(
+ 101, -- Invalid source shard id.
+ ARRAY[
+ -- split copy info for split children 1
+ ROW(81070015, -- destination shard id
+ -2147483648, -- split range begin
+ -1073741824, --split range end
+ :worker_1_node)::pg_catalog.split_copy_info,
+ -- split copy info for split children 2
+ ROW(81070016, --destination shard id
+ -1073741823, --split range begin
+ -1, --split range end
+ :worker_1_node)::pg_catalog.split_copy_info
+ ]
+ );
+ERROR: could not find valid entry for shard xxxxx
+SELECT * from worker_split_copy(
+ 81070000, -- source shard id to copy
+ ARRAY[] -- empty array
+ );
+ERROR: cannot determine type of empty array
+HINT: Explicitly cast to the desired type, for example ARRAY[]::integer[].
+SELECT * from worker_split_copy(
+ 81070000, -- source shard id to copy
+ ARRAY[NULL] -- array with a NULL element
+ );
+ERROR: function worker_split_copy(integer, text[]) does not exist
+HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+SELECT * from worker_split_copy(
+ 81070000, -- source shard id to copy
+ ARRAY[NULL::pg_catalog.split_copy_info] -- array with a typed NULL element
+ );
+ERROR: pg_catalog.split_copy_info array cannot contain null values
+SELECT * from worker_split_copy(
+ 81070000, -- source shard id to copy
+ ARRAY[ROW(NULL)] -- array with an untyped row
+ );
+ERROR: function worker_split_copy(integer, record[]) does not exist
+HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+SELECT * from worker_split_copy(
+ 81070000, -- source shard id to copy
+ ARRAY[ROW(NULL, NULL, NULL, NULL)::pg_catalog.split_copy_info] -- split_copy_info with all-NULL fields
+ );
+ERROR: destination_shard_id for pg_catalog.split_copy_info cannot be null.
+-- END: Test Negative scenario
+-- BEGIN: Trigger 2-way local shard split copy.
+-- Ensure we will perform text copy.
+SET citus.enable_binary_protocol = false; +SELECT * from worker_split_copy( + 81070000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81070015, -- destination shard id + -2147483648, -- split range begin + -1073741824, --split range end + :worker_1_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81070016, --destination shard id + -1073741823, --split range begin + -1, --split range end + :worker_1_node)::pg_catalog.split_copy_info + ] + ); + worker_split_copy +--------------------------------------------------------------------- + +(1 row) + +-- END: Trigger 2-way local shard split copy. +-- BEGIN: List updated row count for local targets shard. +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015"; + count +--------------------------------------------------------------------- + 247 +(1 row) + +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016"; + count +--------------------------------------------------------------------- + 263 +(1 row) + +-- END: List updated row count for local targets shard. +-- BEGIN: CLEANUP. +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA worker_split_copy_test CASCADE; +-- END: CLEANUP. diff --git a/src/test/regress/expected/worker_split_text_copy_test.out b/src/test/regress/expected/worker_split_text_copy_test.out new file mode 100644 index 000000000..164d3a6d7 --- /dev/null +++ b/src/test/regress/expected/worker_split_text_copy_test.out @@ -0,0 +1,226 @@ +CREATE SCHEMA worker_split_text_copy_test; +SET search_path TO worker_split_text_copy_test; +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 81070000; +-- BEGIN: Create distributed table and insert data. +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('shard_to_split_copy', 'l_orderkey'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +\COPY shard_to_split_copy FROM STDIN WITH DELIMITER '|' +-- END: Create distributed table and insert data. +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. 
+\c - - - :worker_1_port +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy_81070015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy_81070016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy. +-- BEGIN: Switch to Worker2, Create target shards in worker for remote 2-way split copy. +\c - - - :worker_2_port +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy_81070015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy_81070016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker2, Create target shards in worker for remote 2-way split copy. +-- BEGIN: List row count for source shard and targets shard in Worker1. +\c - - - :worker_1_port +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070000; + count +--------------------------------------------------------------------- + 22 +(1 row) + +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070015; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070016; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- END: List row count for source shard and targets shard in Worker1. +-- BEGIN: List row count for target shard in Worker2. 
+\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070015; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070016; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- END: List row count for targets shard in Worker2. +-- BEGIN: Set worker_1_node and worker_2_node +\c - - - :worker_1_port +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END: Set worker_1_node and worker_2_node +-- BEGIN: Trigger 2-way local shard split copy. +-- Ensure we will perform text copy. +SET citus.enable_binary_protocol = false; +SELECT * from worker_split_copy( + 81070000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81070015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_1_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81070016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_1_node)::pg_catalog.split_copy_info + ] + ); + worker_split_copy +--------------------------------------------------------------------- + +(1 row) + +-- END: Trigger 2-way local shard split copy. +-- BEGIN: Trigger 2-way remote shard split copy. +SELECT * from worker_split_copy( + 81070000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81070015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_2_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81070016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_2_node)::pg_catalog.split_copy_info + ] + ); + worker_split_copy +--------------------------------------------------------------------- + +(1 row) + +-- END: Trigger 2-way remote shard split copy. +-- BEGIN: List updated row count for local targets shard. +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070015; + count +--------------------------------------------------------------------- + 21 +(1 row) + +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070016; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- END: List updated row count for local targets shard. +-- BEGIN: List updated row count for remote targets shard. +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070015; + count +--------------------------------------------------------------------- + 21 +(1 row) + +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070016; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- END: List updated row count for remote targets shard. +-- BEGIN: CLEANUP. +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA worker_split_text_copy_test CASCADE; +-- END: CLEANUP. 
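The isolation specs that follow exercise the user-facing entry point,
citus_split_shard_by_split_points(), rather than calling worker_split_copy()
directly. An annotated form of the call, as read from the tests in this patch
(the annotations are an editorial reading of the calls, not authoritative
documentation):

SELECT pg_catalog.citus_split_shard_by_split_points(
    1500002,              -- id of the shard to split
    ARRAY['1073741824'],  -- split point(s): hash values bounding the child ranges
    ARRAY[1, 2],          -- pg_dist_node node id receiving each child shard
    'block_writes');      -- citus.shard_transfer_mode; blocks writes during the split

One split point yields two child shards; in the expected outputs above this is
visible as shard 1500002 being replaced by children 1500005 and 1500006.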
diff --git a/src/test/regress/spec/isolation_blocking_shard_split.spec b/src/test/regress/spec/isolation_blocking_shard_split.spec
new file mode 100644
index 000000000..ddac66f5b
--- /dev/null
+++ b/src/test/regress/spec/isolation_blocking_shard_split.spec
@@ -0,0 +1,146 @@
+setup
+{
+ SET citus.shard_count to 2;
+ SET citus.shard_replication_factor to 1;
+ SELECT setval('pg_dist_shardid_seq', 1500000);
+
+ CREATE TABLE to_split_table (id int, value int);
+ SELECT create_distributed_table('to_split_table', 'id');
+}
+
+teardown
+{
+ DROP TABLE to_split_table;
+}
+
+session "s1"
+
+step "s1-begin"
+{
+ BEGIN;
+
+ -- the tests are written with the logic where single shard SELECTs
+ -- do not open transaction blocks
+ SET citus.select_opens_transaction_block TO false;
+}
+
+// cache all placements
+step "s1-load-cache"
+{
+ -- Indirect way to load cache.
+ TRUNCATE to_split_table;
+}
+
+step "s1-insert"
+{
+ -- Id '123456789' maps to shard 1500002.
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
+
+ INSERT INTO to_split_table VALUES (123456789, 1);
+}
+
+step "s1-update"
+{
+ UPDATE to_split_table SET value = 111 WHERE id = 123456789;
+}
+
+step "s1-delete"
+{
+ DELETE FROM to_split_table WHERE id = 123456789;
+}
+
+step "s1-select"
+{
+ SELECT count(*) FROM to_split_table WHERE id = 123456789;
+}
+
+step "s1-ddl"
+{
+ CREATE INDEX test_table_index ON to_split_table(id);
+}
+
+step "s1-copy"
+{
+ COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
+}
+
+step "s1-blocking-shard-split"
+{
+ SELECT pg_catalog.citus_split_shard_by_split_points(
+ 1500001,
+ ARRAY['-1073741824'],
+ ARRAY[1, 2],
+ 'block_writes');
+}
+
+step "s1-commit"
+{
+ COMMIT;
+}
+
+session "s2"
+
+step "s2-begin"
+{
+ BEGIN;
+}
+
+step "s2-blocking-shard-split"
+{
+ SELECT pg_catalog.citus_split_shard_by_split_points(
+ 1500002,
+ ARRAY['1073741824'],
+ ARRAY[1, 2],
+ 'block_writes');
+}
+
+step "s2-commit"
+{
+ COMMIT;
+}
+
+step "s2-print-cluster"
+{
+ -- row count per shard
+ SELECT
+ nodeport, shardid, success, result
+ FROM
+ run_command_on_placements('to_split_table', 'select count(*) from %s')
+ ORDER BY
+ nodeport, shardid;
+
+ -- rows
+ SELECT id, value FROM to_split_table ORDER BY id, value;
+}
+
+step "s2-print-index-count"
+{
+ SELECT
+ nodeport, success, result
+ FROM
+ run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
+ ORDER BY
+ nodeport;
+}
+
+// Run shard split while concurrently performing DML and index creation
+// We expect DML and COPY to fail because the shard they are waiting for is destroyed.
+ permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" + // The same tests without loading the cache at first + permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" + +// Concurrent shard split blocks on different shards of the same table (or any colocated table) + permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster" + // The same test above without loading the cache at first + permutation "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster" + +// Concurrent DDL blocks on different shards of the same table (or any colocated table) + permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" + // The same tests without loading the cache at first + permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" diff --git a/src/test/regress/spec/isolation_blocking_shard_split_with_fkey_to_reference.spec b/src/test/regress/spec/isolation_blocking_shard_split_with_fkey_to_reference.spec new file mode 100644 index 000000000..49b56c4a5 --- /dev/null +++ b/src/test/regress/spec/isolation_blocking_shard_split_with_fkey_to_reference.spec @@ -0,0 +1,104 @@ +setup +{ + SELECT setval('pg_dist_shardid_seq', 1500000); + SET citus.shard_count to 2; + SET citus.shard_replication_factor to 1; + + CREATE TABLE reference_table (id int PRIMARY KEY, value int); + SELECT create_reference_table('reference_table'); + + CREATE TABLE table_to_split (id int, value int); + SELECT create_distributed_table('table_to_split', 'id'); +} + +teardown +{ + DROP TABLE table_to_split CASCADE; + DROP TABLE reference_table CASCADE; +} + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-insert" +{ + INSERT INTO reference_table VALUES (5, 10); +} + +step "s1-update" +{ + UPDATE reference_table SET value = 5 WHERE id = 5; +} + +step "s1-delete" +{ + DELETE FROM reference_table WHERE id = 5; +} + +step "s1-ddl" +{ + CREATE INDEX reference_table_index ON reference_table(id); +} + +step "s1-copy" +{ + COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; +} + +step "s1-commit" +{ + COMMIT; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-blocking-shard-split" +{ + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 
2],
+        'block_writes');
+}
+
+step "s2-add-fkey"
+{
+    ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
+}
+
+step "s2-commit"
+{
+    COMMIT;
+}
+
+step "s2-print-cluster"
+{
+    -- row count per shard
+    SELECT
+        nodeport, shardid, success, result
+    FROM
+        run_command_on_placements('table_to_split', 'select count(*) from %s')
+    ORDER BY
+        nodeport, shardid;
+
+    -- rows
+    SELECT id, value FROM table_to_split ORDER BY id, value;
+}
+
+// Run shard split while concurrently performing DML and index creation on the
+// reference table that the distributed table has a foreign key to.
+// All modifications should block on shard split.
+permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster"
+permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster"
+permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster"
+permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster"
+permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster"
diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule
new file mode 100644
index 000000000..18601a1ab
--- /dev/null
+++ b/src/test/regress/split_schedule
@@ -0,0 +1,15 @@
+# Split Shard tests.
+# Include tests from 'minimal_schedule' for setup.
+test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
+test: multi_cluster_management
+test: multi_test_catalog_views
+test: tablespace
+# Helpers for foreign key catalogs.
+test: foreign_key_to_reference_table
+# Split tests go here.
+test: worker_split_copy_test
+test: worker_split_binary_copy_test
+test: worker_split_text_copy_test
+test: citus_split_shard_by_split_points_negative
+test: citus_split_shard_by_split_points
+test: citus_split_shard_by_split_points_failure
diff --git a/src/test/regress/sql/citus_split_shard_by_split_points.sql b/src/test/regress/sql/citus_split_shard_by_split_points.sql
new file mode 100644
index 000000000..6c2957953
--- /dev/null
+++ b/src/test/regress/sql/citus_split_shard_by_split_points.sql
@@ -0,0 +1,240 @@
+/*
+Citus Shard Split Test. The test is modeled similarly to 'shard_move_constraints'.
+Here is a high level overview of the test plan:
+ 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
+ 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
+ 3. Create Foreign key constraints between the two co-located distributed tables.
+ 4. Load data into the three tables.
+ 5. Move one of the shards for 'sensors' to test ShardMove -> Split.
+ 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
+ 7. Move one of the split shards to test Split -> ShardMove.
+ 8. Split an already split shard a second time on a different schema.
+*/ + +CREATE SCHEMA "citus_split_test_schema"; + +CREATE ROLE test_split_role WITH LOGIN; +GRANT USAGE, CREATE ON SCHEMA "citus_split_test_schema" TO test_split_role; +SET ROLE test_split_role; + +SET search_path TO "citus_split_test_schema"; +SET citus.next_shard_id TO 8981000; +SET citus.next_placement_id TO 8610000; +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 1; + +-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. +CREATE TABLE sensors( + measureid integer, + eventdatetime date, + measure_data jsonb, + meaure_quantity decimal(15, 2), + measure_status char(1), + measure_comment varchar(44), + PRIMARY KEY (measureid, eventdatetime, measure_data)); + +CREATE INDEX index_on_sensors ON sensors(lower(measureid::text)); +ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000; +CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status); +CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors; + +SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); +-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. + +-- BEGIN: Create co-located distributed and reference tables. +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); + +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; +SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); + +CREATE TABLE table_with_index_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key); +ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER table_with_index_rep_identity USING uqx; +SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors'); +-- END: Create co-located distributed and reference tables. + +-- BEGIN : Create Foreign key constraints. +ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- END : Create Foreign key constraints. + +-- BEGIN : Load data into tables. +INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i; + +SELECT COUNT(*) FROM sensors; +SELECT COUNT(*) FROM reference_table; +SELECT COUNT(*) FROM colocated_dist_table; +-- END: Load data into tables. + +-- BEGIN : Display current state. 
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) + ORDER BY logicalrelid, shardminvalue::BIGINT; + +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + +\c - - - :worker_2_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; +-- END : Display current state + +-- BEGIN : Move one shard before we split it. +\c - postgres - :master_port +SET ROLE test_split_role; +SET search_path TO "citus_split_test_schema"; +SET citus.next_shard_id TO 8981007; +SET citus.defer_drop_after_shard_move TO OFF; + +SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +-- END : Move one shard before we split it. + +-- BEGIN : Set node id variables +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END : Set node id variables + +-- BEGIN : Split two shards : One with move and One without move. +-- Perform 2 way split +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981000, + ARRAY['-1073741824'], + ARRAY[:worker_1_node, :worker_2_node], + 'block_writes'); + +-- Perform 3 way split +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981001, + ARRAY['536870911', '1610612735'], + ARRAY[:worker_1_node, :worker_1_node, :worker_2_node], + 'block_writes'); +-- END : Split two shards : One with move and One without move. + +-- BEGIN : Move a shard post split. +SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); +-- END : Move a shard post split. + +-- BEGIN : Display current state. 
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) + ORDER BY logicalrelid, shardminvalue::BIGINT; + +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + +\c - - - :worker_2_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; +-- END : Display current state + +-- BEGIN: Should be able to change/drop constraints +\c - postgres - :master_port +SET ROLE test_split_role; +SET search_path TO "citus_split_test_schema"; +ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed; +ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200; +DROP STATISTICS stats_on_sensors; +DROP INDEX index_on_sensors_renamed; +ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist; +-- END: Should be able to change/drop constraints + +-- BEGIN: Split second time on another schema +SET search_path TO public; +SET citus.next_shard_id TO 8981031; +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981007, + ARRAY['-2100000000'], + ARRAY[:worker_1_node, :worker_2_node], + 'block_writes'); + +SET search_path TO "citus_split_test_schema"; +SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) + ORDER BY logicalrelid, shardminvalue::BIGINT; +-- END: Split second time on another schema + +-- BEGIN: Validate Data Count +SELECT COUNT(*) FROM sensors; +SELECT COUNT(*) FROM reference_table; +SELECT COUNT(*) 
FROM colocated_dist_table;
+-- END: Validate Data Count
+
+--BEGIN : Cleanup
+\c - postgres - :master_port
+DROP SCHEMA "citus_split_test_schema" CASCADE;
+--END : Cleanup
diff --git a/src/test/regress/sql/citus_split_shard_by_split_points_failure.sql b/src/test/regress/sql/citus_split_shard_by_split_points_failure.sql
new file mode 100644
index 000000000..0eb5e8c04
--- /dev/null
+++ b/src/test/regress/sql/citus_split_shard_by_split_points_failure.sql
@@ -0,0 +1,80 @@
+CREATE SCHEMA "citus_split_failure_test_schema";
+
+SET search_path TO "citus_split_failure_test_schema";
+SET citus.shard_count TO 1;
+SET citus.next_shard_id TO 890000;
+SET citus.shard_replication_factor TO 1;
+
+-- BEGIN: Create tables to split
+CREATE TABLE sensors(
+    measureid integer,
+    eventdatetime date);
+
+CREATE TABLE sensors_colocated(
+    measureid integer,
+    eventdatetime2 date);
+
+SELECT create_distributed_table('sensors', 'measureid');
+SELECT create_distributed_table('sensors_colocated', 'measureid', colocate_with:='sensors');
+-- END: Create tables to split
+
+-- BEGIN : Switch to worker and create split shards already so the workflow fails.
+\c - - - :worker_1_port
+SET search_path TO "citus_split_failure_test_schema";
+
+-- Don't create sensors_8981001; the workflow will create and clean it.
+-- Create the rest of the shards so that the workflow fails, but will not clean them.
+
+CREATE TABLE sensors_8981002(
+    measureid integer,
+    eventdatetime date);
+
+CREATE TABLE sensors_colocated_8981003(
+    measureid integer,
+    eventdatetime date);
+
+CREATE TABLE sensors_colocated_8981004(
+    measureid integer,
+    eventdatetime date);
+
+-- A random table which should not be deleted.
+CREATE TABLE sensors_nodelete(
+    measureid integer,
+    eventdatetime date);
+-- List tables in worker.
+SET search_path TO "citus_split_failure_test_schema";
+SET citus.show_shards_for_app_name_prefixes = '*';
+SELECT tbl.relname
+    FROM pg_catalog.pg_class tbl
+    WHERE tbl.relname like 'sensors%'
+    ORDER BY 1;
+-- END : Switch to worker and create split shards already so the workflow fails.
+
+-- BEGIN : Set node id variables
+\c - postgres - :master_port
+SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+-- END : Set node id variables
+
+-- BEGIN : Split Shard, which is expected to fail.
+SET citus.next_shard_id TO 8981001;
+SELECT pg_catalog.citus_split_shard_by_split_points(
+    890000,
+    ARRAY['-1073741824'],
+    ARRAY[:worker_1_node, :worker_1_node],
+    'block_writes');
+-- END : Split Shard, which is expected to fail.
+
+-- BEGIN : Ensure tables were cleaned from worker
+\c - - - :worker_1_port
+SET search_path TO "citus_split_failure_test_schema";
+SET citus.show_shards_for_app_name_prefixes = '*';
+SELECT tbl.relname
+    FROM pg_catalog.pg_class tbl
+    WHERE tbl.relname like 'sensors%'
+    ORDER BY 1;
+-- END : Ensure tables were cleaned from worker
+
+--BEGIN : Cleanup
+\c - postgres - :master_port
+DROP SCHEMA "citus_split_failure_test_schema" CASCADE;
+--END : Cleanup
diff --git a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql
new file mode 100644
index 000000000..e730a8c28
--- /dev/null
+++ b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql
@@ -0,0 +1,145 @@
+-- Negative test cases for the citus_split_shard_by_split_points UDF.
+
+CREATE SCHEMA citus_split_shard_by_split_points_negative;
+SET search_path TO citus_split_shard_by_split_points_negative;
+SET citus.shard_count TO 4;
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 60761300;
+
+CREATE TABLE range_paritioned_table_to_split(rid bigserial PRIMARY KEY, value char);
+SELECT create_distributed_table('range_paritioned_table_to_split', 'rid', 'range');
+-- Shards are not created automatically for a range distributed table.
+SELECT master_create_empty_shard('range_paritioned_table_to_split');
+
+SET citus.next_shard_id TO 49761300;
+CREATE TABLE table_to_split (id bigserial PRIMARY KEY, value char);
+
+-- Shard1 | -2147483648 | -1073741825
+-- Shard2 | -1073741824 | -1
+-- Shard3 | 0           | 1073741823
+-- Shard4 | 1073741824  | 2147483647
+SELECT create_distributed_table('table_to_split','id');
+
+SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
+
+-- UDF fails for any shard_transfer_mode other than block_writes.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[101, 201],
+    'auto');
+
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[101, 201],
+    'force_logical');
+
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[101, 201],
+    'gibberish');
+
+-- UDF fails for range partitioned tables.
+SELECT citus_split_shard_by_split_points(
+    60761300,
+    ARRAY['-1073741826'],
+    ARRAY[:worker_1_node, :worker_2_node]);
+
+-- UDF fails if the number of nodes in the placement list does not exceed the number of split points by one.
+-- Example: one split point defines a two-way split, so 2 worker nodes are needed.
+SELECT citus_split_shard_by_split_points(
+    49761300,
+    -- 2 split points defined, making it a 3-way split, but we only specify 2 placements.
+    ARRAY['-1073741826', '-107374182'],
+    ARRAY[:worker_1_node, :worker_2_node]); -- 2 worker nodes.
+
+-- UDF fails if the split points specified are not within the range of the shard being split.
+SELECT citus_split_shard_by_split_points(
+    49761300, -- Shard range is from (-2147483648, -1073741825)
+    ARRAY['0'], -- The split point we specified is 0, which is not within that range.
+    ARRAY[:worker_1_node, :worker_2_node]);
+
+-- UDF fails if split points are not strictly increasing.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50', '35'],
+    ARRAY[:worker_1_node, :worker_2_node, :worker_1_node]);
+
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50', '50'],
+    ARRAY[:worker_1_node, :worker_2_node, :worker_1_node]);
+
+-- UDF fails if nodeIds are < 1 or invalid.
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[0, :worker_2_node]);
+
+SELECT citus_split_shard_by_split_points(
+    49761302,
+    ARRAY['50'],
+    ARRAY[101, 201]);
+
+-- UDF fails if the split point specified is equal to the max value in the range.
+-- Example: shard 49761300's range is (-2147483648, -1073741825), so
+-- '-1073741825' as a split point is invalid, while
+-- '-1073741826' is valid and splits into (-2147483648, -1073741826) and (-1073741825, -1073741825).
+SELECT citus_split_shard_by_split_points(
+    49761300, -- Shard range is from (-2147483648, -1073741825)
+    ARRAY['-1073741825'], -- Split point equals the shard's max value.
+    ARRAY[:worker_1_node, :worker_2_node]);
+
+-- UDF fails if the resulting shard count from the split is greater than MAX_SHARD_COUNT (64000).
+-- 64000 split points define a 64001-way split, which would need 64001 worker nodes.
+WITH shard_ranges AS (SELECT ((-2147483648 + indx))::text as split_points, :worker_1_node as node_ids FROM generate_series(1,64000) indx )
+SELECT citus_split_shard_by_split_points(
+    49761300,
+    array_agg(split_points),
+    array_agg(node_ids) || :worker_1_node) -- placement node list should exceed split points by one.
+FROM shard_ranges;
+
+-- UDF fails where the source shard cannot be split further, i.e. the min and max of its range are equal.
+-- Create a shard whose range cannot be split further.
+SELECT isolate_tenant_to_new_shard('table_to_split', 1);
+SELECT citus_split_shard_by_split_points(
+    49761305,
+    ARRAY['-1073741826'],
+    ARRAY[:worker_1_node, :worker_2_node]);
+
+-- Create a distributed table with replication factor > 1.
+SET citus.shard_replication_factor TO 2;
+SET citus.next_shard_id TO 51261400;
+CREATE TABLE table_to_split_replication_factor_2 (id bigserial PRIMARY KEY, value char);
+SELECT create_distributed_table('table_to_split_replication_factor_2','id');
+
+-- UDF fails for replication factor > 1.
+SELECT citus_split_shard_by_split_points(
+    51261400,
+    ARRAY['-1073741826'],
+    ARRAY[:worker_1_node, :worker_2_node]);
+
+-- Create a distributed table with columnar type.
+SET citus.next_shard_id TO 51271400;
+CREATE TABLE table_to_split_columnar (id bigserial PRIMARY KEY, value char) USING columnar;
+SELECT create_distributed_table('table_to_split_columnar','id');
+
+-- UDF fails for columnar tables.
+SELECT citus_split_shard_by_split_points(
+    51271400,
+    ARRAY['-1073741826'],
+    ARRAY[:worker_1_node, :worker_2_node]);
+
+-- Create a distributed table which is partitioned.
+SET citus.next_shard_id TO 51271900;
+CREATE TABLE table_to_split_partitioned(id integer, dt date) PARTITION BY RANGE(dt);
+SELECT create_distributed_table('table_to_split_partitioned','id');
+
+-- UDF fails for partitioned tables.
+SELECT citus_split_shard_by_split_points(
+    51271900,
+    ARRAY['-1073741826'],
+    ARRAY[:worker_1_node, :worker_2_node]);
diff --git a/src/test/regress/sql/worker_split_binary_copy_test.sql b/src/test/regress/sql/worker_split_binary_copy_test.sql
new file mode 100644
index 000000000..a47e968bd
--- /dev/null
+++ b/src/test/regress/sql/worker_split_binary_copy_test.sql
@@ -0,0 +1,211 @@
+CREATE SCHEMA worker_split_binary_copy_test;
+SET search_path TO worker_split_binary_copy_test;
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 81060000;
+
+-- Remove extra nodes added, otherwise GetLocalNodeId() does not behave correctly.
+SELECT citus_remove_node('localhost', 8887);
+SELECT citus_remove_node('localhost', 9995);
+SELECT citus_remove_node('localhost', 9992);
+SELECT citus_remove_node('localhost', 9998);
+SELECT citus_remove_node('localhost', 9997);
+SELECT citus_remove_node('localhost', 8888);
+
+-- BEGIN: Create distributed table and insert data.
+CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('shard_to_split_copy', 'l_orderkey'); + +\COPY shard_to_split_copy FROM STDIN WITH DELIMITER '|' +99|87114|4639|1|10|11011.10|0.02|0.01|A|F|1994-05-18|1994-06-03|1994-05-23|COLLECT COD|RAIL|kages. requ +99|123766|3767|2|5|8948.80|0.02|0.07|R|F|1994-05-06|1994-05-28|1994-05-20|TAKE BACK RETURN|RAIL|ests cajole fluffily waters. blithe +99|134082|1622|3|42|46875.36|0.02|0.02|A|F|1994-04-19|1994-05-18|1994-04-20|NONE|RAIL|kages are fluffily furiously ir +99|108338|849|4|36|48467.88|0.09|0.02|A|F|1994-07-04|1994-04-17|1994-07-30|DELIVER IN PERSON|AIR|slyly. slyly e +100|62029|2030|1|28|27748.56|0.04|0.05|N|O|1998-05-08|1998-05-13|1998-06-07|COLLECT COD|TRUCK|sts haggle. slowl +100|115979|8491|2|22|43889.34|0.00|0.07|N|O|1998-06-24|1998-04-12|1998-06-29|DELIVER IN PERSON|SHIP|nto beans alongside of the fi +100|46150|8655|3|46|50422.90|0.03|0.04|N|O|1998-05-02|1998-04-10|1998-05-22|TAKE BACK RETURN|SHIP|ular accounts. even +100|38024|3031|4|14|13468.28|0.06|0.03|N|O|1998-05-22|1998-05-01|1998-06-03|COLLECT COD|MAIL|y. furiously ironic ideas gr +100|53439|955|5|37|51519.91|0.05|0.00|N|O|1998-03-06|1998-04-16|1998-03-31|TAKE BACK RETURN|TRUCK|nd the quickly s +101|118282|5816|1|49|63713.72|0.10|0.00|N|O|1996-06-21|1996-05-27|1996-06-29|DELIVER IN PERSON|REG AIR|ts +101|163334|883|2|36|50303.88|0.00|0.01|N|O|1996-05-19|1996-05-01|1996-06-04|DELIVER IN PERSON|AIR|tes. blithely pending dolphins x-ray f +101|138418|5958|3|12|17476.92|0.06|0.02|N|O|1996-03-29|1996-04-20|1996-04-12|COLLECT COD|MAIL|. quickly regular +102|88914|3931|1|37|70407.67|0.06|0.00|N|O|1997-07-24|1997-08-02|1997-08-07|TAKE BACK RETURN|SHIP|ully across the ideas. final deposit +102|169238|6787|2|34|44445.82|0.03|0.08|N|O|1997-08-09|1997-07-28|1997-08-26|TAKE BACK RETURN|SHIP|eposits cajole across +102|182321|4840|3|25|35083.00|0.01|0.01|N|O|1997-07-31|1997-07-24|1997-08-17|NONE|RAIL|bits. ironic accoun +102|61158|8677|4|15|16787.25|0.07|0.07|N|O|1997-06-02|1997-07-13|1997-06-04|DELIVER IN PERSON|SHIP|final packages. carefully even excu +103|194658|2216|1|6|10515.90|0.03|0.05|N|O|1996-10-11|1996-07-25|1996-10-28|NONE|FOB|cajole. carefully ex +103|10426|2928|2|37|49447.54|0.02|0.07|N|O|1996-09-17|1996-07-27|1996-09-20|TAKE BACK RETURN|MAIL|ies. quickly ironic requests use blithely +103|28431|8432|3|23|31266.89|0.01|0.04|N|O|1996-09-11|1996-09-18|1996-09-26|NONE|FOB|ironic accou +103|29022|4027|4|32|30432.64|0.01|0.07|N|O|1996-07-30|1996-08-06|1996-08-04|NONE|RAIL|kages doze. special, regular deposit +-1995148554|112942|2943|1|9|17594.46|0.04|0.04|N|O|1996-08-03|1996-05-31|1996-08-04|DELIVER IN PERSON|TRUCK|c realms print carefully car +-1686493264|15110|113|5|2|2050.22|0.03|0.08|R|F|1994-04-26|1994-03-15|1994-05-15|TAKE BACK RETURN|MAIL|e final, regular requests. carefully +\. + +-- END: Create distributed table and insert data. 
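worker_split_copy consumes an array of pg_catalog.split_copy_info values, so the ROW(...) literals used later in this test must follow that composite type's attribute order. A sketch for checking the expected shape directly from the catalog (illustrative only; it assumes the type has already been created by the Citus extension):

SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS attribute_type
FROM pg_attribute a
WHERE a.attrelid = 'pg_catalog.split_copy_info'::regclass
  AND a.attnum > 0
  AND NOT a.attisdropped
ORDER BY a.attnum;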
+ +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. +\c - - - :worker_1_port +CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy_81060015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy_81060016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy. + +-- BEGIN: Switch to Worker2, Create target shards in worker for remote 2-way split copy. +\c - - - :worker_2_port +CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy_81060015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy_81060016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker2, Create target shards in worker for remote 2-way split copy. + +-- BEGIN: List row count for source shard and targets shard in Worker1. +\c - - - :worker_1_port +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060000; +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060015; +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060016; +-- END: List row count for source shard and targets shard in Worker1. + +-- BEGIN: List row count for target shard in Worker2. 
+\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060015; +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060016; +-- END: List row count for targets shard in Worker2. + +-- BEGIN: Set worker_1_node and worker_2_node +\c - - - :worker_1_port +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END: Set worker_1_node and worker_2_node + +-- BEGIN: Trigger 2-way local shard split copy. +-- Ensure we will perform binary copy. +SET citus.enable_binary_protocol = true; + +SELECT * from worker_split_copy( + 81060000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81060015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_1_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81060016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_1_node)::pg_catalog.split_copy_info + ] + ); +-- END: Trigger 2-way local shard split copy. + +-- BEGIN: Trigger 2-way remote shard split copy. +SELECT * from worker_split_copy( + 81060000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81060015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_2_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81060016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_2_node)::pg_catalog.split_copy_info + ] + ); +-- END: Trigger 2-way remote shard split copy. + +-- BEGIN: List updated row count for local targets shard. +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060015; +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060016; +-- END: List updated row count for local targets shard. + +-- BEGIN: List updated row count for remote targets shard. +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060015; +SELECT COUNT(*) FROM worker_split_binary_copy_test.shard_to_split_copy_81060016; +-- END: List updated row count for remote targets shard. + +-- BEGIN: CLEANUP. +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA worker_split_binary_copy_test CASCADE; +-- END: CLEANUP. diff --git a/src/test/regress/sql/worker_split_copy_test.sql b/src/test/regress/sql/worker_split_copy_test.sql new file mode 100644 index 000000000..b799eb305 --- /dev/null +++ b/src/test/regress/sql/worker_split_copy_test.sql @@ -0,0 +1,110 @@ +CREATE SCHEMA worker_split_copy_test; +SET search_path TO worker_split_copy_test; +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 81070000; + +-- BEGIN: Create distributed table and insert data. + +CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table"(id int primary key, value char); +SELECT create_distributed_table('"test !/ \n _""dist_123_table"', 'id'); + +INSERT INTO "test !/ \n _""dist_123_table" (id, value) (SELECT g.id, 'N' FROM generate_series(1, 1000) AS g(id)); + +-- END: Create distributed table and insert data. + +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. 
+\c - - - :worker_1_port
+CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070015"(id int primary key, value char);
+CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070016"(id int primary key, value char);
+-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy.
+
+-- BEGIN: List row count for source shard and targets shard in Worker1.
+\c - - - :worker_1_port
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070000";
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
+
+\c - - - :worker_2_port
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070001";
+-- END: List row count for source shard and targets shard in Worker1.
+
+-- BEGIN: Set worker_1_node and worker_2_node
+\c - - - :worker_1_port
+SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
+-- END: Set worker_1_node and worker_2_node
+
+-- BEGIN: Test negative scenarios.
+SELECT * from worker_split_copy(
+    101, -- Invalid source shard id.
+    ARRAY[
+         -- split copy info for split children 1
+         ROW(81070015, -- destination shard id
+             -2147483648, -- split range begin
+             -1073741824, --split range end
+             :worker_1_node)::pg_catalog.split_copy_info,
+         -- split copy info for split children 2
+         ROW(81070016, --destination shard id
+             -1073741823, --split range begin
+             -1, --split range end
+             :worker_1_node)::pg_catalog.split_copy_info
+         ]
+    );
+
+SELECT * from worker_split_copy(
+    81070000, -- source shard id to copy
+    ARRAY[] -- empty array of split_copy_info
+    );
+
+SELECT * from worker_split_copy(
+    81070000, -- source shard id to copy
+    ARRAY[NULL] -- array with a NULL element
+    );
+
+SELECT * from worker_split_copy(
+    81070000, -- source shard id to copy
+    ARRAY[NULL::pg_catalog.split_copy_info] -- array with a typed NULL element
+    );
+
+SELECT * from worker_split_copy(
+    81070000, -- source shard id to copy
+    ARRAY[ROW(NULL)] -- row with a single NULL field
+    );
+
+SELECT * from worker_split_copy(
+    81070000, -- source shard id to copy
+    ARRAY[ROW(NULL, NULL, NULL, NULL)::pg_catalog.split_copy_info] -- split_copy_info with all fields NULL
+    );
+-- END: Test negative scenarios.
+
+-- BEGIN: Trigger 2-way local shard split copy.
+-- Ensure we will perform text copy.
+SET citus.enable_binary_protocol = false;
+SELECT * from worker_split_copy(
+    81070000, -- source shard id to copy
+    ARRAY[
+         -- split copy info for split children 1
+         ROW(81070015, -- destination shard id
+             -2147483648, -- split range begin
+             -1073741824, --split range end
+             :worker_1_node)::pg_catalog.split_copy_info,
+         -- split copy info for split children 2
+         ROW(81070016, --destination shard id
+             -1073741823, --split range begin
+             -1, --split range end
+             :worker_1_node)::pg_catalog.split_copy_info
+         ]
+    );
+-- END: Trigger 2-way local shard split copy.
+
+-- BEGIN: List updated row count for local targets shard.
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
+SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
+-- END: List updated row count for local targets shard.
+
+-- BEGIN: CLEANUP.
+\c - - - :master_port
+SET client_min_messages TO WARNING;
+DROP SCHEMA worker_split_copy_test CASCADE;
+-- END: CLEANUP.
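Each negative invocation in worker_split_copy_test.sql above is expected to raise an error rather than copy anything. Outside the regression harness, that expectation can be asserted with a DO block that traps the failure; the following is a sketch, under the assumption that worker_split_copy reports invalid input as a regular ERROR:

DO $$
BEGIN
    BEGIN
        -- An unknown source shard id (101) should make the call fail.
        PERFORM worker_split_copy(101, ARRAY[]::pg_catalog.split_copy_info[]);
    EXCEPTION
        WHEN OTHERS THEN
            RAISE NOTICE 'worker_split_copy rejected invalid input: %', SQLERRM;
            RETURN;
    END;
    RAISE EXCEPTION 'worker_split_copy unexpectedly succeeded';
END
$$;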
diff --git a/src/test/regress/sql/worker_split_text_copy_test.sql b/src/test/regress/sql/worker_split_text_copy_test.sql new file mode 100644 index 000000000..10791a66d --- /dev/null +++ b/src/test/regress/sql/worker_split_text_copy_test.sql @@ -0,0 +1,203 @@ +CREATE SCHEMA worker_split_text_copy_test; +SET search_path TO worker_split_text_copy_test; +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 81070000; + +-- BEGIN: Create distributed table and insert data. + +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +SELECT create_distributed_table('shard_to_split_copy', 'l_orderkey'); + +\COPY shard_to_split_copy FROM STDIN WITH DELIMITER '|' +99|87114|4639|1|10|11011.10|0.02|0.01|A|F|1994-05-18|1994-06-03|1994-05-23|COLLECT COD|RAIL|kages. requ +99|123766|3767|2|5|8948.80|0.02|0.07|R|F|1994-05-06|1994-05-28|1994-05-20|TAKE BACK RETURN|RAIL|ests cajole fluffily waters. blithe +99|134082|1622|3|42|46875.36|0.02|0.02|A|F|1994-04-19|1994-05-18|1994-04-20|NONE|RAIL|kages are fluffily furiously ir +99|108338|849|4|36|48467.88|0.09|0.02|A|F|1994-07-04|1994-04-17|1994-07-30|DELIVER IN PERSON|AIR|slyly. slyly e +100|62029|2030|1|28|27748.56|0.04|0.05|N|O|1998-05-08|1998-05-13|1998-06-07|COLLECT COD|TRUCK|sts haggle. slowl +100|115979|8491|2|22|43889.34|0.00|0.07|N|O|1998-06-24|1998-04-12|1998-06-29|DELIVER IN PERSON|SHIP|nto beans alongside of the fi +100|46150|8655|3|46|50422.90|0.03|0.04|N|O|1998-05-02|1998-04-10|1998-05-22|TAKE BACK RETURN|SHIP|ular accounts. even +100|38024|3031|4|14|13468.28|0.06|0.03|N|O|1998-05-22|1998-05-01|1998-06-03|COLLECT COD|MAIL|y. furiously ironic ideas gr +100|53439|955|5|37|51519.91|0.05|0.00|N|O|1998-03-06|1998-04-16|1998-03-31|TAKE BACK RETURN|TRUCK|nd the quickly s +101|118282|5816|1|49|63713.72|0.10|0.00|N|O|1996-06-21|1996-05-27|1996-06-29|DELIVER IN PERSON|REG AIR|ts +101|163334|883|2|36|50303.88|0.00|0.01|N|O|1996-05-19|1996-05-01|1996-06-04|DELIVER IN PERSON|AIR|tes. blithely pending dolphins x-ray f +101|138418|5958|3|12|17476.92|0.06|0.02|N|O|1996-03-29|1996-04-20|1996-04-12|COLLECT COD|MAIL|. quickly regular +102|88914|3931|1|37|70407.67|0.06|0.00|N|O|1997-07-24|1997-08-02|1997-08-07|TAKE BACK RETURN|SHIP|ully across the ideas. final deposit +102|169238|6787|2|34|44445.82|0.03|0.08|N|O|1997-08-09|1997-07-28|1997-08-26|TAKE BACK RETURN|SHIP|eposits cajole across +102|182321|4840|3|25|35083.00|0.01|0.01|N|O|1997-07-31|1997-07-24|1997-08-17|NONE|RAIL|bits. ironic accoun +102|61158|8677|4|15|16787.25|0.07|0.07|N|O|1997-06-02|1997-07-13|1997-06-04|DELIVER IN PERSON|SHIP|final packages. carefully even excu +103|194658|2216|1|6|10515.90|0.03|0.05|N|O|1996-10-11|1996-07-25|1996-10-28|NONE|FOB|cajole. carefully ex +103|10426|2928|2|37|49447.54|0.02|0.07|N|O|1996-09-17|1996-07-27|1996-09-20|TAKE BACK RETURN|MAIL|ies. 
quickly ironic requests use blithely +103|28431|8432|3|23|31266.89|0.01|0.04|N|O|1996-09-11|1996-09-18|1996-09-26|NONE|FOB|ironic accou +103|29022|4027|4|32|30432.64|0.01|0.07|N|O|1996-07-30|1996-08-06|1996-08-04|NONE|RAIL|kages doze. special, regular deposit +-1995148554|112942|2943|1|9|17594.46|0.04|0.04|N|O|1996-08-03|1996-05-31|1996-08-04|DELIVER IN PERSON|TRUCK|c realms print carefully car +-1686493264|15110|113|5|2|2050.22|0.03|0.08|R|F|1994-04-26|1994-03-15|1994-05-15|TAKE BACK RETURN|MAIL|e final, regular requests. carefully +\. + +-- END: Create distributed table and insert data. + +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. +\c - - - :worker_1_port +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy_81070015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy_81070016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy. + +-- BEGIN: Switch to Worker2, Create target shards in worker for remote 2-way split copy. +\c - - - :worker_2_port +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy_81070015 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +CREATE TABLE worker_split_text_copy_test.shard_to_split_copy_81070016 ( + l_orderkey bigint not null, + l_partkey integer not null, + l_suppkey integer not null, + l_linenumber integer not null, + l_quantity decimal(15, 2) not null, + l_extendedprice decimal(15, 2) not null, + l_discount decimal(15, 2) not null, + l_tax decimal(15, 2) not null, + l_returnflag char(1) not null, + l_linestatus char(1) not null, + l_shipdate date not null, + l_commitdate date not null, + l_receiptdate date not null, + l_shipinstruct char(25) not null, + l_shipmode char(10) not null, + l_comment varchar(44) not null); +-- End: Switch to Worker2, Create target shards in worker for remote 2-way split copy. + +-- BEGIN: List row count for source shard and targets shard in Worker1. 
+\c - - - :worker_1_port +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070000; +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070015; +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070016; +-- END: List row count for source shard and targets shard in Worker1. + +-- BEGIN: List row count for target shard in Worker2. +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070015; +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070016; +-- END: List row count for targets shard in Worker2. + +-- BEGIN: Set worker_1_node and worker_2_node +\c - - - :worker_1_port +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END: Set worker_1_node and worker_2_node + +-- BEGIN: Trigger 2-way local shard split copy. +-- Ensure we will perform text copy. +SET citus.enable_binary_protocol = false; +SELECT * from worker_split_copy( + 81070000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81070015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_1_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81070016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_1_node)::pg_catalog.split_copy_info + ] + ); +-- END: Trigger 2-way local shard split copy. + +-- BEGIN: Trigger 2-way remote shard split copy. +SELECT * from worker_split_copy( + 81070000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81070015, -- destination shard id + -2147483648, -- split range begin + 1073741823, --split range end + :worker_2_node)::pg_catalog.split_copy_info, + -- split copy info for split children 2 + ROW(81070016, --destination shard id + 1073741824, --split range begin + 2147483647, --split range end + :worker_2_node)::pg_catalog.split_copy_info + ] + ); +-- END: Trigger 2-way remote shard split copy. + +-- BEGIN: List updated row count for local targets shard. +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070015; +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070016; +-- END: List updated row count for local targets shard. + +-- BEGIN: List updated row count for remote targets shard. +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070015; +SELECT COUNT(*) FROM worker_split_text_copy_test.shard_to_split_copy_81070016; +-- END: List updated row count for remote targets shard. + +-- BEGIN: CLEANUP. +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA worker_split_text_copy_test CASCADE; +-- END: CLEANUP. 
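The text variant above is intentionally identical to worker_split_binary_copy_test.sql except for the citus.enable_binary_protocol setting, so the two wire formats can be compared on the same data. On the coordinator, before the schema is dropped, the source shard holding a given distribution key can be confirmed with Citus' routing helper, the same one the isolation spec uses; a small sketch:

-- With citus.shard_count = 1 above, every key maps to the single source shard 81070000.
SELECT get_shard_id_for_distribution_column('worker_split_text_copy_test.shard_to_split_copy', 99);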
From 9d232d7b00c37b1ad386fc5ed387c2e8bac6728a Mon Sep 17 00:00:00 2001 From: aykutbozkurt Date: Thu, 14 Jul 2022 14:38:29 +0300 Subject: [PATCH 03/10] change address method to return list of addresses --- src/backend/distributed/commands/collation.c | 35 ++-- src/backend/distributed/commands/database.c | 8 +- src/backend/distributed/commands/domain.c | 26 +-- src/backend/distributed/commands/extension.c | 16 +- .../distributed/commands/foreign_server.c | 30 +-- src/backend/distributed/commands/function.c | 34 ++-- src/backend/distributed/commands/role.c | 22 +-- src/backend/distributed/commands/schema.c | 14 +- src/backend/distributed/commands/sequence.c | 32 +-- src/backend/distributed/commands/statistics.c | 16 +- src/backend/distributed/commands/table.c | 8 +- .../distributed/commands/text_search.c | 102 +++++----- src/backend/distributed/commands/trigger.c | 8 +- src/backend/distributed/commands/type.c | 64 +++--- src/backend/distributed/commands/view.c | 32 +-- .../distributed/deparser/objectaddress.c | 24 ++- src/include/distributed/commands.h | 182 +++++++++--------- src/include/distributed/deparser.h | 2 +- 18 files changed, 345 insertions(+), 310 deletions(-) diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index f9b977447..492e2ace2 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -169,7 +169,7 @@ CreateCollationDDLsIdempotent(Oid collationId) } -ObjectAddress +List * AlterCollationOwnerObjectAddress(Node *node, bool missing_ok) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); @@ -177,8 +177,13 @@ AlterCollationOwnerObjectAddress(Node *node, bool missing_ok) Assert(stmt->objectType == OBJECT_COLLATION); - return get_object_address(stmt->objectType, stmt->object, &relation, - AccessExclusiveLock, missing_ok); + ObjectAddress objectAddress = get_object_address(stmt->objectType, stmt->object, + &relation, AccessExclusiveLock, + missing_ok); + + ObjectAddress *objectAddressCopy = palloc0(sizeof(ObjectAddress)); + *objectAddressCopy = objectAddress; + return list_make1(objectAddressCopy); } @@ -186,17 +191,17 @@ AlterCollationOwnerObjectAddress(Node *node, bool missing_ok) * RenameCollationStmtObjectAddress returns the ObjectAddress of the type that is the object * of the RenameStmt. Errors if missing_ok is false. */ -ObjectAddress +List * RenameCollationStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); Assert(stmt->renameType == OBJECT_COLLATION); Oid collationOid = get_collation_oid((List *) stmt->object, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, CollationRelationId, collationOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, CollationRelationId, collationOid); - return address; + return list_make1(address); } @@ -209,7 +214,7 @@ RenameCollationStmtObjectAddress(Node *node, bool missing_ok) * new schema. Errors if missing_ok is false and the type cannot be found in either of the * schemas. 
*/ -ObjectAddress +List * AlterCollationSchemaStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); @@ -232,9 +237,9 @@ AlterCollationSchemaStmtObjectAddress(Node *node, bool missing_ok) } } - ObjectAddress address = { 0 }; - ObjectAddressSet(address, CollationRelationId, collationOid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, CollationRelationId, collationOid); + return list_make1(address); } @@ -291,15 +296,15 @@ GenerateBackupNameForCollationCollision(const ObjectAddress *address) } -ObjectAddress +List * DefineCollationStmtObjectAddress(Node *node, bool missing_ok) { DefineStmt *stmt = castNode(DefineStmt, node); Assert(stmt->kind == OBJECT_COLLATION); Oid collOid = get_collation_oid(stmt->defnames, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, CollationRelationId, collOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, CollationRelationId, collOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 6f451893e..2bd03d5d8 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -40,17 +40,17 @@ bool EnableAlterDatabaseOwner = true; * AlterDatabaseOwnerObjectAddress returns the ObjectAddress of the database that is the * object of the AlterOwnerStmt. Errors if missing_ok is false. */ -ObjectAddress +List * AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); Assert(stmt->objectType == OBJECT_DATABASE); Oid databaseOid = get_database_oid(strVal((String *) stmt->object), missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, DatabaseRelationId, databaseOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, DatabaseRelationId, databaseOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/domain.c b/src/backend/distributed/commands/domain.c index c75af0024..50d195d58 100644 --- a/src/backend/distributed/commands/domain.c +++ b/src/backend/distributed/commands/domain.c @@ -37,7 +37,7 @@ static CollateClause * MakeCollateClauseFromOid(Oid collationOid); -static ObjectAddress GetDomainAddressByName(TypeName *domainName, bool missing_ok); +static List * GetDomainAddressByName(TypeName *domainName, bool missing_ok); /* * GetDomainAddressByName returns the ObjectAddress of the domain identified by @@ -45,13 +45,13 @@ static ObjectAddress GetDomainAddressByName(TypeName *domainName, bool missing_o * InvalidOid. When missing_ok is false this function will raise an error instead when the * domain can't be found. */ -static ObjectAddress +static List * GetDomainAddressByName(TypeName *domainName, bool missing_ok) { - ObjectAddress address = { 0 }; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); Oid domainOid = LookupTypeNameOid(NULL, domainName, missing_ok); - ObjectAddressSet(address, TypeRelationId, domainOid); - return address; + ObjectAddressSet(*address, TypeRelationId, domainOid); + return list_make1(address); } @@ -229,17 +229,17 @@ MakeCollateClauseFromOid(Oid collationOid) * created by the statement. When missing_ok is false the function will raise an error if * the domain cannot be found in the local catalog. 
*/ -ObjectAddress +List * CreateDomainStmtObjectAddress(Node *node, bool missing_ok) { CreateDomainStmt *stmt = castNode(CreateDomainStmt, node); TypeName *typeName = makeTypeNameFromNameList(stmt->domainname); Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TypeRelationId, typeOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TypeRelationId, typeOid); - return address; + return list_make1(address); } @@ -248,7 +248,7 @@ CreateDomainStmtObjectAddress(Node *node, bool missing_ok) * When missing_ok is false this function will raise an error when the domain is not * found. */ -ObjectAddress +List * AlterDomainStmtObjectAddress(Node *node, bool missing_ok) { AlterDomainStmt *stmt = castNode(AlterDomainStmt, node); @@ -263,7 +263,7 @@ AlterDomainStmtObjectAddress(Node *node, bool missing_ok) * which the constraint is being renamed. When missing_ok this function will raise an * error if the domain cannot be found. */ -ObjectAddress +List * DomainRenameConstraintStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -278,7 +278,7 @@ DomainRenameConstraintStmtObjectAddress(Node *node, bool missing_ok) * being changed. When missing_ok is false this function will raise an error if the domain * cannot be found. */ -ObjectAddress +List * AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); @@ -294,7 +294,7 @@ AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok) * When missing_ok is false this function will raise an error when the domain cannot be * found. */ -ObjectAddress +List * RenameDomainStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 6f45cec5b..e4979f035 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -1128,7 +1128,7 @@ GetDependentFDWsToExtension(Oid extensionId) * AlterExtensionSchemaStmtObjectAddress returns the ObjectAddress of the extension that is * the subject of the AlterObjectSchemaStmt. Errors if missing_ok is false. */ -ObjectAddress +List * AlterExtensionSchemaStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); @@ -1145,10 +1145,10 @@ AlterExtensionSchemaStmtObjectAddress(Node *node, bool missing_ok) extensionName))); } - ObjectAddress address = { 0 }; - ObjectAddressSet(address, ExtensionRelationId, extensionOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, ExtensionRelationId, extensionOid); - return address; + return list_make1(address); } @@ -1156,7 +1156,7 @@ AlterExtensionSchemaStmtObjectAddress(Node *node, bool missing_ok) * AlterExtensionUpdateStmtObjectAddress returns the ObjectAddress of the extension that is * the subject of the AlterExtensionStmt. Errors if missing_ok is false. 
*/ -ObjectAddress +List * AlterExtensionUpdateStmtObjectAddress(Node *node, bool missing_ok) { AlterExtensionStmt *stmt = castNode(AlterExtensionStmt, node); @@ -1171,10 +1171,10 @@ AlterExtensionUpdateStmtObjectAddress(Node *node, bool missing_ok) extensionName))); } - ObjectAddress address = { 0 }; - ObjectAddressSet(address, ExtensionRelationId, extensionOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, ExtensionRelationId, extensionOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c index 0502b2ef4..36b6094a9 100644 --- a/src/backend/distributed/commands/foreign_server.c +++ b/src/backend/distributed/commands/foreign_server.c @@ -16,6 +16,7 @@ #include "distributed/commands.h" #include "distributed/deparser.h" #include "distributed/listutils.h" +#include "distributed/log_utils.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" @@ -29,7 +30,7 @@ static char * GetForeignServerAlterOwnerCommand(Oid serverId); static Node * RecreateForeignServerStmt(Oid serverId); static bool NameListHasDistributedServer(List *serverNames); -static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing_ok); +static List * GetObjectAddressByServerName(char *serverName, bool missing_ok); /* @@ -40,7 +41,7 @@ static ObjectAddress GetObjectAddressByServerName(char *serverName, bool missing * Never returns NULL, but the objid in the address can be invalid if missingOk * was set to true. */ -ObjectAddress +List * CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok) { CreateForeignServerStmt *stmt = castNode(CreateForeignServerStmt, node); @@ -57,7 +58,7 @@ CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok) * Never returns NULL, but the objid in the address can be invalid if missingOk * was set to true. */ -ObjectAddress +List * AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok) { AlterForeignServerStmt *stmt = castNode(AlterForeignServerStmt, node); @@ -121,7 +122,7 @@ PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString, * Never returns NULL, but the objid in the address can be invalid if missingOk * was set to true. */ -ObjectAddress +List * RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -139,7 +140,7 @@ RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok) * Never returns NULL, but the objid in the address can be invalid if missingOk * was set to true. 
*/ -ObjectAddress +List * AlterForeignServerOwnerStmtObjectAddress(Node *node, bool missing_ok) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); @@ -245,9 +246,16 @@ NameListHasDistributedServer(List *serverNames) String *serverValue = NULL; foreach_ptr(serverValue, serverNames) { - ObjectAddress address = GetObjectAddressByServerName(strVal(serverValue), false); + List *addresses = GetObjectAddressByServerName(strVal(serverValue), false); + if (list_length(addresses) > 1) + { + ereport(ERROR, errmsg( + "citus does not support multiple object addresses in NameListHasDistributedServer")); + } - if (IsObjectDistributed(&address)) + ObjectAddress *address = linitial(addresses); + + if (IsObjectDistributed(address)) { return true; } @@ -257,13 +265,13 @@ NameListHasDistributedServer(List *serverNames) } -static ObjectAddress +static List * GetObjectAddressByServerName(char *serverName, bool missing_ok) { ForeignServer *server = GetForeignServerByName(serverName, missing_ok); Oid serverOid = server->serverid; - ObjectAddress address = { 0 }; - ObjectAddressSet(address, ForeignServerRelationId, serverOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, ForeignServerRelationId, serverOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 73c014c6a..ccf2ef2f3 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -88,9 +88,9 @@ static void EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt); static bool ShouldPropagateAlterFunction(const ObjectAddress *address); static bool ShouldAddFunctionSignature(FunctionParameterMode mode); -static ObjectAddress FunctionToObjectAddress(ObjectType objectType, - ObjectWithArgs *objectWithArgs, - bool missing_ok); +static List * FunctionToObjectAddress(ObjectType objectType, + ObjectWithArgs *objectWithArgs, + bool missing_ok); static void ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt); static char * quote_qualified_func_name(Oid funcOid); static void DistributeFunctionWithDistributionArgument(RegProcedure funcOid, @@ -1405,7 +1405,7 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString) * CREATE [OR REPLACE] FUNCTION statement. If missing_ok is false it will error with the * normal postgres error for unfound functions. */ -ObjectAddress +List * CreateFunctionStmtObjectAddress(Node *node, bool missing_ok) { CreateFunctionStmt *stmt = castNode(CreateFunctionStmt, node); @@ -1440,7 +1440,7 @@ CreateFunctionStmtObjectAddress(Node *node, bool missing_ok) * * objectId in the address can be invalid if missing_ok was set to true. */ -ObjectAddress +List * DefineAggregateStmtObjectAddress(Node *node, bool missing_ok) { DefineStmt *stmt = castNode(DefineStmt, node); @@ -1576,7 +1576,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString, * is the subject of an ALTER FUNCTION ... DEPENS ON EXTENSION ... statement. If * missing_ok is set to false the lookup will raise an error. */ -ObjectAddress +List * AlterFunctionDependsStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectDependsStmt *stmt = castNode(AlterObjectDependsStmt, node); @@ -1592,7 +1592,7 @@ AlterFunctionDependsStmtObjectAddress(Node *node, bool missing_ok) * AlterFunctionStmt. 
If missing_ok is set to false an error will be raised if postgres * was unable to find the function/procedure that was the target of the statement. */ -ObjectAddress +List * AlterFunctionStmtObjectAddress(Node *node, bool missing_ok) { AlterFunctionStmt *stmt = castNode(AlterFunctionStmt, node); @@ -1604,7 +1604,7 @@ AlterFunctionStmtObjectAddress(Node *node, bool missing_ok) * RenameFunctionStmtObjectAddress returns the ObjectAddress of the function that is the * subject of the RenameStmt. Errors if missing_ok is false. */ -ObjectAddress +List * RenameFunctionStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -1617,7 +1617,7 @@ RenameFunctionStmtObjectAddress(Node *node, bool missing_ok) * AlterFunctionOwnerObjectAddress returns the ObjectAddress of the function that is the * subject of the AlterOwnerStmt. Errors if missing_ok is false. */ -ObjectAddress +List * AlterFunctionOwnerObjectAddress(Node *node, bool missing_ok) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); @@ -1635,7 +1635,7 @@ AlterFunctionOwnerObjectAddress(Node *node, bool missing_ok) * the new schema. Errors if missing_ok is false and the type cannot be found in either of * the schemas. */ -ObjectAddress +List * AlterFunctionSchemaStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); @@ -1680,10 +1680,10 @@ AlterFunctionSchemaStmtObjectAddress(Node *node, bool missing_ok) } } - ObjectAddress address = { 0 }; - ObjectAddressSet(address, ProcedureRelationId, funcOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, ProcedureRelationId, funcOid); - return address; + return list_make1(address); } @@ -1827,17 +1827,17 @@ ShouldAddFunctionSignature(FunctionParameterMode mode) * Function/Procedure/Aggregate. If missing_ok is set to false an error will be * raised by postgres explaining the Function/Procedure could not be found. */ -static ObjectAddress +static List * FunctionToObjectAddress(ObjectType objectType, ObjectWithArgs *objectWithArgs, bool missing_ok) { AssertObjectTypeIsFunctional(objectType); Oid funcOid = LookupFuncWithArgs(objectType, objectWithArgs, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, ProcedureRelationId, funcOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, ProcedureRelationId, funcOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 4c4a7053a..6d19d6726 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -74,7 +74,7 @@ static Node * makeFloatConst(char *str, int location); static const char * WrapQueryInAlterRoleIfExistsCall(const char *query, RoleSpec *role); static VariableSetStmt * MakeVariableSetStmt(const char *config); static int ConfigGenericNameCompare(const void *lhs, const void *rhs); -static ObjectAddress RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok); +static List * RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok); /* controlled via GUC */ bool EnableCreateRolePropagation = true; @@ -87,7 +87,7 @@ bool EnableAlterRoleSetPropagation = true; * AlterRoleStmt. If missing_ok is set to false an error will be raised if postgres * was unable to find the role that was the target of the statement. 
*/ -ObjectAddress +List * AlterRoleStmtObjectAddress(Node *node, bool missing_ok) { AlterRoleStmt *stmt = castNode(AlterRoleStmt, node); @@ -100,7 +100,7 @@ AlterRoleStmtObjectAddress(Node *node, bool missing_ok) * AlterRoleSetStmt. If missing_ok is set to false an error will be raised if postgres * was unable to find the role that was the target of the statement. */ -ObjectAddress +List * AlterRoleSetStmtObjectAddress(Node *node, bool missing_ok) { AlterRoleSetStmt *stmt = castNode(AlterRoleSetStmt, node); @@ -113,19 +113,19 @@ AlterRoleSetStmtObjectAddress(Node *node, bool missing_ok) * RoleSpec. If missing_ok is set to false an error will be raised by postgres * explaining the Role could not be found. */ -static ObjectAddress +static List * RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok) { - ObjectAddress address = { 0 }; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); if (role != NULL) { /* roles can be NULL for statements on ALL roles eg. ALTER ROLE ALL SET ... */ Oid roleOid = get_rolespec_oid(role, missing_ok); - ObjectAddressSet(address, AuthIdRelationId, roleOid); + ObjectAddressSet(*address, AuthIdRelationId, roleOid); } - return address; + return list_make1(address); } @@ -1179,15 +1179,15 @@ ConfigGenericNameCompare(const void *a, const void *b) * Never returns NULL, but the objid in the address could be invalid if missing_ok was set * to true. */ -ObjectAddress +List * CreateRoleStmtObjectAddress(Node *node, bool missing_ok) { CreateRoleStmt *stmt = castNode(CreateRoleStmt, node); Oid roleOid = get_role_oid(stmt->role, missing_ok); - ObjectAddress roleAddress = { 0 }; - ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); + ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*roleAddress, AuthIdRelationId, roleOid); - return roleAddress; + return list_make1(roleAddress); } diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index 47af36efd..825a56b09 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -40,7 +40,7 @@ #include "utils/relcache.h" -static ObjectAddress GetObjectAddressBySchemaName(char *schemaName, bool missing_ok); +static List * GetObjectAddressBySchemaName(char *schemaName, bool missing_ok); static List * FilterDistributedSchemas(List *schemas); static bool SchemaHasDistributedTableWithFKey(char *schemaName); static bool ShouldPropagateCreateSchemaStmt(void); @@ -183,7 +183,7 @@ PreprocessGrantOnSchemaStmt(Node *node, const char *queryString, * CreateSchemaStmtObjectAddress returns the ObjectAddress of the schema that is * the object of the CreateSchemaStmt. Errors if missing_ok is false. */ -ObjectAddress +List * CreateSchemaStmtObjectAddress(Node *node, bool missing_ok) { CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node); @@ -213,7 +213,7 @@ CreateSchemaStmtObjectAddress(Node *node, bool missing_ok) * AlterSchemaRenameStmtObjectAddress returns the ObjectAddress of the schema that is * the object of the RenameStmt. Errors if missing_ok is false. */ -ObjectAddress +List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -227,15 +227,15 @@ AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok) * GetObjectAddressBySchemaName returns the ObjectAddress of the schema with the * given name. Errors out if schema is not found and missing_ok is false. 
*/ -ObjectAddress +List * GetObjectAddressBySchemaName(char *schemaName, bool missing_ok) { Oid schemaOid = get_namespace_oid(schemaName, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, NamespaceRelationId, schemaOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, NamespaceRelationId, schemaOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 4e5cd18de..324e373b5 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -358,7 +358,7 @@ PreprocessRenameSequenceStmt(Node *node, const char *queryString, ProcessUtility * RenameSequenceStmtObjectAddress returns the ObjectAddress of the sequence that is the * subject of the RenameStmt. */ -ObjectAddress +List * RenameSequenceStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -366,10 +366,10 @@ RenameSequenceStmtObjectAddress(Node *node, bool missing_ok) RangeVar *sequence = stmt->relation; Oid seqOid = RangeVarGetRelid(sequence, NoLock, missing_ok); - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, seqOid); + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, RelationRelationId, seqOid); - return sequenceAddress; + return list_make1(sequenceAddress); } @@ -471,17 +471,17 @@ SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress) * AlterSequenceStmtObjectAddress returns the ObjectAddress of the sequence that is the * subject of the AlterSeqStmt. */ -ObjectAddress +List * AlterSequenceStmtObjectAddress(Node *node, bool missing_ok) { AlterSeqStmt *stmt = castNode(AlterSeqStmt, node); RangeVar *sequence = stmt->sequence; Oid seqOid = RangeVarGetRelid(sequence, NoLock, stmt->missing_ok); - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, seqOid); + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, RelationRelationId, seqOid); - return sequenceAddress; + return list_make1(sequenceAddress); } @@ -521,7 +521,7 @@ PreprocessAlterSequenceSchemaStmt(Node *node, const char *queryString, * AlterSequenceSchemaStmtObjectAddress returns the ObjectAddress of the sequence that is * the subject of the AlterObjectSchemaStmt. */ -ObjectAddress +List * AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); @@ -555,10 +555,10 @@ AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok) } } - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, seqOid); + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, RelationRelationId, seqOid); - return sequenceAddress; + return list_make1(sequenceAddress); } @@ -623,7 +623,7 @@ PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString, * AlterSequenceOwnerStmtObjectAddress returns the ObjectAddress of the sequence that is the * subject of the AlterOwnerStmt. 
*/ -ObjectAddress +List * AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); @@ -631,10 +631,10 @@ AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok) RangeVar *sequence = stmt->relation; Oid seqOid = RangeVarGetRelid(sequence, NoLock, missing_ok); - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, seqOid); + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, RelationRelationId, seqOid); - return sequenceAddress; + return list_make1(sequenceAddress); } diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index bf48505a5..48ed8df20 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -138,16 +138,16 @@ PostprocessCreateStatisticsStmt(Node *node, const char *queryString) * Never returns NULL, but the objid in the address can be invalid if missingOk * was set to true. */ -ObjectAddress +List * CreateStatisticsStmtObjectAddress(Node *node, bool missingOk) { CreateStatsStmt *stmt = castNode(CreateStatsStmt, node); - ObjectAddress address = { 0 }; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); Oid statsOid = get_statistics_object_oid(stmt->defnames, missingOk); - ObjectAddressSet(address, StatisticExtRelationId, statsOid); + ObjectAddressSet(*address, StatisticExtRelationId, statsOid); - return address; + return list_make1(address); } @@ -322,18 +322,18 @@ PostprocessAlterStatisticsSchemaStmt(Node *node, const char *queryString) * Never returns NULL, but the objid in the address can be invalid if missingOk * was set to true. */ -ObjectAddress +List * AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - ObjectAddress address = { 0 }; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); String *statName = llast((List *) stmt->object); Oid statsOid = get_statistics_object_oid(list_make2(makeString(stmt->newschema), statName), missingOk); - ObjectAddressSet(address, StatisticExtRelationId, statsOid); + ObjectAddressSet(*address, StatisticExtRelationId, statsOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 8a73f1471..05f2a82ab 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -3353,7 +3353,7 @@ ErrorIfUnsupportedAlterAddConstraintStmt(AlterTableStmt *alterTableStatement) * will look in the new schema. Errors if missing_ok is false and the table cannot * be found in either of the schemas. 
*/ -ObjectAddress +List * AlterTableSchemaStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); @@ -3389,10 +3389,10 @@ AlterTableSchemaStmtObjectAddress(Node *node, bool missing_ok) } } - ObjectAddress address = { 0 }; - ObjectAddressSet(address, RelationRelationId, tableOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, RelationRelationId, tableOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/text_search.c b/src/backend/distributed/commands/text_search.c index 05319324d..22ff5df2f 100644 --- a/src/backend/distributed/commands/text_search.c +++ b/src/backend/distributed/commands/text_search.c @@ -569,7 +569,7 @@ get_ts_parser_namelist(Oid tsparserOid) * being created. If missing_pk is false the function will error, explaining to the user * the text search configuration described in the statement doesn't exist. */ -ObjectAddress +List * CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok) { DefineStmt *stmt = castNode(DefineStmt, node); @@ -577,9 +577,9 @@ CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok) Oid objid = get_ts_config_oid(stmt->defnames, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSConfigRelationId, objid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TSConfigRelationId, objid); + return list_make1(address); } @@ -588,7 +588,7 @@ CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok) * being created. If missing_pk is false the function will error, explaining to the user * the text search dictionary described in the statement doesn't exist. */ -ObjectAddress +List * CreateTextSearchDictObjectAddress(Node *node, bool missing_ok) { DefineStmt *stmt = castNode(DefineStmt, node); @@ -596,9 +596,9 @@ CreateTextSearchDictObjectAddress(Node *node, bool missing_ok) Oid objid = get_ts_dict_oid(stmt->defnames, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSDictionaryRelationId, objid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TSDictionaryRelationId, objid); + return list_make1(address); } @@ -607,7 +607,7 @@ CreateTextSearchDictObjectAddress(Node *node, bool missing_ok) * SEARCH CONFIGURATION being renamed. Optionally errors if the configuration does not * exist based on the missing_ok flag passed in by the caller. */ -ObjectAddress +List * RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -615,9 +615,9 @@ RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSConfigRelationId, objid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TSConfigRelationId, objid); + return list_make1(address); } @@ -626,7 +626,7 @@ RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) * SEARCH DICTIONARY being renamed. Optionally errors if the dictionary does not * exist based on the missing_ok flag passed in by the caller. 
*/ -ObjectAddress +List * RenameTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -634,9 +634,9 @@ RenameTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok) Oid objid = get_ts_dict_oid(castNode(List, stmt->object), missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSDictionaryRelationId, objid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TSDictionaryRelationId, objid); + return list_make1(address); } @@ -645,16 +645,16 @@ RenameTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok) * SEARCH CONFIGURATION being altered. Optionally errors if the configuration does not * exist based on the missing_ok flag passed in by the caller. */ -ObjectAddress +List * AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) { AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); Oid objid = get_ts_config_oid(stmt->cfgname, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSConfigRelationId, objid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TSConfigRelationId, objid); + return list_make1(address); } @@ -663,16 +663,16 @@ AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) * SEARCH CONFIGURATION being altered. Optionally errors if the configuration does not * exist based on the missing_ok flag passed in by the caller. */ -ObjectAddress +List * AlterTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok) { AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node); Oid objid = get_ts_dict_oid(stmt->dictname, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSDictionaryRelationId, objid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TSDictionaryRelationId, objid); + return list_make1(address); } @@ -685,7 +685,7 @@ AlterTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok) * the triple checking before the error might be thrown. Errors for non-existing schema's * in edgecases will be raised by postgres while executing the move. */ -ObjectAddress +List * AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); @@ -723,9 +723,9 @@ AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok) } } - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, TSConfigRelationId, objid); - return sequenceAddress; + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, TSConfigRelationId, objid); + return list_make1(sequenceAddress); } @@ -738,7 +738,7 @@ AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok) * the triple checking before the error might be thrown. Errors for non-existing schema's * in edgecases will be raised by postgres while executing the move. 
*/ -ObjectAddress +List * AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); @@ -776,9 +776,9 @@ AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok) } } - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, TSDictionaryRelationId, objid); - return sequenceAddress; + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, TSDictionaryRelationId, objid); + return list_make1(sequenceAddress); } @@ -787,7 +787,7 @@ AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok) * SEARCH CONFIGURATION on which the comment is placed. Optionally errors if the * configuration does not exist based on the missing_ok flag passed in by the caller. */ -ObjectAddress +List * TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok) { CommentStmt *stmt = castNode(CommentStmt, node); @@ -795,9 +795,9 @@ TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok) Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSConfigRelationId, objid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TSConfigRelationId, objid); + return list_make1(address); } @@ -806,7 +806,7 @@ TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok) * DICTIONARY on which the comment is placed. Optionally errors if the dictionary does not * exist based on the missing_ok flag passed in by the caller. */ -ObjectAddress +List * TextSearchDictCommentObjectAddress(Node *node, bool missing_ok) { CommentStmt *stmt = castNode(CommentStmt, node); @@ -814,9 +814,9 @@ TextSearchDictCommentObjectAddress(Node *node, bool missing_ok) Oid objid = get_ts_dict_oid(castNode(List, stmt->object), missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TSDictionaryRelationId, objid); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TSDictionaryRelationId, objid); + return list_make1(address); } @@ -825,7 +825,7 @@ TextSearchDictCommentObjectAddress(Node *node, bool missing_ok) * SEARCH CONFIGURATION for which the owner is changed. Optionally errors if the * configuration does not exist based on the missing_ok flag passed in by the caller. */ -ObjectAddress +List * AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); @@ -833,8 +833,14 @@ AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok) Assert(stmt->objectType == OBJECT_TSCONFIGURATION); - return get_object_address(stmt->objectType, stmt->object, &relation, AccessShareLock, - missing_ok); + ObjectAddress objectAddress = get_object_address(stmt->objectType, stmt->object, + &relation, AccessShareLock, + missing_ok); + + ObjectAddress *objectAddressCopy = palloc0(sizeof(ObjectAddress)); + *objectAddressCopy = objectAddress; + + return list_make1(objectAddressCopy); } @@ -843,16 +849,20 @@ AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok) * SEARCH DICTIONARY for which the owner is changed. Optionally errors if the * configuration does not exist based on the missing_ok flag passed in by the caller. 
*/ -ObjectAddress +List * AlterTextSearchDictOwnerObjectAddress(Node *node, bool missing_ok) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); Relation relation = NULL; Assert(stmt->objectType == OBJECT_TSDICTIONARY); + ObjectAddress objectAddress = get_object_address(stmt->objectType, stmt->object, + &relation, AccessShareLock, + missing_ok); + ObjectAddress *objectAddressCopy = palloc0(sizeof(ObjectAddress)); + *objectAddressCopy = objectAddress; - return get_object_address(stmt->objectType, stmt->object, &relation, AccessShareLock, - missing_ok); + return list_make1(objectAddressCopy); } diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c index 94f4f4cef..12d7253e2 100644 --- a/src/backend/distributed/commands/trigger.c +++ b/src/backend/distributed/commands/trigger.c @@ -241,7 +241,7 @@ PostprocessCreateTriggerStmt(Node *node, const char *queryString) * Never returns NULL, but the objid in the address can be invalid if missingOk * was set to true. */ -ObjectAddress +List * CreateTriggerStmtObjectAddress(Node *node, bool missingOk) { CreateTrigStmt *createTriggerStmt = castNode(CreateTrigStmt, node); @@ -260,9 +260,9 @@ CreateTriggerStmtObjectAddress(Node *node, bool missingOk) triggerName, relationName))); } - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TriggerRelationId, triggerId); - return address; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TriggerRelationId, triggerId); + return list_make1(address); } diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index 9931f430a..9ebe51510 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -300,16 +300,16 @@ EnumValsList(Oid typeOid) * Never returns NULL, but the objid in the address could be invalid if missing_ok was set * to true. */ -ObjectAddress +List * CompositeTypeStmtObjectAddress(Node *node, bool missing_ok) { CompositeTypeStmt *stmt = castNode(CompositeTypeStmt, node); TypeName *typeName = MakeTypeNameFromRangeVar(stmt->typevar); Oid typeOid = LookupNonAssociatedArrayTypeNameOid(NULL, typeName, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TypeRelationId, typeOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TypeRelationId, typeOid); - return address; + return list_make1(address); } @@ -321,16 +321,16 @@ CompositeTypeStmtObjectAddress(Node *node, bool missing_ok) * Never returns NULL, but the objid in the address could be invalid if missing_ok was set * to true. */ -ObjectAddress +List * CreateEnumStmtObjectAddress(Node *node, bool missing_ok) { CreateEnumStmt *stmt = castNode(CreateEnumStmt, node); TypeName *typeName = makeTypeNameFromNameList(stmt->typeName); Oid typeOid = LookupNonAssociatedArrayTypeNameOid(NULL, typeName, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TypeRelationId, typeOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TypeRelationId, typeOid); - return address; + return list_make1(address); } @@ -342,7 +342,7 @@ CreateEnumStmtObjectAddress(Node *node, bool missing_ok) * Never returns NULL, but the objid in the address could be invalid if missing_ok was set * to true. 
 */
-ObjectAddress
+List *
 AlterTypeStmtObjectAddress(Node *node, bool missing_ok)
 {
 	AlterTableStmt *stmt = castNode(AlterTableStmt, node);
@@ -350,10 +350,10 @@ AlterTypeStmtObjectAddress(Node *node, bool missing_ok)
 	TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation);
 	Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);

-	ObjectAddress address = { 0 };
-	ObjectAddressSet(address, TypeRelationId, typeOid);
+	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
+	ObjectAddressSet(*address, TypeRelationId, typeOid);

-	return address;
+	return list_make1(address);
 }


@@ -361,16 +361,16 @@ AlterTypeStmtObjectAddress(Node *node, bool missing_ok)
  * AlterEnumStmtObjectAddress returns the ObjectAddress of the enum type that is the
  * object of the AlterEnumStmt. Errors if missing_ok is false.
  */
-ObjectAddress
+List *
 AlterEnumStmtObjectAddress(Node *node, bool missing_ok)
 {
 	AlterEnumStmt *stmt = castNode(AlterEnumStmt, node);
 	TypeName *typeName = makeTypeNameFromNameList(stmt->typeName);
 	Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);

-	ObjectAddress address = { 0 };
-	ObjectAddressSet(address, TypeRelationId, typeOid);
+	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
+	ObjectAddressSet(*address, TypeRelationId, typeOid);

-	return address;
+	return list_make1(address);
 }


@@ -378,7 +378,7 @@ AlterEnumStmtObjectAddress(Node *node, bool missing_ok)
  * RenameTypeStmtObjectAddress returns the ObjectAddress of the type that is the object
  * of the RenameStmt. Errors if missing_ok is false.
  */
-ObjectAddress
+List *
 RenameTypeStmtObjectAddress(Node *node, bool missing_ok)
 {
 	RenameStmt *stmt = castNode(RenameStmt, node);
@@ -386,10 +386,10 @@ RenameTypeStmtObjectAddress(Node *node, bool missing_ok)
 	TypeName *typeName = makeTypeNameFromNameList((List *) stmt->object);
 	Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);

-	ObjectAddress address = { 0 };
-	ObjectAddressSet(address, TypeRelationId, typeOid);
+	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
+	ObjectAddressSet(*address, TypeRelationId, typeOid);

-	return address;
+	return list_make1(address);
 }


@@ -402,7 +402,7 @@ RenameTypeStmtObjectAddress(Node *node, bool missing_ok)
 * new schema. Errors if missing_ok is false and the type cannot be found in either of the
 * schemas.
 */
-ObjectAddress
+List *
 AlterTypeSchemaStmtObjectAddress(Node *node, bool missing_ok)
 {
 	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
@@ -447,10 +447,10 @@ AlterTypeSchemaStmtObjectAddress(Node *node, bool missing_ok)
 		}
 	}

-	ObjectAddress address = { 0 };
-	ObjectAddressSet(address, TypeRelationId, typeOid);
+	ObjectAddress *address = palloc0(sizeof(ObjectAddress));
+	ObjectAddressSet(*address, TypeRelationId, typeOid);

-	return address;
+	return list_make1(address);
 }


@@ -462,7 +462,7 @@ AlterTypeSchemaStmtObjectAddress(Node *node, bool missing_ok)
 * changed as Attributes are not distributed on their own but as a side effect of the
 * whole type distribution.
*/ -ObjectAddress +List * RenameTypeAttributeStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -471,10 +471,10 @@ RenameTypeAttributeStmtObjectAddress(Node *node, bool missing_ok) TypeName *typeName = MakeTypeNameFromRangeVar(stmt->relation); Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TypeRelationId, typeOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TypeRelationId, typeOid); - return address; + return list_make1(address); } @@ -482,7 +482,7 @@ RenameTypeAttributeStmtObjectAddress(Node *node, bool missing_ok) * AlterTypeOwnerObjectAddress returns the ObjectAddress of the type that is the object * of the AlterOwnerStmt. Errors if missing_ok is false. */ -ObjectAddress +List * AlterTypeOwnerObjectAddress(Node *node, bool missing_ok) { AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); @@ -490,10 +490,10 @@ AlterTypeOwnerObjectAddress(Node *node, bool missing_ok) TypeName *typeName = makeTypeNameFromNameList((List *) stmt->object); Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); - ObjectAddress address = { 0 }; - ObjectAddressSet(address, TypeRelationId, typeOid); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, TypeRelationId, typeOid); - return address; + return list_make1(address); } diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c index adae0c84e..daf255652 100644 --- a/src/backend/distributed/commands/view.c +++ b/src/backend/distributed/commands/view.c @@ -152,17 +152,17 @@ PostprocessViewStmt(Node *node, const char *queryString) * ViewStmtObjectAddress returns the ObjectAddress for the subject of the * CREATE [OR REPLACE] VIEW statement. */ -ObjectAddress +List * ViewStmtObjectAddress(Node *node, bool missing_ok) { ViewStmt *stmt = castNode(ViewStmt, node); Oid viewOid = RangeVarGetRelid(stmt->view, NoLock, missing_ok); - ObjectAddress viewAddress = { 0 }; - ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + ObjectAddress *viewAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*viewAddress, RelationRelationId, viewOid); - return viewAddress; + return list_make1(viewAddress); } @@ -520,16 +520,16 @@ PostprocessAlterViewStmt(Node *node, const char *queryString) * AlterViewStmtObjectAddress returns the ObjectAddress for the subject of the * ALTER VIEW statement. */ -ObjectAddress +List * AlterViewStmtObjectAddress(Node *node, bool missing_ok) { AlterTableStmt *stmt = castNode(AlterTableStmt, node); Oid viewOid = RangeVarGetRelid(stmt->relation, NoLock, missing_ok); - ObjectAddress viewAddress = { 0 }; - ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + ObjectAddress *viewAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*viewAddress, RelationRelationId, viewOid); - return viewAddress; + return list_make1(viewAddress); } @@ -572,17 +572,17 @@ PreprocessRenameViewStmt(Node *node, const char *queryString, * RenameViewStmtObjectAddress returns the ObjectAddress of the view that is the object * of the RenameStmt. Errors if missing_ok is false. 
*/ -ObjectAddress +List * RenameViewStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); Oid viewOid = RangeVarGetRelid(stmt->relation, NoLock, missing_ok); - ObjectAddress viewAddress = { 0 }; - ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + ObjectAddress *viewAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*viewAddress, RelationRelationId, viewOid); - return viewAddress; + return list_make1(viewAddress); } @@ -648,7 +648,7 @@ PostprocessAlterViewSchemaStmt(Node *node, const char *queryString) * AlterViewSchemaStmtObjectAddress returns the ObjectAddress of the view that is the object * of the alter schema statement. */ -ObjectAddress +List * AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); @@ -676,10 +676,10 @@ AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok) } } - ObjectAddress viewAddress = { 0 }; - ObjectAddressSet(viewAddress, RelationRelationId, viewOid); + ObjectAddress *viewAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*viewAddress, RelationRelationId, viewOid); - return viewAddress; + return list_make1(viewAddress); } diff --git a/src/backend/distributed/deparser/objectaddress.c b/src/backend/distributed/deparser/objectaddress.c index a9f1663bc..123445c23 100644 --- a/src/backend/distributed/deparser/objectaddress.c +++ b/src/backend/distributed/deparser/objectaddress.c @@ -33,11 +33,23 @@ GetObjectAddressFromParseTree(Node *parseTree, bool missing_ok) ereport(ERROR, (errmsg("unsupported statement to get object address for"))); } - return ops->address(parseTree, missing_ok); + List *objectAddresses = ops->address(parseTree, missing_ok); + + if (list_length(objectAddresses) > 1) + { + ereport(ERROR, (errmsg( + "citus does not support multiple object addresses in GetObjectAddressFromParseTree"))); + } + + Assert(list_length(objectAddresses) == 1); + + ObjectAddress *objectAddress = linitial(objectAddresses); + + return *objectAddress; } -ObjectAddress +List * RenameAttributeStmtObjectAddress(Node *node, bool missing_ok) { RenameStmt *stmt = castNode(RenameStmt, node); @@ -67,11 +79,11 @@ RenameAttributeStmtObjectAddress(Node *node, bool missing_ok) * Never returns NULL, but the objid in the address could be invalid if missing_ok was set * to true. 
*/ -ObjectAddress +List * CreateExtensionStmtObjectAddress(Node *node, bool missing_ok) { CreateExtensionStmt *stmt = castNode(CreateExtensionStmt, node); - ObjectAddress address = { 0 }; + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); const char *extensionName = stmt->extname; @@ -85,7 +97,7 @@ CreateExtensionStmtObjectAddress(Node *node, bool missing_ok) extensionName))); } - ObjectAddressSet(address, ExtensionRelationId, extensionoid); + ObjectAddressSet(*address, ExtensionRelationId, extensionoid); - return address; + return list_make1(address); } diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 370810cfa..0bc565aff 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -63,7 +63,7 @@ typedef struct DistributeObjectOps void (*qualify)(Node *); List * (*preprocess)(Node *, const char *, ProcessUtilityContext); List * (*postprocess)(Node *, const char *); - ObjectAddress (*address)(Node *, bool); + List * (*address)(Node *, bool); bool markDistributed; /* fields used by common implementations, omitted for specialized implementations */ @@ -159,24 +159,24 @@ extern bool CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *d /* collation.c - forward declarations */ extern char * CreateCollationDDL(Oid collationId); extern List * CreateCollationDDLsIdempotent(Oid collationId); -extern ObjectAddress AlterCollationOwnerObjectAddress(Node *stmt, bool missing_ok); -extern ObjectAddress RenameCollationStmtObjectAddress(Node *stmt, bool missing_ok); -extern ObjectAddress AlterCollationSchemaStmtObjectAddress(Node *stmt, - bool missing_ok); +extern List * AlterCollationOwnerObjectAddress(Node *stmt, bool missing_ok); +extern List * RenameCollationStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * AlterCollationSchemaStmtObjectAddress(Node *stmt, + bool missing_ok); extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *address); -extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok); /* database.c - forward declarations */ -extern ObjectAddress AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok); +extern List * AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok); extern List * DatabaseOwnerDDLCommands(const ObjectAddress *address); /* domain.c - forward declarations */ -extern ObjectAddress CreateDomainStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress AlterDomainStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress DomainRenameConstraintStmtObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress RenameDomainStmtObjectAddress(Node *node, bool missing_ok); +extern List * CreateDomainStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterDomainStmtObjectAddress(Node *node, bool missing_ok); +extern List * DomainRenameConstraintStmtObjectAddress(Node *node, + bool missing_ok); +extern List * AlterDomainOwnerStmtObjectAddress(Node *node, bool missing_ok); +extern List * RenameDomainStmtObjectAddress(Node *node, bool missing_ok); extern CreateDomainStmt * RecreateDomainStmt(Oid domainOid); extern Oid get_constraint_typid(Oid conoid); @@ -208,10 +208,10 @@ extern List * PreprocessAlterExtensionContentsStmt(Node *node, ProcessUtilityContext processUtilityContext); extern List * 
CreateExtensionDDLCommand(const ObjectAddress *extensionAddress); -extern ObjectAddress AlterExtensionSchemaStmtObjectAddress(Node *stmt, - bool missing_ok); -extern ObjectAddress AlterExtensionUpdateStmtObjectAddress(Node *stmt, - bool missing_ok); +extern List * AlterExtensionSchemaStmtObjectAddress(Node *stmt, + bool missing_ok); +extern List * AlterExtensionUpdateStmtObjectAddress(Node *stmt, + bool missing_ok); extern void CreateExtensionWithVersion(char *extname, char *extVersion); extern void AlterExtensionUpdateStmt(char *extname, char *extVersion); extern int GetExtensionVersionNumber(char *extVersion); @@ -263,11 +263,11 @@ extern Acl * GetPrivilegesForFDW(Oid FDWOid); extern List * PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); -extern ObjectAddress CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress AlterForeignServerOwnerStmtObjectAddress(Node *node, bool - missing_ok); +extern List * CreateForeignServerStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterForeignServerStmtObjectAddress(Node *node, bool missing_ok); +extern List * RenameForeignServerStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterForeignServerOwnerStmtObjectAddress(Node *node, bool + missing_ok); extern List * GetForeignServerCreateDDLCommand(Oid serverId); @@ -282,26 +282,26 @@ extern List * PreprocessCreateFunctionStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PostprocessCreateFunctionStmt(Node *stmt, const char *queryString); -extern ObjectAddress CreateFunctionStmtObjectAddress(Node *stmt, - bool missing_ok); -extern ObjectAddress DefineAggregateStmtObjectAddress(Node *stmt, - bool missing_ok); +extern List * CreateFunctionStmtObjectAddress(Node *stmt, + bool missing_ok); +extern List * DefineAggregateStmtObjectAddress(Node *stmt, + bool missing_ok); extern List * PreprocessAlterFunctionStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); -extern ObjectAddress AlterFunctionStmtObjectAddress(Node *stmt, - bool missing_ok); -extern ObjectAddress RenameFunctionStmtObjectAddress(Node *stmt, - bool missing_ok); -extern ObjectAddress AlterFunctionOwnerObjectAddress(Node *stmt, - bool missing_ok); -extern ObjectAddress AlterFunctionSchemaStmtObjectAddress(Node *stmt, - bool missing_ok); +extern List * AlterFunctionStmtObjectAddress(Node *stmt, + bool missing_ok); +extern List * RenameFunctionStmtObjectAddress(Node *stmt, + bool missing_ok); +extern List * AlterFunctionOwnerObjectAddress(Node *stmt, + bool missing_ok); +extern List * AlterFunctionSchemaStmtObjectAddress(Node *stmt, + bool missing_ok); extern List * PreprocessAlterFunctionDependsStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); -extern ObjectAddress AlterFunctionDependsStmtObjectAddress(Node *stmt, - bool missing_ok); +extern List * AlterFunctionDependsStmtObjectAddress(Node *stmt, + bool missing_ok); extern List * PreprocessGrantOnFunctionStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PostprocessGrantOnFunctionStmt(Node *node, const char *queryString); @@ -340,7 +340,7 @@ extern List * ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor 
extern bool IsReindexWithParam_compat(ReindexStmt *stmt, char *paramName); /* objectaddress.c - forward declarations */ -extern ObjectAddress CreateExtensionStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * CreateExtensionStmtObjectAddress(Node *stmt, bool missing_ok); /* policy.c - forward declarations */ @@ -376,10 +376,10 @@ extern List * PostprocessAlterRoleStmt(Node *stmt, const char *queryString); extern List * PreprocessAlterRoleSetStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * GenerateAlterRoleSetCommandForRole(Oid roleid); -extern ObjectAddress AlterRoleStmtObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress AlterRoleSetStmtObjectAddress(Node *node, - bool missing_ok); +extern List * AlterRoleStmtObjectAddress(Node *node, + bool missing_ok); +extern List * AlterRoleSetStmtObjectAddress(Node *node, + bool missing_ok); extern List * PreprocessCreateRoleStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PreprocessDropRoleStmt(Node *stmt, const char *queryString, @@ -388,7 +388,7 @@ extern List * PreprocessGrantRoleStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PostprocessGrantRoleStmt(Node *stmt, const char *queryString); extern List * GenerateCreateOrAlterRoleCommand(Oid roleOid); -ObjectAddress CreateRoleStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * CreateRoleStmtObjectAddress(Node *stmt, bool missing_ok); extern void UnmarkRolesDistributed(List *roles); extern List * FilterDistributedRoles(List *roles); @@ -402,8 +402,8 @@ extern List * PreprocessAlterObjectSchemaStmt(Node *alterObjectSchemaStmt, const char *alterObjectSchemaCommand); extern List * PreprocessGrantOnSchemaStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); -extern ObjectAddress CreateSchemaStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok); +extern List * CreateSchemaStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok); /* sequence.c - forward declarations */ extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString, @@ -422,10 +422,10 @@ extern List * PreprocessRenameSequenceStmt(Node *node, const char *queryString, extern List * PreprocessGrantOnSequenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PostprocessGrantOnSequenceStmt(Node *node, const char *queryString); -extern ObjectAddress AlterSequenceStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress RenameSequenceStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterSequenceStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterSequenceSchemaStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterSequenceOwnerStmtObjectAddress(Node *node, bool missing_ok); +extern List * RenameSequenceStmtObjectAddress(Node *node, bool missing_ok); extern void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt); extern void ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt); extern char * GenerateBackupNameForSequenceCollision(const ObjectAddress *address); @@ -436,7 
+436,7 @@ extern void RenameExistingSequenceWithDifferentTypeIfExists(RangeVar *sequence, extern List * PreprocessCreateStatisticsStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PostprocessCreateStatisticsStmt(Node *node, const char *queryString); -extern ObjectAddress CreateStatisticsStmtObjectAddress(Node *node, bool missingOk); +extern List * CreateStatisticsStmtObjectAddress(Node *node, bool missingOk); extern List * PreprocessDropStatisticsStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PreprocessAlterStatisticsRenameStmt(Node *node, const char *queryString, @@ -446,7 +446,7 @@ extern List * PreprocessAlterStatisticsSchemaStmt(Node *node, const char *queryS ProcessUtilityContext processUtilityContext); extern List * PostprocessAlterStatisticsSchemaStmt(Node *node, const char *queryString); -extern ObjectAddress AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk); +extern List * AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk); extern List * PreprocessAlterStatisticsStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PreprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString, @@ -487,8 +487,8 @@ extern void ErrorUnsupportedAlterTableAddColumn(Oid relationId, AlterTableCmd *c extern void ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod, char referencingReplicationModel, Var *distributionColumn, uint32 colocationId); -extern ObjectAddress AlterTableSchemaStmtObjectAddress(Node *stmt, - bool missing_ok); +extern List * AlterTableSchemaStmtObjectAddress(Node *stmt, + bool missing_ok); extern List * MakeNameListFromRangeVar(const RangeVar *rel); extern Oid GetSequenceOid(Oid relationId, AttrNumber attnum); extern bool ConstrTypeUsesIndex(ConstrType constrType); @@ -499,30 +499,30 @@ extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address); extern List * GetCreateTextSearchDictionaryStatements(const ObjectAddress *address); extern List * CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address); extern List * CreateTextSearchDictDDLCommandsIdempotent(const ObjectAddress *address); -extern ObjectAddress CreateTextSearchConfigurationObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress CreateTextSearchDictObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress RenameTextSearchConfigurationStmtObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress RenameTextSearchDictionaryStmtObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress AlterTextSearchConfigurationStmtObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress AlterTextSearchDictionaryStmtObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress TextSearchConfigurationCommentObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress TextSearchDictCommentObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress AlterTextSearchConfigurationOwnerObjectAddress(Node *node, - bool missing_ok); -extern ObjectAddress AlterTextSearchDictOwnerObjectAddress(Node *node, - bool missing_ok); +extern List * CreateTextSearchConfigurationObjectAddress(Node *node, + bool missing_ok); +extern List * 
CreateTextSearchDictObjectAddress(Node *node, + bool missing_ok); +extern List * RenameTextSearchConfigurationStmtObjectAddress(Node *node, + bool missing_ok); +extern List * RenameTextSearchDictionaryStmtObjectAddress(Node *node, + bool missing_ok); +extern List * AlterTextSearchConfigurationStmtObjectAddress(Node *node, + bool missing_ok); +extern List * AlterTextSearchDictionaryStmtObjectAddress(Node *node, + bool missing_ok); +extern List * AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, + bool missing_ok); +extern List * AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, + bool missing_ok); +extern List * TextSearchConfigurationCommentObjectAddress(Node *node, + bool missing_ok); +extern List * TextSearchDictCommentObjectAddress(Node *node, + bool missing_ok); +extern List * AlterTextSearchConfigurationOwnerObjectAddress(Node *node, + bool missing_ok); +extern List * AlterTextSearchDictOwnerObjectAddress(Node *node, + bool missing_ok); extern char * GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address); extern char * GenerateBackupNameForTextSearchDict(const ObjectAddress *address); extern List * get_ts_config_namelist(Oid tsconfigOid); @@ -535,16 +535,16 @@ extern List * PreprocessRenameTypeAttributeStmt(Node *stmt, const char *queryStr ProcessUtilityContext processUtilityContext); extern Node * CreateTypeStmtByObjectAddress(const ObjectAddress *address); -extern ObjectAddress CompositeTypeStmtObjectAddress(Node *stmt, bool missing_ok); -extern ObjectAddress CreateEnumStmtObjectAddress(Node *stmt, bool missing_ok); -extern ObjectAddress AlterTypeStmtObjectAddress(Node *stmt, bool missing_ok); -extern ObjectAddress AlterEnumStmtObjectAddress(Node *stmt, bool missing_ok); -extern ObjectAddress RenameTypeStmtObjectAddress(Node *stmt, bool missing_ok); -extern ObjectAddress AlterTypeSchemaStmtObjectAddress(Node *stmt, - bool missing_ok); -extern ObjectAddress RenameTypeAttributeStmtObjectAddress(Node *stmt, - bool missing_ok); -extern ObjectAddress AlterTypeOwnerObjectAddress(Node *stmt, bool missing_ok); +extern List * CompositeTypeStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * CreateEnumStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * AlterTypeStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * AlterEnumStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * RenameTypeStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * AlterTypeSchemaStmtObjectAddress(Node *stmt, + bool missing_ok); +extern List * RenameTypeAttributeStmtObjectAddress(Node *stmt, + bool missing_ok); +extern List * AlterTypeOwnerObjectAddress(Node *stmt, bool missing_ok); extern List * CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress); extern char * GenerateBackupNameForTypeCollision(const ObjectAddress *address); @@ -565,8 +565,8 @@ extern List * PostprocessVacuumStmt(Node *node, const char *vacuumCommand); extern List * PreprocessViewStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PostprocessViewStmt(Node *node, const char *queryString); -extern ObjectAddress ViewStmtObjectAddress(Node *node, bool missing_ok); -extern ObjectAddress AlterViewStmtObjectAddress(Node *node, bool missing_ok); +extern List * ViewStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterViewStmtObjectAddress(Node *node, bool missing_ok); extern List * PreprocessDropViewStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); 
extern char * CreateViewDDLCommand(Oid viewOid); @@ -582,11 +582,11 @@ extern List * PreprocessAlterViewStmt(Node *node, const char *queryString, extern List * PostprocessAlterViewStmt(Node *node, const char *queryString); extern List * PreprocessRenameViewStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); -extern ObjectAddress RenameViewStmtObjectAddress(Node *node, bool missing_ok); +extern List * RenameViewStmtObjectAddress(Node *node, bool missing_ok); extern List * PreprocessAlterViewSchemaStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PostprocessAlterViewSchemaStmt(Node *node, const char *queryString); -extern ObjectAddress AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok); +extern List * AlterViewSchemaStmtObjectAddress(Node *node, bool missing_ok); extern bool IsViewRenameStmt(RenameStmt *renameStmt); /* trigger.c - forward declarations */ @@ -594,7 +594,7 @@ extern List * GetExplicitTriggerCommandList(Oid relationId); extern HeapTuple GetTriggerTupleById(Oid triggerId, bool missingOk); extern List * GetExplicitTriggerIdList(Oid relationId); extern List * PostprocessCreateTriggerStmt(Node *node, const char *queryString); -extern ObjectAddress CreateTriggerStmtObjectAddress(Node *node, bool missingOk); +extern List * CreateTriggerStmtObjectAddress(Node *node, bool missingOk); extern void CreateTriggerEventExtendNames(CreateTrigStmt *createTriggerStmt, char *schemaName, uint64 shardId); extern List * PostprocessAlterTriggerRenameStmt(Node *node, const char *queryString); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index f036d7ea8..f3bdb19b2 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -149,7 +149,7 @@ extern char * GetTypeNamespaceNameByNameList(List *names); extern Oid TypeOidGetNamespaceOid(Oid typeOid); extern ObjectAddress GetObjectAddressFromParseTree(Node *parseTree, bool missing_ok); -extern ObjectAddress RenameAttributeStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * RenameAttributeStmtObjectAddress(Node *stmt, bool missing_ok); /* forward declarations for deparse_view_stmts.c */ extern void QualifyDropViewStmt(Node *node); From ebb6d1c8c00c29beb08b162ea87edc11bf8c7814 Mon Sep 17 00:00:00 2001 From: aykutbozkurt Date: Thu, 14 Jul 2022 16:33:08 +0300 Subject: [PATCH 04/10] refactor code where GetObjectAddressFromParseTree is called because it returns list of addresses now --- .../distributed/commands/alter_table.c | 9 +- .../citus_add_local_table_to_metadata.c | 6 +- src/backend/distributed/commands/common.c | 37 +++++--- .../commands/create_distributed_table.c | 7 +- .../distributed/commands/dependencies.c | 70 +++++++++++++-- src/backend/distributed/commands/extension.c | 23 +++-- .../commands/foreign_data_wrapper.c | 10 ++- .../distributed/commands/foreign_server.c | 12 +-- src/backend/distributed/commands/function.c | 75 ++++++++++------ src/backend/distributed/commands/grant.c | 6 +- src/backend/distributed/commands/index.c | 8 +- src/backend/distributed/commands/role.c | 30 ++++--- src/backend/distributed/commands/schema.c | 7 +- src/backend/distributed/commands/sequence.c | 87 +++++++++++------- src/backend/distributed/commands/statistics.c | 21 +++-- src/backend/distributed/commands/table.c | 32 ++++--- src/backend/distributed/commands/trigger.c | 8 +- src/backend/distributed/commands/type.c | 8 +- .../distributed/commands/utility_hook.c | 8 +- 
src/backend/distributed/commands/view.c | 87 ++++++++++++------ .../distributed/deparser/objectaddress.c | 20 +---- src/backend/distributed/metadata/dependency.c | 90 ++++++++++++++++--- src/backend/distributed/metadata/distobject.c | 36 ++++++-- .../distributed/metadata/metadata_sync.c | 15 ++-- .../distributed/operations/create_shards.c | 6 +- .../distributed/operations/stage_protocol.c | 6 +- .../worker/worker_create_or_replace.c | 15 ++-- .../distributed/worker/worker_drop_protocol.c | 18 ++-- src/include/distributed/deparser.h | 2 +- src/include/distributed/metadata/dependency.h | 7 +- src/include/distributed/metadata/distobject.h | 6 +- src/include/distributed/metadata_utility.h | 6 +- 32 files changed, 525 insertions(+), 253 deletions(-) diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index 9c0f07be3..8258919f4 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -1302,7 +1302,7 @@ ErrorIfUnsupportedCascadeObjects(Oid relationId) * * Extension dependency is different than the rest. If an object depends on an extension * dropping the object would drop the extension too. - * So we check with IsObjectAddressOwnedByExtension function. + * So we check with IsAnyObjectAddressOwnedByExtension function. */ static bool DoesCascadeDropUnsupportedObject(Oid classId, Oid objectId, HTAB *nodeMap) @@ -1315,10 +1315,9 @@ DoesCascadeDropUnsupportedObject(Oid classId, Oid objectId, HTAB *nodeMap) return false; } - ObjectAddress objectAddress = { 0 }; - ObjectAddressSet(objectAddress, classId, objectId); - - if (IsObjectAddressOwnedByExtension(&objectAddress, NULL)) + ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*objectAddress, classId, objectId); + if (IsAnyObjectAddressOwnedByExtension(list_make1(objectAddress), NULL)) { return true; } diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index 9d58cc716..0e2bd0ecd 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -307,8 +307,8 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve } } - ObjectAddress tableAddress = { 0 }; - ObjectAddressSet(tableAddress, RelationRelationId, relationId); + ObjectAddress *tableAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*tableAddress, RelationRelationId, relationId); /* * Ensure that the sequences used in column defaults of the table @@ -320,7 +320,7 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve * Ensure dependencies exist as we will create shell table on the other nodes * in the MX case. 
*/ - EnsureDependenciesExistOnAllNodes(&tableAddress); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(tableAddress)); /* * Make sure that existing reference tables have been replicated to all diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index 1c6a71de3..29cb96e9a 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -61,22 +61,26 @@ PostprocessCreateDistributedObjectFromCatalogStmt(Node *stmt, const char *queryS return NIL; } - ObjectAddress address = GetObjectAddressFromParseTree(stmt, false); + List *addresses = GetObjectAddressListFromParseTree(stmt, false); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); EnsureCoordinator(); EnsureSequentialMode(ops->objectType); /* If the object has any unsupported dependency warn, and only create locally */ - DeferredErrorMessage *depError = DeferErrorIfHasUnsupportedDependency(&address); + DeferredErrorMessage *depError = DeferErrorIfAnyObjectHasUnsupportedDependency( + addresses); if (depError != NULL) { RaiseDeferredError(depError, WARNING); return NIL; } - EnsureDependenciesExistOnAllNodes(&address); + EnsureAllObjectDependenciesExistOnAllNodes(addresses); - List *commands = GetDependencyCreateDDLCommands(&address); + List *commands = GetAllDependencyCreateDDLCommands(addresses); commands = lcons(DISABLE_DDL_PROPAGATION, commands); commands = lappend(commands, ENABLE_DDL_PROPAGATION); @@ -111,8 +115,12 @@ PreprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString, const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); Assert(ops != NULL); - ObjectAddress address = GetObjectAddressFromParseTree(stmt, false); - if (!ShouldPropagateObject(&address)) + List *addresses = GetObjectAddressListFromParseTree(stmt, false); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + if (!ShouldPropagateAnyObject(addresses)) { return NIL; } @@ -156,8 +164,12 @@ PostprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString) const DistributeObjectOps *ops = GetDistributeObjectOps(stmt); Assert(ops != NULL); - ObjectAddress address = GetObjectAddressFromParseTree(stmt, false); - if (!ShouldPropagateObject(&address)) + List *addresses = GetObjectAddressListFromParseTree(stmt, false); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + if (!ShouldPropagateAnyObject(addresses)) { return NIL; } @@ -168,7 +180,7 @@ PostprocessAlterDistributedObjectStmt(Node *stmt, const char *queryString) return NIL; } - EnsureDependenciesExistOnAllNodes(&address); + EnsureAllObjectDependenciesExistOnAllNodes(addresses); return NIL; } @@ -223,11 +235,10 @@ PreprocessDropDistributedObjectStmt(Node *node, const char *queryString, Relation rel = NULL; /* not used, but required to pass to get_object_address */ ObjectAddress address = get_object_address(stmt->removeType, object, &rel, AccessShareLock, stmt->missing_ok); - if (IsObjectDistributed(&address)) + ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress)); + *addressPtr = address; + if (IsAnyObjectDistributed(list_make1(addressPtr))) { - ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress)); - *addressPtr = address; - distributedObjects = lappend(distributedObjects, object); distributedObjectAddresses = lappend(distributedObjectAddresses, addressPtr); } diff --git a/src/backend/distributed/commands/create_distributed_table.c 
b/src/backend/distributed/commands/create_distributed_table.c index 26a905f23..1416cbb3b 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -442,10 +442,9 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName, * via their own connection and committed immediately so they become visible to all * sessions creating shards. */ - ObjectAddress tableAddress = { 0 }; - ObjectAddressSet(tableAddress, RelationRelationId, relationId); - - EnsureDependenciesExistOnAllNodes(&tableAddress); + ObjectAddress *tableAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*tableAddress, RelationRelationId, relationId); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(tableAddress)); char replicationModel = DecideReplicationModel(distributionMethod, colocateWithTableName, diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 2f8182fe6..6329cf6f4 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -36,6 +36,9 @@ static void ErrorIfCircularDependencyExists(const ObjectAddress *objectAddress); static int ObjectAddressComparator(const void *a, const void *b); static List * FilterObjectAddressListByPredicate(List *objectAddressList, AddressPredicate predicate); +static void EnsureDependenciesExistOnAllNodes(const ObjectAddress *target); +static List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency); +static bool ShouldPropagateObject(const ObjectAddress *address); /* * EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes @@ -51,7 +54,7 @@ static List * FilterObjectAddressListByPredicate(List *objectAddressList, * This is solved by creating the dependencies in an idempotent manner, either via * postgres native CREATE IF NOT EXISTS, or citus helper functions. */ -void +static void EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) { List *dependenciesWithCommands = NIL; @@ -142,6 +145,21 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) } +/* + * EnsureAllObjectDependenciesExistOnAllNodes iteratively calls EnsureDependenciesExistOnAllNodes + * for given targets. + */ +void +EnsureAllObjectDependenciesExistOnAllNodes(const List *targets) +{ + ObjectAddress *target = NULL; + foreach_ptr(target, targets) + { + EnsureDependenciesExistOnAllNodes(target); + } +} + + /* * EnsureDependenciesCanBeDistributed ensures all dependencies of the given object * can be distributed. @@ -153,7 +171,8 @@ EnsureDependenciesCanBeDistributed(const ObjectAddress *objectAddress) ErrorIfCircularDependencyExists(objectAddress); /* If the object has any unsupported dependency, error out */ - DeferredErrorMessage *depError = DeferErrorIfHasUnsupportedDependency(objectAddress); + DeferredErrorMessage *depError = DeferErrorIfAnyObjectHasUnsupportedDependency( + list_make1((ObjectAddress *) objectAddress)); if (depError != NULL) { @@ -310,7 +329,7 @@ GetDistributableDependenciesForObject(const ObjectAddress *target) * GetDependencyCreateDDLCommands returns a list (potentially empty or NIL) of ddl * commands to execute on a worker to create the object. 
*/ -List * +static List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency) { switch (getObjectClass(dependency)) @@ -488,6 +507,25 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) } +/* + * GetAllDependencyCreateDDLCommands iteratively calls GetDependencyCreateDDLCommands + * for given dependencies. + */ +List * +GetAllDependencyCreateDDLCommands(const List *dependencies) +{ + List *commands = NIL; + + ObjectAddress *dependency = NULL; + foreach_ptr(dependency, dependencies) + { + commands = list_concat(commands, GetDependencyCreateDDLCommands(dependency)); + } + + return commands; +} + + /* * ReplicateAllObjectsToNodeCommandList returns commands to replicate all * previously marked objects to a worker node. The function also sets @@ -531,7 +569,7 @@ ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort) ObjectAddress *dependency = NULL; foreach_ptr(dependency, dependencies) { - if (IsObjectAddressOwnedByExtension(dependency, NULL)) + if (IsAnyObjectAddressOwnedByExtension(list_make1(dependency), NULL)) { /* * we expect extension-owned objects to be created as a result @@ -663,7 +701,7 @@ ShouldPropagateCreateInCoordinatedTransction() * ShouldPropagateObject determines if we should be propagating DDLs based * on their object address. */ -bool +static bool ShouldPropagateObject(const ObjectAddress *address) { if (!ShouldPropagate()) @@ -671,7 +709,7 @@ ShouldPropagateObject(const ObjectAddress *address) return false; } - if (!IsObjectDistributed(address)) + if (!IsAnyObjectDistributed(list_make1((ObjectAddress *) address))) { /* do not propagate for non-distributed types */ return false; @@ -681,6 +719,26 @@ ShouldPropagateObject(const ObjectAddress *address) } +/* + * ShouldPropagateAnyObject determines if we should be propagating DDLs based + * on their object addresses. + */ +bool +ShouldPropagateAnyObject(List *addresses) +{ + ObjectAddress *address = NULL; + foreach_ptr(address, addresses) + { + if (ShouldPropagateObject(address)) + { + return true; + } + } + + return false; +} + + /* * FilterObjectAddressListByPredicate takes a list of ObjectAddress *'s and returns a list * only containing the ObjectAddress *'s for which the predicate returned true. 
diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index e4979f035..122c68dfa 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -181,9 +181,12 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString) (void *) createExtensionStmtSql, ENABLE_DDL_PROPAGATION); - ObjectAddress extensionAddress = GetObjectAddressFromParseTree(node, false); + List *extensionAddresses = GetObjectAddressListFromParseTree(node, false); - EnsureDependenciesExistOnAllNodes(&extensionAddress); + /* the code-path only supports a single object */ + Assert(list_length(extensionAddresses) == 1); + + EnsureAllObjectDependenciesExistOnAllNodes(extensionAddresses); return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -319,10 +322,9 @@ FilterDistributedExtensions(List *extensionObjectList) continue; } - ObjectAddress address = { 0 }; - ObjectAddressSet(address, ExtensionRelationId, extensionOid); - - if (!IsObjectDistributed(&address)) + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, ExtensionRelationId, extensionOid); + if (!IsAnyObjectDistributed(list_make1(address))) { continue; } @@ -411,7 +413,10 @@ PreprocessAlterExtensionSchemaStmt(Node *node, const char *queryString, List * PostprocessAlterExtensionSchemaStmt(Node *node, const char *queryString) { - ObjectAddress extensionAddress = GetObjectAddressFromParseTree(node, false); + List *extensionAddresses = GetObjectAddressListFromParseTree(node, false); + + /* the code-path only supports a single object */ + Assert(list_length(extensionAddresses) == 1); if (!ShouldPropagateExtensionCommand(node)) { @@ -419,7 +424,7 @@ PostprocessAlterExtensionSchemaStmt(Node *node, const char *queryString) } /* dependencies (schema) have changed let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&extensionAddress); + EnsureAllObjectDependenciesExistOnAllNodes(extensionAddresses); return NIL; } @@ -504,7 +509,7 @@ PostprocessAlterExtensionCitusUpdateStmt(Node *node) * * Note that this function is not responsible for ensuring if dependencies exist on * nodes and satisfying these dependendencies if not exists, which is already done by - * EnsureDependenciesExistOnAllNodes on demand. Hence, this function is just designed + * EnsureAllObjectDependenciesExistOnAllNodes on demand. Hence, this function is just designed * to be used when "ALTER EXTENSION citus UPDATE" is executed. * This is because we want to add existing objects that would have already been in * pg_dist_object if we had created them in new version of Citus to pg_dist_object. 
diff --git a/src/backend/distributed/commands/foreign_data_wrapper.c b/src/backend/distributed/commands/foreign_data_wrapper.c index 9cfd5f10e..c9a08c41a 100644 --- a/src/backend/distributed/commands/foreign_data_wrapper.c +++ b/src/backend/distributed/commands/foreign_data_wrapper.c @@ -64,6 +64,7 @@ PreprocessGrantOnFDWStmt(Node *node, const char *queryString, EnsureCoordinator(); + /* the code-path only supports a single object */ Assert(list_length(stmt->objects) == 1); char *sql = DeparseTreeNode((Node *) stmt); @@ -87,12 +88,15 @@ NameListHasFDWOwnedByDistributedExtension(List *FDWNames) foreach_ptr(FDWValue, FDWNames) { /* captures the extension address during lookup */ - ObjectAddress extensionAddress = { 0 }; + ObjectAddress *extensionAddress = palloc0(sizeof(ObjectAddress)); ObjectAddress FDWAddress = GetObjectAddressByFDWName(strVal(FDWValue), false); - if (IsObjectAddressOwnedByExtension(&FDWAddress, &extensionAddress)) + ObjectAddress *copyFDWAddress = palloc0(sizeof(ObjectAddress)); + *copyFDWAddress = FDWAddress; + if (IsAnyObjectAddressOwnedByExtension(list_make1(copyFDWAddress), + extensionAddress)) { - if (IsObjectDistributed(&extensionAddress)) + if (IsAnyObjectDistributed(list_make1(extensionAddress))) { return true; } diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c index 36b6094a9..d98393e48 100644 --- a/src/backend/distributed/commands/foreign_server.c +++ b/src/backend/distributed/commands/foreign_server.c @@ -102,6 +102,7 @@ PreprocessGrantOnForeignServerStmt(Node *node, const char *queryString, EnsureCoordinator(); + /* the code-path only supports a single object */ Assert(list_length(stmt->objects) == 1); char *sql = DeparseTreeNode((Node *) stmt); @@ -247,15 +248,14 @@ NameListHasDistributedServer(List *serverNames) foreach_ptr(serverValue, serverNames) { List *addresses = GetObjectAddressByServerName(strVal(serverValue), false); - if (list_length(addresses) > 1) - { - ereport(ERROR, errmsg( - "citus does not support multiple object addresses in NameListHasDistributedServer")); - } + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + /* We have already asserted that we have exactly 1 address in the addresses. 
*/ ObjectAddress *address = linitial(addresses); - if (IsObjectDistributed(address)) + if (IsAnyObjectDistributed(list_make1(address))) { return true; } diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index ccf2ef2f3..048c0a299 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -128,7 +128,7 @@ create_distributed_function(PG_FUNCTION_ARGS) text *colocateWithText = NULL; /* optional */ StringInfoData ddlCommand = { 0 }; - ObjectAddress functionAddress = { 0 }; + ObjectAddress *functionAddress = palloc0(sizeof(ObjectAddress)); Oid distributionArgumentOid = InvalidOid; bool colocatedWithReferenceTable = false; @@ -203,9 +203,9 @@ create_distributed_function(PG_FUNCTION_ARGS) EnsureCoordinator(); EnsureFunctionOwner(funcOid); - ObjectAddressSet(functionAddress, ProcedureRelationId, funcOid); + ObjectAddressSet(*functionAddress, ProcedureRelationId, funcOid); - if (RecreateSameNonColocatedFunction(functionAddress, + if (RecreateSameNonColocatedFunction(*functionAddress, distributionArgumentName, colocateWithTableNameDefault, forceDelegationAddress)) @@ -224,9 +224,10 @@ create_distributed_function(PG_FUNCTION_ARGS) * pg_dist_object, and not propagate the CREATE FUNCTION. Function * will be created by the virtue of the extension creation. */ - if (IsObjectAddressOwnedByExtension(&functionAddress, &extensionAddress)) + if (IsAnyObjectAddressOwnedByExtension(list_make1(functionAddress), + &extensionAddress)) { - EnsureExtensionFunctionCanBeDistributed(functionAddress, extensionAddress, + EnsureExtensionFunctionCanBeDistributed(*functionAddress, extensionAddress, distributionArgumentName); } else @@ -237,7 +238,7 @@ create_distributed_function(PG_FUNCTION_ARGS) */ EnsureSequentialMode(OBJECT_FUNCTION); - EnsureDependenciesExistOnAllNodes(&functionAddress); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(functionAddress)); const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true); const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid); @@ -257,7 +258,7 @@ create_distributed_function(PG_FUNCTION_ARGS) ddlCommand.data); } - MarkObjectDistributed(&functionAddress); + MarkObjectDistributed(functionAddress); if (distributionArgumentName != NULL) { @@ -272,12 +273,12 @@ create_distributed_function(PG_FUNCTION_ARGS) distributionArgumentOid, colocateWithTableName, forceDelegationAddress, - &functionAddress); + functionAddress); } else if (!colocatedWithReferenceTable) { DistributeFunctionColocatedWithDistributedTable(funcOid, colocateWithTableName, - &functionAddress); + functionAddress); } else if (colocatedWithReferenceTable) { @@ -288,7 +289,7 @@ create_distributed_function(PG_FUNCTION_ARGS) */ ErrorIfAnyNodeDoesNotHaveMetadata(); - DistributeFunctionColocatedWithReferenceTable(&functionAddress); + DistributeFunctionColocatedWithReferenceTable(functionAddress); } PG_RETURN_VOID(); @@ -1308,7 +1309,7 @@ ShouldPropagateAlterFunction(const ObjectAddress *address) return false; } - if (!IsObjectDistributed(address)) + if (!IsAnyObjectDistributed(list_make1((ObjectAddress *) address))) { /* do not propagate alter function for non-distributed functions */ return false; @@ -1373,15 +1374,19 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString) return NIL; } - ObjectAddress functionAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + List *functionAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false); - if 
(IsObjectAddressOwnedByExtension(&functionAddress, NULL)) + /* the code-path only supports a single object */ + Assert(list_length(functionAddresses) == 1); + + if (IsAnyObjectAddressOwnedByExtension(functionAddresses, NULL)) { return NIL; } /* If the function has any unsupported dependency, create it locally */ - DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&functionAddress); + DeferredErrorMessage *errMsg = DeferErrorIfAnyObjectHasUnsupportedDependency( + functionAddresses); if (errMsg != NULL) { @@ -1389,11 +1394,14 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString) return NIL; } - EnsureDependenciesExistOnAllNodes(&functionAddress); + EnsureAllObjectDependenciesExistOnAllNodes(functionAddresses); + + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *functionAddress = linitial(functionAddresses); List *commands = list_make1(DISABLE_DDL_PROPAGATION); commands = list_concat(commands, CreateFunctionDDLCommandsIdempotent( - &functionAddress)); + functionAddress)); commands = list_concat(commands, list_make1(ENABLE_DDL_PROPAGATION)); return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); @@ -1494,8 +1502,15 @@ PreprocessAlterFunctionStmt(Node *node, const char *queryString, AlterFunctionStmt *stmt = castNode(AlterFunctionStmt, node); AssertObjectTypeIsFunctional(stmt->objtype); - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateAlterFunction(&address)) + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, false); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *address = linitial(addresses); + + if (!ShouldPropagateAlterFunction(address)) { return NIL; } @@ -1549,20 +1564,26 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString, return NIL; } - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, true); - if (!IsObjectDistributed(&address)) + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, true); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + if (!IsAnyObjectDistributed(addresses)) { return NIL; } + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *address = linitial(addresses); + /* * Distributed objects should not start depending on an extension, this will break * the dependency resolving mechanism we use to replicate distributed objects to new * workers */ - const char *functionName = - getObjectIdentity_compat(&address, /* missingOk: */ false); + getObjectIdentity_compat(address, /* missingOk: */ false); ereport(ERROR, (errmsg("distrtibuted functions are not allowed to depend on an " "extension"), errdetail("Function \"%s\" is already distributed. Functions from " @@ -1920,7 +1941,7 @@ EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress, /* * Ensure corresponding extension is in pg_dist_object. * Functions owned by an extension are depending internally on that extension, - * hence EnsureDependenciesExistOnAllNodes() creates the extension, which in + * hence EnsureAllObjectDependenciesExistOnAllNodes() creates the extension, which in * turn creates the function, and thus we don't have to create it ourself like * we do for non-extension functions. 
*/ @@ -1930,7 +1951,9 @@ EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress, get_extension_name(extensionAddress.objectId), get_func_name(functionAddress.objectId)))); - EnsureDependenciesExistOnAllNodes(&functionAddress); + ObjectAddress *copyFunctionAddress = palloc0(sizeof(ObjectAddress)); + *copyFunctionAddress = functionAddress; + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(copyFunctionAddress)); } @@ -2004,7 +2027,7 @@ PostprocessGrantOnFunctionStmt(Node *node, const char *queryString) ObjectAddress *functionAddress = NULL; foreach_ptr(functionAddress, distributedFunctions) { - EnsureDependenciesExistOnAllNodes(functionAddress); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(functionAddress)); } return NIL; } @@ -2083,7 +2106,7 @@ FilterDistributedFunctions(GrantStmt *grantStmt) * if this function from GRANT .. ON FUNCTION .. is a distributed * function, add it to the list */ - if (IsObjectDistributed(functionAddress)) + if (IsAnyObjectDistributed(list_make1(functionAddress))) { grantFunctionList = lappend(grantFunctionList, functionAddress); } diff --git a/src/backend/distributed/commands/grant.c b/src/backend/distributed/commands/grant.c index bf7c39b3b..c7861060a 100644 --- a/src/backend/distributed/commands/grant.c +++ b/src/backend/distributed/commands/grant.c @@ -238,9 +238,9 @@ CollectGrantTableIdList(GrantStmt *grantStmt) } /* check for distributed sequences included in GRANT ON TABLE statement */ - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, relationId); - if (IsObjectDistributed(&sequenceAddress)) + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, RelationRelationId, relationId); + if (IsAnyObjectDistributed(list_make1(sequenceAddress))) { grantTableList = lappend_oid(grantTableList, relationId); } diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 2e9d2d58d..ee7098289 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -761,9 +761,9 @@ PostprocessIndexStmt(Node *node, const char *queryString) Oid indexRelationId = get_relname_relid(indexStmt->idxname, schemaId); /* ensure dependencies of index exist on all nodes */ - ObjectAddress address = { 0 }; - ObjectAddressSet(address, RelationRelationId, indexRelationId); - EnsureDependenciesExistOnAllNodes(&address); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, RelationRelationId, indexRelationId); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(address)); /* furtheron we are only processing CONCURRENT index statements */ if (!indexStmt->concurrent) @@ -772,7 +772,7 @@ PostprocessIndexStmt(Node *node, const char *queryString) } /* - * EnsureDependenciesExistOnAllNodes could have distributed objects that are required + * EnsureAllObjectDependenciesExistOnAllNodes could have distributed objects that are required * by this index. During the propagation process an active snapshout might be left as * a side effect of inserting the local tuples via SPI. To not leak a snapshot like * that we will pop any snapshot if we have any right before we commit. 
diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 6d19d6726..dcec5c2cc 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -137,8 +137,12 @@ RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok) List * PostprocessAlterRoleStmt(Node *node, const char *queryString) { - ObjectAddress address = GetObjectAddressFromParseTree(node, false); - if (!ShouldPropagateObject(&address)) + List *addresses = GetObjectAddressListFromParseTree(node, false); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + if (!ShouldPropagateAnyObject(addresses)) { return NIL; } @@ -208,14 +212,17 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString, return NIL; } - ObjectAddress address = GetObjectAddressFromParseTree(node, false); + List *addresses = GetObjectAddressListFromParseTree(node, false); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); /* * stmt->role could be NULL when the statement is on 'ALL' roles, we do propagate for * ALL roles. If it is not NULL the role is for a specific role. If that role is not * distributed we will not propagate the statement */ - if (stmt->role != NULL && !IsObjectDistributed(&address)) + if (stmt->role != NULL && !IsAnyObjectDistributed(addresses)) { return NIL; } @@ -1056,7 +1063,6 @@ FilterDistributedRoles(List *roles) foreach_ptr(roleNode, roles) { RoleSpec *role = castNode(RoleSpec, roleNode); - ObjectAddress roleAddress = { 0 }; Oid roleOid = get_rolespec_oid(role, true); if (roleOid == InvalidOid) { @@ -1066,8 +1072,9 @@ FilterDistributedRoles(List *roles) */ continue; } - ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); - if (IsObjectDistributed(&roleAddress)) + ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*roleAddress, AuthIdRelationId, roleOid); + if (IsAnyObjectDistributed(list_make1(roleAddress))) { distributedRoles = lappend(distributedRoles, role); } @@ -1137,12 +1144,13 @@ PostprocessGrantRoleStmt(Node *node, const char *queryString) RoleSpec *role = NULL; foreach_ptr(role, stmt->grantee_roles) { - ObjectAddress roleAddress = { 0 }; Oid roleOid = get_rolespec_oid(role, false); - ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); - if (IsObjectDistributed(&roleAddress)) + ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*roleAddress, AuthIdRelationId, roleOid); + + if (IsAnyObjectDistributed(list_make1(roleAddress))) { - EnsureDependenciesExistOnAllNodes(&roleAddress); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(roleAddress)); } } return NIL; diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index 825a56b09..571064130 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -259,10 +259,9 @@ FilterDistributedSchemas(List *schemas) continue; } - ObjectAddress address = { 0 }; - ObjectAddressSet(address, NamespaceRelationId, schemaOid); - - if (!IsObjectDistributed(&address)) + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, NamespaceRelationId, schemaOid); + if (!IsAnyObjectDistributed(list_make1(address))) { continue; } diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 324e373b5..b3ae475aa 100644 --- a/src/backend/distributed/commands/sequence.c +++ 
b/src/backend/distributed/commands/sequence.c @@ -268,18 +268,16 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString, Oid seqOid = RangeVarGetRelid(seq, NoLock, stmt->missing_ok); - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, seqOid); - - if (!IsObjectDistributed(&sequenceAddress)) + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, RelationRelationId, seqOid); + if (!IsAnyObjectDistributed(list_make1(sequenceAddress))) { continue; } /* collect information for all distributed sequences */ - ObjectAddress *addressp = palloc(sizeof(ObjectAddress)); - *addressp = sequenceAddress; - distributedSequenceAddresses = lappend(distributedSequenceAddresses, addressp); + distributedSequenceAddresses = lappend(distributedSequenceAddresses, + sequenceAddress); distributedSequencesList = lappend(distributedSequencesList, objectNameList); } @@ -334,10 +332,13 @@ PreprocessRenameSequenceStmt(Node *node, const char *queryString, ProcessUtility RenameStmt *stmt = castNode(RenameStmt, node); Assert(stmt->renameType == OBJECT_SEQUENCE); - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, + stmt->missing_ok); - if (!ShouldPropagateObject(&address)) + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + if (!ShouldPropagateAnyObject(addresses)) { return NIL; } @@ -395,21 +396,27 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString, { AlterSeqStmt *stmt = castNode(AlterSeqStmt, node); - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, + stmt->missing_ok); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); /* error out if the sequence is distributed */ - if (IsObjectDistributed(&address)) + if (IsAnyObjectDistributed(addresses)) { ereport(ERROR, (errmsg( "Altering a distributed sequence is currently not supported."))); } + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *address = linitial(addresses); + /* * error out if the sequence is used in a distributed table * and this is an ALTER SEQUENCE .. AS .. 
statement */ - Oid citusTableId = SequenceUsedInDistributedTable(&address); + Oid citusTableId = SequenceUsedInDistributedTable(address); if (citusTableId != InvalidOid) { List *options = stmt->options; @@ -463,6 +470,7 @@ SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress) } } } + return InvalidOid; } @@ -498,9 +506,13 @@ PreprocessAlterSequenceSchemaStmt(Node *node, const char *queryString, AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); Assert(stmt->objectType == OBJECT_SEQUENCE); - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); - if (!ShouldPropagateObject(&address)) + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, + stmt->missing_ok); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + if (!ShouldPropagateAnyObject(addresses)) { return NIL; } @@ -572,16 +584,19 @@ PostprocessAlterSequenceSchemaStmt(Node *node, const char *queryString) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); Assert(stmt->objectType == OBJECT_SEQUENCE); - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, + stmt->missing_ok); - if (!ShouldPropagateObject(&address)) + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + if (!ShouldPropagateAnyObject(addresses)) { return NIL; } /* dependencies have changed (schema) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&address); + EnsureAllObjectDependenciesExistOnAllNodes(addresses); return NIL; } @@ -601,8 +616,12 @@ PreprocessAlterSequenceOwnerStmt(Node *node, const char *queryString, AlterTableStmt *stmt = castNode(AlterTableStmt, node); Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); - ObjectAddress sequenceAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&sequenceAddress)) + List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false); + + /* the code-path only supports a single object */ + Assert(list_length(sequenceAddresses) == 1); + + if (!ShouldPropagateAnyObject(sequenceAddresses)) { return NIL; } @@ -649,14 +668,18 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString) AlterTableStmt *stmt = castNode(AlterTableStmt, node); Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_SEQUENCE); - ObjectAddress sequenceAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&sequenceAddress)) + List *sequenceAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false); + + /* the code-path only supports a single object */ + Assert(list_length(sequenceAddresses) == 1); + + if (!ShouldPropagateAnyObject(sequenceAddresses)) { return NIL; } /* dependencies have changed (owner) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&sequenceAddress); + EnsureAllObjectDependenciesExistOnAllNodes(sequenceAddresses); return NIL; } @@ -744,10 +767,10 @@ PostprocessGrantOnSequenceStmt(Node *node, const char *queryString) RangeVar *sequence = NULL; foreach_ptr(sequence, distributedSequences) { - ObjectAddress sequenceAddress = { 0 }; + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); Oid sequenceOid = RangeVarGetRelid(sequence, NoLock, false); - ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid); - EnsureDependenciesExistOnAllNodes(&sequenceAddress); + 
ObjectAddressSet(*sequenceAddress, RelationRelationId, sequenceOid); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(sequenceAddress)); } return NIL; } @@ -866,15 +889,15 @@ FilterDistributedSequences(GrantStmt *stmt) RangeVar *sequenceRangeVar = NULL; foreach_ptr(sequenceRangeVar, stmt->objects) { - ObjectAddress sequenceAddress = { 0 }; Oid sequenceOid = RangeVarGetRelid(sequenceRangeVar, NoLock, missing_ok); - ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid); + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, RelationRelationId, sequenceOid); /* * if this sequence from GRANT .. ON SEQUENCE .. is a distributed * sequence, add it to the list */ - if (IsObjectDistributed(&sequenceAddress)) + if (IsAnyObjectDistributed(list_make1(sequenceAddress))) { grantSequenceList = lappend(grantSequenceList, sequenceRangeVar); } diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index 48ed8df20..b93abcc79 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -122,9 +122,12 @@ PostprocessCreateStatisticsStmt(Node *node, const char *queryString) } bool missingOk = false; - ObjectAddress objectAddress = GetObjectAddressFromParseTree((Node *) stmt, missingOk); + List *objectAddresses = GetObjectAddressListFromParseTree((Node *) stmt, missingOk); - EnsureDependenciesExistOnAllNodes(&objectAddress); + /* the code-path only supports a single object */ + Assert(list_length(objectAddresses) == 1); + + EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses); return NIL; } @@ -306,9 +309,12 @@ PostprocessAlterStatisticsSchemaStmt(Node *node, const char *queryString) } bool missingOk = false; - ObjectAddress objectAddress = GetObjectAddressFromParseTree((Node *) stmt, missingOk); + List *objectAddresses = GetObjectAddressListFromParseTree((Node *) stmt, missingOk); - EnsureDependenciesExistOnAllNodes(&objectAddress); + /* the code-path only supports a single object */ + Assert(list_length(objectAddresses) == 1); + + EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses); return NIL; } @@ -449,10 +455,9 @@ PostprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString) return NIL; } - ObjectAddress statisticsAddress = { 0 }; - ObjectAddressSet(statisticsAddress, StatisticExtRelationId, statsOid); - - EnsureDependenciesExistOnAllNodes(&statisticsAddress); + ObjectAddress *statisticsAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*statisticsAddress, StatisticExtRelationId, statsOid); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(statisticsAddress)); return NIL; } diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 05f2a82ab..0b660af70 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -649,13 +649,19 @@ PostprocessAlterTableSchemaStmt(Node *node, const char *queryString) /* * We will let Postgres deal with missing_ok */ - ObjectAddress tableAddress = GetObjectAddressFromParseTree((Node *) stmt, true); + List *tableAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true); + + /* the code-path only supports a single object */ + Assert(list_length(tableAddresses) == 1); + + /* We have already asserted that we have exactly 1 address in the addresses. 
*/ + ObjectAddress *tableAddress = linitial(tableAddresses); /* * Check whether we are dealing with a sequence or view here and route queries * accordingly to the right processor function. */ - char relKind = get_rel_relkind(tableAddress.objectId); + char relKind = get_rel_relkind(tableAddress->objectId); if (relKind == RELKIND_SEQUENCE) { stmt->objectType = OBJECT_SEQUENCE; @@ -667,12 +673,12 @@ PostprocessAlterTableSchemaStmt(Node *node, const char *queryString) return PostprocessAlterViewSchemaStmt((Node *) stmt, queryString); } - if (!ShouldPropagate() || !IsCitusTable(tableAddress.objectId)) + if (!ShouldPropagate() || !IsCitusTable(tableAddress->objectId)) { return NIL; } - EnsureDependenciesExistOnAllNodes(&tableAddress); + EnsureAllObjectDependenciesExistOnAllNodes(tableAddresses); return NIL; } @@ -1776,9 +1782,15 @@ PreprocessAlterTableSchemaStmt(Node *node, const char *queryString, return NIL; } - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, - stmt->missing_ok); - Oid relationId = address.objectId; + List *addresses = GetObjectAddressListFromParseTree((Node *) stmt, + stmt->missing_ok); + + /* the code-path only supports a single object */ + Assert(list_length(addresses) == 1); + + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *address = linitial(addresses); + Oid relationId = address->objectId; /* * Check whether we are dealing with a sequence or view here and route queries @@ -1990,9 +2002,9 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) EnsureRelationHasCompatibleSequenceTypes(relationId); /* changing a relation could introduce new dependencies */ - ObjectAddress tableAddress = { 0 }; - ObjectAddressSet(tableAddress, RelationRelationId, relationId); - EnsureDependenciesExistOnAllNodes(&tableAddress); + ObjectAddress *tableAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*tableAddress, RelationRelationId, relationId); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(tableAddress)); } /* for the new sequences coming with this ALTER TABLE statement */ diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c index 12d7253e2..9d9d62342 100644 --- a/src/backend/distributed/commands/trigger.c +++ b/src/backend/distributed/commands/trigger.c @@ -224,8 +224,12 @@ PostprocessCreateTriggerStmt(Node *node, const char *queryString) EnsureCoordinator(); ErrorOutForTriggerIfNotSupported(relationId); - ObjectAddress objectAddress = GetObjectAddressFromParseTree(node, missingOk); - EnsureDependenciesExistOnAllNodes(&objectAddress); + List *objectAddresses = GetObjectAddressListFromParseTree(node, missingOk); + + /* the code-path only supports a single object */ + Assert(list_length(objectAddresses) == 1); + + EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses); char *triggerName = createTriggerStmt->trigname; return CitusCreateTriggerCommandDDLJob(relationId, triggerName, diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index 9ebe51510..a074fa649 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -117,8 +117,12 @@ PreprocessRenameTypeAttributeStmt(Node *node, const char *queryString, Assert(stmt->renameType == OBJECT_ATTRIBUTE); Assert(stmt->relationType == OBJECT_TYPE); - ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - if (!ShouldPropagateObject(&typeAddress)) + List *typeAddresses = 
GetObjectAddressListFromParseTree((Node *) stmt, false); + + /* the code-path only supports a single object */ + Assert(list_length(typeAddresses) == 1); + + if (!ShouldPropagateAnyObject(typeAddresses)) { return NIL; } diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index aae9c6104..0205a0ab9 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -853,8 +853,12 @@ ProcessUtilityInternal(PlannedStmt *pstmt, */ if (ops && ops->markDistributed) { - ObjectAddress address = GetObjectAddressFromParseTree(parsetree, false); - MarkObjectDistributed(&address); + List *addresses = GetObjectAddressListFromParseTree(parsetree, false); + ObjectAddress *address = NULL; + foreach_ptr(address, addresses) + { + MarkObjectDistributed(address); + } } } diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c index daf255652..f8900a800 100644 --- a/src/backend/distributed/commands/view.c +++ b/src/backend/distributed/commands/view.c @@ -94,22 +94,27 @@ PostprocessViewStmt(Node *node, const char *queryString) return NIL; } - ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false); - if (IsObjectAddressOwnedByExtension(&viewAddress, NULL)) + /* the code-path only supports a single object */ + Assert(list_length(viewAddresses) == 1); + + if (IsAnyObjectAddressOwnedByExtension(viewAddresses, NULL)) { return NIL; } /* If the view has any unsupported dependency, create it locally */ - if (ErrorOrWarnIfObjectHasUnsupportedDependency(&viewAddress)) + if (ErrorOrWarnIfAnyObjectHasUnsupportedDependency(viewAddresses)) { return NIL; } - EnsureDependenciesExistOnAllNodes(&viewAddress); + EnsureAllObjectDependenciesExistOnAllNodes(viewAddresses); - char *command = CreateViewDDLCommand(viewAddress.objectId); + /* We have already asserted that we have exactly 1 address in the addresses. 
*/ + ObjectAddress *viewAddress = linitial(viewAddresses); + char *command = CreateViewDDLCommand(viewAddress->objectId); /* * We'd typically use NodeDDLTaskList() for generating node-level DDL commands, @@ -140,7 +145,7 @@ PostprocessViewStmt(Node *node, const char *queryString) * */ DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetObjectAddress = viewAddress; + ddlJob->targetObjectAddress = *viewAddress; ddlJob->metadataSyncCommand = command; ddlJob->taskList = NIL; @@ -442,10 +447,9 @@ IsViewDistributed(Oid viewOid) Assert(get_rel_relkind(viewOid) == RELKIND_VIEW || get_rel_relkind(viewOid) == RELKIND_MATVIEW); - ObjectAddress viewAddress = { 0 }; - ObjectAddressSet(viewAddress, RelationRelationId, viewOid); - - return IsObjectDistributed(&viewAddress); + ObjectAddress *viewAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*viewAddress, RelationRelationId, viewOid); + return IsAnyObjectDistributed(list_make1(viewAddress)); } @@ -458,8 +462,12 @@ PreprocessAlterViewStmt(Node *node, const char *queryString, ProcessUtilityConte { AlterTableStmt *stmt = castNode(AlterTableStmt, node); - ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true); - if (!ShouldPropagateObject(&viewAddress)) + List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true); + + /* the code-path only supports a single object */ + Assert(list_length(viewAddresses) == 1); + + if (!ShouldPropagateAnyObject(viewAddresses)) { return NIL; } @@ -471,12 +479,15 @@ PreprocessAlterViewStmt(Node *node, const char *queryString, ProcessUtilityConte /* reconstruct alter statement in a portable fashion */ const char *alterViewStmtSql = DeparseTreeNode((Node *) stmt); + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *viewAddress = linitial(viewAddresses); + /* * To avoid sequential mode, we are using metadata connection. For the * detailed explanation, please check the comment on PostprocessViewStmt. 
*/ DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetObjectAddress = viewAddress; + ddlJob->targetObjectAddress = *viewAddress; ddlJob->metadataSyncCommand = alterViewStmtSql; ddlJob->taskList = NIL; @@ -493,24 +504,28 @@ PostprocessAlterViewStmt(Node *node, const char *queryString) AlterTableStmt *stmt = castNode(AlterTableStmt, node); Assert(AlterTableStmtObjType_compat(stmt) == OBJECT_VIEW); - ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true); - if (!ShouldPropagateObject(&viewAddress)) + List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true); + + /* the code-path only supports a single object */ + Assert(list_length(viewAddresses) == 1); + + if (!ShouldPropagateAnyObject(viewAddresses)) { return NIL; } - if (IsObjectAddressOwnedByExtension(&viewAddress, NULL)) + if (IsAnyObjectAddressOwnedByExtension(viewAddresses, NULL)) { return NIL; } /* If the view has any unsupported dependency, create it locally */ - if (ErrorOrWarnIfObjectHasUnsupportedDependency(&viewAddress)) + if (ErrorOrWarnIfAnyObjectHasUnsupportedDependency(viewAddresses)) { return NIL; } - EnsureDependenciesExistOnAllNodes(&viewAddress); + EnsureAllObjectDependenciesExistOnAllNodes(viewAddresses); return NIL; } @@ -541,8 +556,12 @@ List * PreprocessRenameViewStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - ObjectAddress viewAddress = GetObjectAddressFromParseTree(node, true); - if (!ShouldPropagateObject(&viewAddress)) + List *viewAddresses = GetObjectAddressListFromParseTree(node, true); + + /* the code-path only supports a single object */ + Assert(list_length(viewAddresses) == 1); + + if (!ShouldPropagateAnyObject(viewAddresses)) { return NIL; } @@ -555,12 +574,15 @@ PreprocessRenameViewStmt(Node *node, const char *queryString, /* deparse sql*/ const char *renameStmtSql = DeparseTreeNode(node); + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *viewAddress = linitial(viewAddresses); + /* * To avoid sequential mode, we are using metadata connection. For the * detailed explanation, please check the comment on PostprocessViewStmt. */ DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetObjectAddress = viewAddress; + ddlJob->targetObjectAddress = *viewAddress; ddlJob->metadataSyncCommand = renameStmtSql; ddlJob->taskList = NIL; @@ -596,8 +618,12 @@ PreprocessAlterViewSchemaStmt(Node *node, const char *queryString, { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true); - if (!ShouldPropagateObject(&viewAddress)) + List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true); + + /* the code-path only supports a single object */ + Assert(list_length(viewAddresses) == 1); + + if (!ShouldPropagateAnyObject(viewAddresses)) { return NIL; } @@ -608,12 +634,15 @@ PreprocessAlterViewSchemaStmt(Node *node, const char *queryString, const char *sql = DeparseTreeNode((Node *) stmt); + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *viewAddress = linitial(viewAddresses); + /* * To avoid sequential mode, we are using metadata connection. For the * detailed explanation, please check the comment on PostprocessViewStmt. 
*/ DDLJob *ddlJob = palloc0(sizeof(DDLJob)); - ddlJob->targetObjectAddress = viewAddress; + ddlJob->targetObjectAddress = *viewAddress; ddlJob->metadataSyncCommand = sql; ddlJob->taskList = NIL; @@ -631,14 +660,18 @@ PostprocessAlterViewSchemaStmt(Node *node, const char *queryString) { AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); - ObjectAddress viewAddress = GetObjectAddressFromParseTree((Node *) stmt, true); - if (!ShouldPropagateObject(&viewAddress)) + List *viewAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true); + + /* the code-path only supports a single object */ + Assert(list_length(viewAddresses) == 1); + + if (!ShouldPropagateAnyObject(viewAddresses)) { return NIL; } /* dependencies have changed (schema) let's ensure they exist */ - EnsureDependenciesExistOnAllNodes(&viewAddress); + EnsureAllObjectDependenciesExistOnAllNodes(viewAddresses); return NIL; } diff --git a/src/backend/distributed/deparser/objectaddress.c b/src/backend/distributed/deparser/objectaddress.c index 123445c23..c6638b4e6 100644 --- a/src/backend/distributed/deparser/objectaddress.c +++ b/src/backend/distributed/deparser/objectaddress.c @@ -20,11 +20,11 @@ /* - * GetObjectAddressFromParseTree returns the ObjectAddress of the main target of the parse + * GetObjectAddressListFromParseTree returns the list of ObjectAddress of the main target of the parse * tree. */ -ObjectAddress -GetObjectAddressFromParseTree(Node *parseTree, bool missing_ok) +List * +GetObjectAddressListFromParseTree(Node *parseTree, bool missing_ok) { const DistributeObjectOps *ops = GetDistributeObjectOps(parseTree); @@ -33,19 +33,7 @@ GetObjectAddressFromParseTree(Node *parseTree, bool missing_ok) ereport(ERROR, (errmsg("unsupported statement to get object address for"))); } - List *objectAddresses = ops->address(parseTree, missing_ok); - - if (list_length(objectAddresses) > 1) - { - ereport(ERROR, (errmsg( - "citus does not support multiple object addresses in GetObjectAddressFromParseTree"))); - } - - Assert(list_length(objectAddresses) == 1); - - ObjectAddress *objectAddress = linitial(objectAddresses); - - return *objectAddress; + return ops->address(parseTree, missing_ok); } diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 99bcfc240..d787b17aa 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -137,6 +137,8 @@ static DependencyDefinition * CreateObjectAddressDependencyDef(Oid classId, Oid static List * GetTypeConstraintDependencyDefinition(Oid typeId); static List * CreateObjectAddressDependencyDefList(Oid classId, List *objectIdList); static ObjectAddress DependencyDefinitionObjectAddress(DependencyDefinition *definition); +static DeferredErrorMessage * DeferErrorIfHasUnsupportedDependency(const ObjectAddress * + objectAddress); /* forward declarations for functions to interact with the ObjectAddressCollector */ static void InitObjectAddressCollector(ObjectAddressCollector *collector); @@ -176,7 +178,10 @@ static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, static List * GetDependentRoleIdsFDW(Oid FDWOid); static List * ExpandRolesToGroups(Oid roleid); static ViewDependencyNode * BuildViewDependencyGraph(Oid relationId, HTAB *nodeMap); - +static bool IsObjectAddressOwnedByExtension(const ObjectAddress *target, + ObjectAddress *extensionAddress); +static bool ErrorOrWarnIfObjectHasUnsupportedDependency(const + ObjectAddress *objectAddress); /* * 
GetUniqueDependenciesList takes a list of object addresses and returns a new list @@ -774,8 +779,8 @@ SupportedDependencyByCitus(const ObjectAddress *address) * object doesn't have any unsupported dependency, else throws a message with proper level * (except the cluster doesn't have any node) and return true. */ -bool -ErrorOrWarnIfObjectHasUnsupportedDependency(ObjectAddress *objectAddress) +static bool +ErrorOrWarnIfObjectHasUnsupportedDependency(const ObjectAddress *objectAddress) { DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(objectAddress); if (errMsg != NULL) @@ -805,7 +810,7 @@ ErrorOrWarnIfObjectHasUnsupportedDependency(ObjectAddress *objectAddress) * is not distributed yet, we can create it locally to not affect user's local * usage experience. */ - else if (IsObjectDistributed(objectAddress)) + else if (IsAnyObjectDistributed(list_make1((ObjectAddress *) objectAddress))) { RaiseDeferredError(errMsg, ERROR); } @@ -821,11 +826,31 @@ ErrorOrWarnIfObjectHasUnsupportedDependency(ObjectAddress *objectAddress) } +/* + * ErrorOrWarnIfAnyObjectHasUnsupportedDependency iteratively calls + * ErrorOrWarnIfObjectHasUnsupportedDependency for given addresses. + */ +bool +ErrorOrWarnIfAnyObjectHasUnsupportedDependency(List *objectAddresses) +{ + ObjectAddress *objectAddress = NULL; + foreach_ptr(objectAddress, objectAddresses) + { + if (ErrorOrWarnIfObjectHasUnsupportedDependency(objectAddress)) + { + return true; + } + } + + return false; +} + + /* * DeferErrorIfHasUnsupportedDependency returns deferred error message if the given * object has any undistributable dependency. */ -DeferredErrorMessage * +static DeferredErrorMessage * DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) { ObjectAddress *undistributableDependency = GetUndistributableDependency( @@ -858,7 +883,7 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) * Otherwise, callers are expected to throw the error returned from this * function as a hard one by ignoring the detail part. */ - if (!IsObjectDistributed(objectAddress)) + if (!IsAnyObjectDistributed(list_make1((ObjectAddress *) objectAddress))) { appendStringInfo(detailInfo, "\"%s\" will be created only locally", objectDescription); @@ -873,7 +898,7 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) objectDescription, dependencyDescription); - if (IsObjectDistributed(objectAddress)) + if (IsAnyObjectDistributed(list_make1((ObjectAddress *) objectAddress))) { appendStringInfo(hintInfo, "Distribute \"%s\" first to modify \"%s\" on worker nodes", @@ -900,6 +925,28 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) } +/* + * DeferErrorIfAnyObjectHasUnsupportedDependency iteratively calls + * DeferErrorIfHasUnsupportedDependency for given addresses. + */ +DeferredErrorMessage * +DeferErrorIfAnyObjectHasUnsupportedDependency(const List *objectAddresses) +{ + DeferredErrorMessage *deferredErrorMessage = NULL; + ObjectAddress *objectAddress = NULL; + foreach_ptr(objectAddress, objectAddresses) + { + deferredErrorMessage = DeferErrorIfHasUnsupportedDependency(objectAddress); + if (deferredErrorMessage) + { + return deferredErrorMessage; + } + } + + return NULL; +} + + /* * GetUndistributableDependency checks whether object has any non-distributable * dependency. If any one found, it will be returned. @@ -936,7 +983,7 @@ GetUndistributableDependency(const ObjectAddress *objectAddress) /* * If object is distributed already, ignore it. 
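* (The updated check below wraps the single dependency in a one-element list
* with list_make1 so that it can go through the new list-based
* IsAnyObjectDistributed API.)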
*/ - if (IsObjectDistributed(dependency)) + if (IsAnyObjectDistributed(list_make1(dependency))) { continue; } @@ -1015,7 +1062,7 @@ IsTableOwnedByExtension(Oid relationId) * If extensionAddress is not set to a NULL pointer the function will write the extension * address this function depends on into this location. */ -bool +static bool IsObjectAddressOwnedByExtension(const ObjectAddress *target, ObjectAddress *extensionAddress) { @@ -1055,6 +1102,27 @@ IsObjectAddressOwnedByExtension(const ObjectAddress *target, } +/* + * IsAnyObjectAddressOwnedByExtension iteratively calls IsObjectAddressOwnedByExtension + * for given addresses to determine if any address is owned by an extension. + */ +bool +IsAnyObjectAddressOwnedByExtension(const List *targets, + ObjectAddress *extensionAddress) +{ + ObjectAddress *target = NULL; + foreach_ptr(target, targets) + { + if (IsObjectAddressOwnedByExtension(target, extensionAddress)) + { + return true; + } + } + + return false; +} + + /* * FollowNewSupportedDependencies applies filters on pg_depend entries to follow all * objects which should be distributed before the root object can safely be created. @@ -1097,7 +1165,9 @@ FollowNewSupportedDependencies(ObjectAddressCollector *collector, * If the object is already distributed it is not a `new` object that needs to be * distributed before we create a dependent object */ - if (IsObjectDistributed(&address)) + ObjectAddress *copyAddress = palloc0(sizeof(ObjectAddress)); + *copyAddress = address; + if (IsAnyObjectDistributed(list_make1(copyAddress))) { return false; } diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 0581e54b3..55d7c9f33 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -53,6 +53,7 @@ static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress); static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, Datum *paramValues); +static bool IsObjectDistributed(const ObjectAddress *address); PG_FUNCTION_INFO_V1(citus_unmark_object_distributed); PG_FUNCTION_INFO_V1(master_unmark_object_distributed); @@ -240,17 +241,18 @@ ShouldMarkRelationDistributed(Oid relationId) return false; } - ObjectAddress relationAddress = { 0 }; - ObjectAddressSet(relationAddress, RelationRelationId, relationId); + ObjectAddress *relationAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*relationAddress, RelationRelationId, relationId); bool pgObject = (relationId < FirstNormalObjectId); - bool isObjectSupported = SupportedDependencyByCitus(&relationAddress); + bool isObjectSupported = SupportedDependencyByCitus(relationAddress); bool ownedByExtension = IsTableOwnedByExtension(relationId); - bool alreadyDistributed = IsObjectDistributed(&relationAddress); + bool alreadyDistributed = IsObjectDistributed(relationAddress); bool hasUnsupportedDependency = - DeferErrorIfHasUnsupportedDependency(&relationAddress) != NULL; + DeferErrorIfAnyObjectHasUnsupportedDependency(list_make1(relationAddress)) != + NULL; bool hasCircularDependency = - DeferErrorIfCircularDependencyExists(&relationAddress) != NULL; + DeferErrorIfCircularDependencyExists(relationAddress) != NULL; /* * pgObject: Citus never marks pg objects as distributed @@ -390,7 +392,7 @@ UnmarkObjectDistributed(const ObjectAddress *address) * IsObjectDistributed returns if the object addressed is already distributed in the * cluster. This performs a local indexed lookup in pg_dist_object. 
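*
* Since this function becomes static here, external callers now go through
* the list-based IsAnyObjectDistributed wrapper instead. As an illustration
* of the call-site pattern this patch uses elsewhere (see
* ShouldSyncSequenceMetadata in metadata_sync.c), code that previously wrote
*
*     ObjectAddress address = { 0 };
*     ObjectAddressSet(address, RelationRelationId, relationId);
*     return IsObjectDistributed(&address);
*
* now heap-allocates the address, because it is carried in a List:
*
*     ObjectAddress *address = palloc0(sizeof(ObjectAddress));
*     ObjectAddressSet(*address, RelationRelationId, relationId);
*     return IsAnyObjectDistributed(list_make1(address));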
*/ -bool +static bool IsObjectDistributed(const ObjectAddress *address) { ScanKeyData key[3]; @@ -422,6 +424,26 @@ IsObjectDistributed(const ObjectAddress *address) } +/* + * IsAnyObjectDistributed iteratively calls IsObjectDistributed for given addresses to + * determine if any object is distributed. + */ +bool +IsAnyObjectDistributed(const List *addresses) +{ + ObjectAddress *address = NULL; + foreach_ptr(address, addresses) + { + if (IsObjectDistributed(address)) + { + return true; + } + } + + return false; +} + + /* * GetDistributedObjectAddressList returns a list of ObjectAddresses that contains all * distributed objects as marked in pg_dist_object diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index aa6294ca2..38a2308ff 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -356,10 +356,9 @@ CreateDependingViewsOnWorkers(Oid relationId) continue; } - ObjectAddress viewAddress = { 0 }; - ObjectAddressSet(viewAddress, RelationRelationId, viewOid); - - EnsureDependenciesExistOnAllNodes(&viewAddress); + ObjectAddress *viewAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*viewAddress, RelationRelationId, viewOid); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(viewAddress)); char *createViewCommand = CreateViewDDLCommand(viewOid); char *alterViewOwnerCommand = AlterViewOwnerCommand(viewOid); @@ -367,7 +366,7 @@ CreateDependingViewsOnWorkers(Oid relationId) SendCommandToWorkersWithMetadata(createViewCommand); SendCommandToWorkersWithMetadata(alterViewOwnerCommand); - MarkObjectDistributed(&viewAddress); + MarkObjectDistributed(viewAddress); } SendCommandToWorkersWithMetadata(ENABLE_DDL_PROPAGATION); @@ -603,10 +602,10 @@ ShouldSyncSequenceMetadata(Oid relationId) return false; } - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, relationId); + ObjectAddress *sequenceAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*sequenceAddress, RelationRelationId, relationId); - return IsObjectDistributed(&sequenceAddress); + return IsAnyObjectDistributed(list_make1(sequenceAddress)); } diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c index c43da76aa..b9841dabf 100644 --- a/src/backend/distributed/operations/create_shards.c +++ b/src/backend/distributed/operations/create_shards.c @@ -70,7 +70,6 @@ master_create_worker_shards(PG_FUNCTION_ARGS) text *tableNameText = PG_GETARG_TEXT_P(0); int32 shardCount = PG_GETARG_INT32(1); int32 replicationFactor = PG_GETARG_INT32(2); - ObjectAddress tableAddress = { 0 }; Oid distributedTableId = ResolveRelationId(tableNameText, false); @@ -83,8 +82,9 @@ master_create_worker_shards(PG_FUNCTION_ARGS) * via their own connection and committed immediately so they become visible to all * sessions creating shards. 
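* (The table's ObjectAddress below is now allocated with palloc0 rather than
* on the stack, because EnsureAllObjectDependenciesExistOnAllNodes takes a
* List of ObjectAddress pointers.)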
*/ - ObjectAddressSet(tableAddress, RelationRelationId, distributedTableId); - EnsureDependenciesExistOnAllNodes(&tableAddress); + ObjectAddress *tableAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*tableAddress, RelationRelationId, distributedTableId); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(tableAddress)); EnsureReferenceTablesExistOnAllNodes(); diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c index 8f77205cb..e67691e44 100644 --- a/src/backend/distributed/operations/stage_protocol.c +++ b/src/backend/distributed/operations/stage_protocol.c @@ -96,7 +96,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS) text *relationNameText = PG_GETARG_TEXT_P(0); char *relationName = text_to_cstring(relationNameText); uint32 attemptableNodeCount = 0; - ObjectAddress tableAddress = { 0 }; + ObjectAddress *tableAddress = palloc0(sizeof(ObjectAddress)); uint32 candidateNodeIndex = 0; List *candidateNodeList = NIL; @@ -115,8 +115,8 @@ master_create_empty_shard(PG_FUNCTION_ARGS) * via their own connection and committed immediately so they become visible to all * sessions creating shards. */ - ObjectAddressSet(tableAddress, RelationRelationId, relationId); - EnsureDependenciesExistOnAllNodes(&tableAddress); + ObjectAddressSet(*tableAddress, RelationRelationId, relationId); + EnsureAllObjectDependenciesExistOnAllNodes(list_make1(tableAddress)); EnsureReferenceTablesExistOnAllNodes(); /* don't allow the table to be dropped */ diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 2ba4797bf..84a257e04 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -181,8 +181,13 @@ WorkerCreateOrReplaceObject(List *sqlStatements) * same subject. */ Node *parseTree = ParseTreeNode(linitial(sqlStatements)); - ObjectAddress address = GetObjectAddressFromParseTree(parseTree, true); - if (ObjectExists(&address)) + List *addresses = GetObjectAddressListFromParseTree(parseTree, true); + Assert(list_length(viewAddresses) == 1); + + /* We have already asserted that we have exactly 1 address in the addresses. */ + ObjectAddress *address = linitial(addresses); + + if (ObjectExists(address)) { /* * Object with name from statement is already found locally, check if states are @@ -195,7 +200,7 @@ WorkerCreateOrReplaceObject(List *sqlStatements) * recreate our version of the object. This we can compare to what the coordinator * sent us. If they match we don't do anything. 
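* If they do not match, the code below renames the existing object out of
* the way (GenerateBackupNameForCollision + CreateRenameStatement) to make
* room for the version the coordinator sent.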
*/ - List *localSqlStatements = CreateStmtListByObjectAddress(&address); + List *localSqlStatements = CreateStmtListByObjectAddress(address); if (CompareStringList(sqlStatements, localSqlStatements)) { /* @@ -208,9 +213,9 @@ WorkerCreateOrReplaceObject(List *sqlStatements) return false; } - char *newName = GenerateBackupNameForCollision(&address); + char *newName = GenerateBackupNameForCollision(address); - RenameStmt *renameStmt = CreateRenameStatement(&address, newName); + RenameStmt *renameStmt = CreateRenameStatement(address, newName); const char *sqlRenameStmt = DeparseTreeNode((Node *) renameStmt); ProcessUtilityParseTree((Node *) renameStmt, sqlRenameStmt, PROCESS_UTILITY_QUERY, diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c index 452de62e3..14166e30b 100644 --- a/src/backend/distributed/worker/worker_drop_protocol.c +++ b/src/backend/distributed/worker/worker_drop_protocol.c @@ -127,7 +127,8 @@ WorkerDropDistributedTable(Oid relationId) relation_close(distributedRelation, AccessShareLock); /* prepare distributedTableObject for dropping the table */ - ObjectAddress distributedTableObject = { RelationRelationId, relationId, 0 }; + ObjectAddress *distributedTableObject = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*distributedTableObject, RelationRelationId, relationId); /* Drop dependent sequences from pg_dist_object */ #if PG_VERSION_NUM >= PG_VERSION_13 @@ -144,7 +145,7 @@ WorkerDropDistributedTable(Oid relationId) UnmarkObjectDistributed(&ownedSequenceAddress); } - UnmarkObjectDistributed(&distributedTableObject); + UnmarkObjectDistributed(distributedTableObject); /* * Remove metadata before object's itself to make functions no-op within @@ -177,7 +178,7 @@ WorkerDropDistributedTable(Oid relationId) * until the user runs DROP EXTENSION. Therefore, we skip dropping the * table. */ - if (!IsObjectAddressOwnedByExtension(&distributedTableObject, NULL)) + if (!IsAnyObjectAddressOwnedByExtension(list_make1(distributedTableObject), NULL)) { char *relName = get_rel_name(relationId); Oid schemaId = get_rel_namespace(relationId); @@ -238,12 +239,9 @@ worker_drop_shell_table(PG_FUNCTION_ARGS) relation_close(distributedRelation, AccessShareLock); /* prepare distributedTableObject for dropping the table */ - ObjectAddress distributedTableObject = { InvalidOid, InvalidOid, 0 }; - distributedTableObject.classId = RelationRelationId; - distributedTableObject.objectId = relationId; - distributedTableObject.objectSubId = 0; - - if (IsObjectAddressOwnedByExtension(&distributedTableObject, NULL)) + ObjectAddress *distributedTableObject = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*distributedTableObject, RelationRelationId, relationId); + if (IsAnyObjectAddressOwnedByExtension(list_make1(distributedTableObject), NULL)) { PG_RETURN_VOID(); } @@ -270,7 +268,7 @@ worker_drop_shell_table(PG_FUNCTION_ARGS) * * We drop the table with cascade since other tables may be referring to it. 
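* (The drop below is also flagged PERFORM_DELETION_INTERNAL, since it is
* initiated by Citus itself rather than by a user-issued DROP command.)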
*/ - performDeletion(&distributedTableObject, DROP_CASCADE, + performDeletion(distributedTableObject, DROP_CASCADE, PERFORM_DELETION_INTERNAL); PG_RETURN_VOID(); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index f3bdb19b2..23bfbae78 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -148,7 +148,7 @@ extern void QualifyAlterTypeOwnerStmt(Node *stmt); extern char * GetTypeNamespaceNameByNameList(List *names); extern Oid TypeOidGetNamespaceOid(Oid typeOid); -extern ObjectAddress GetObjectAddressFromParseTree(Node *parseTree, bool missing_ok); +extern List * GetObjectAddressListFromParseTree(Node *parseTree, bool missing_ok); extern List * RenameAttributeStmtObjectAddress(Node *stmt, bool missing_ok); /* forward declarations for deparse_view_stmts.c */ diff --git a/src/include/distributed/metadata/dependency.h b/src/include/distributed/metadata/dependency.h index dfe5a7ebc..f04e3a869 100644 --- a/src/include/distributed/metadata/dependency.h +++ b/src/include/distributed/metadata/dependency.h @@ -23,10 +23,9 @@ extern List * GetUniqueDependenciesList(List *objectAddressesList); extern List * GetDependenciesForObject(const ObjectAddress *target); extern List * GetAllSupportedDependenciesForObject(const ObjectAddress *target); extern List * GetAllDependenciesForObject(const ObjectAddress *target); -extern bool ErrorOrWarnIfObjectHasUnsupportedDependency(ObjectAddress *objectAddress); -extern DeferredErrorMessage * DeferErrorIfHasUnsupportedDependency(const - ObjectAddress * - objectAddress); +extern bool ErrorOrWarnIfAnyObjectHasUnsupportedDependency(List *objectAddresses); +extern DeferredErrorMessage * DeferErrorIfAnyObjectHasUnsupportedDependency(const List * + objectAddresses); extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList); extern bool SupportedDependencyByCitus(const ObjectAddress *address); extern List * GetPgDependTuplesForDependingObjects(Oid targetObjectClassId, diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index 2bcb50778..cb905bbfe 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -20,15 +20,15 @@ extern bool ObjectExists(const ObjectAddress *address); extern bool CitusExtensionObject(const ObjectAddress *objectAddress); -extern bool IsObjectDistributed(const ObjectAddress *address); +extern bool IsAnyObjectDistributed(const List *addresses); extern bool ClusterHasDistributedFunctionWithDistArgument(void); extern void MarkObjectDistributed(const ObjectAddress *distAddress); extern void MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress); extern void MarkObjectDistributedLocally(const ObjectAddress *distAddress); extern void UnmarkObjectDistributed(const ObjectAddress *address); extern bool IsTableOwnedByExtension(Oid relationId); -extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target, - ObjectAddress *extensionAddress); +extern bool IsAnyObjectAddressOwnedByExtension(const List *targets, + ObjectAddress *extensionAddress); extern ObjectAddress PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr); extern List * GetDistributedObjectAddressList(void); diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index e5b0877d7..bad361ae6 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -258,15 +258,15 
@@ extern void CreateDistributedTable(Oid relationId, char *distributionColumnName,
 extern void CreateTruncateTrigger(Oid relationId);
 extern TableConversionReturn * UndistributeTable(TableConversionParameters *params);
-extern void EnsureDependenciesExistOnAllNodes(const ObjectAddress *target);
+extern void EnsureAllObjectDependenciesExistOnAllNodes(const List *targets);
 extern DeferredErrorMessage * DeferErrorIfCircularDependencyExists(const ObjectAddress *
                                                                    objectAddress);
 extern List * GetDistributableDependenciesForObject(const ObjectAddress *target);
-extern List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency);
+extern List * GetAllDependencyCreateDDLCommands(const List *dependencies);
 extern bool ShouldPropagate(void);
 extern bool ShouldPropagateCreateInCoordinatedTransction(void);
-extern bool ShouldPropagateObject(const ObjectAddress *address);
+extern bool ShouldPropagateAnyObject(List *addresses);
 extern List * ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort);

 /* Remaining metadata utility functions */

From 6a32061c08ae202a0d4d5fafbdaf9604697dd51b Mon Sep 17 00:00:00 2001
From: Hanefi Onaldi
Date: Tue, 19 Jul 2022 19:24:15 +0300
Subject: [PATCH 05/10] Renames configure.in to fix warnings (#6034)

When building packages on ubuntu jammy, we started to see some warnings.

autoreconf: warning: autoconf input should be named 'configure.ac',
not 'configure.in'
---
 Makefile.global.in            | 4 ++--
 autogen.sh                    | 2 +-
 config/general.m4             | 2 +-
 configure.in => configure.ac  | 0
 src/include/citus_config.h.in | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)
 rename configure.in => configure.ac (100%)

diff --git a/Makefile.global.in b/Makefile.global.in
index 427e9b33e..b3d2a7782 100644
--- a/Makefile.global.in
+++ b/Makefile.global.in
@@ -64,8 +64,8 @@ $(citus_top_builddir)/Makefile.global: $(citus_abs_top_srcdir)/configure $(citus
 $(citus_top_builddir)/config.status: $(citus_abs_top_srcdir)/configure $(citus_abs_top_srcdir)/src/backend/distributed/citus.control
	cd @abs_top_builddir@ && ./config.status --recheck && ./config.status

-# Regenerate configure if configure.in changed
-$(citus_abs_top_srcdir)/configure: $(citus_abs_top_srcdir)/configure.in
+# Regenerate configure if configure.ac changed
+$(citus_abs_top_srcdir)/configure: $(citus_abs_top_srcdir)/configure.ac
	cd ${citus_abs_top_srcdir} && ./autogen.sh

 # If specified via configure, replace the default compiler. Normally
diff --git a/autogen.sh b/autogen.sh
index 145d333cf..f6caa1b8c 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# autogen.sh converts configure.in to configure and creates
+# autogen.sh converts configure.ac to configure and creates
 # citus_config.h.in. The resulting files are checked into
 # the SCM, to avoid everyone needing autoconf installed.

diff --git a/config/general.m4 b/config/general.m4
index 55097b727..f8fd48297 100644
--- a/config/general.m4
+++ b/config/general.m4
@@ -10,7 +10,7 @@
 # argument (other than "yes/no"), etc.
 #
 # The point of this implementation is to reduce code size and
-# redundancy in configure.in and to improve robustness and consistency
+# redundancy in configure.ac and to improve robustness and consistency
 # in the option evaluation code.
diff --git a/configure.in b/configure.ac similarity index 100% rename from configure.in rename to configure.ac diff --git a/src/include/citus_config.h.in b/src/include/citus_config.h.in index 2951229f3..db813ec36 100644 --- a/src/include/citus_config.h.in +++ b/src/include/citus_config.h.in @@ -1,4 +1,4 @@ -/* src/include/citus_config.h.in. Generated from configure.in by autoheader. */ +/* src/include/citus_config.h.in. Generated from configure.ac by autoheader. */ /* From 108ca875ad89d835b5653750abb523f98623a149 Mon Sep 17 00:00:00 2001 From: aykutbozkurt Date: Wed, 20 Jul 2022 10:45:54 +0300 Subject: [PATCH 06/10] fix assertion bugs related to list length --- src/backend/distributed/commands/table.c | 2 +- src/backend/distributed/commands/type.c | 2 +- src/backend/distributed/worker/worker_create_or_replace.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 0b660af70..1d33d10fa 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -652,7 +652,7 @@ PostprocessAlterTableSchemaStmt(Node *node, const char *queryString) List *tableAddresses = GetObjectAddressListFromParseTree((Node *) stmt, true); /* the code-path only supports a single object */ - Assert(list_length(tableAddress) == 1); + Assert(list_length(tableAddresses) == 1); /* We have already asserted that we have exactly 1 address in the addresses. */ ObjectAddress *tableAddress = linitial(tableAddresses); diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index a074fa649..3a10ab6a0 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -120,7 +120,7 @@ PreprocessRenameTypeAttributeStmt(Node *node, const char *queryString, List *typeAddresses = GetObjectAddressListFromParseTree((Node *) stmt, false); /* the code-path only supports a single object */ - Assert(list_length(objectAddresses) == 1); + Assert(list_length(typeAddresses) == 1); if (!ShouldPropagateAnyObject(typeAddresses)) { diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 84a257e04..c6b749621 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -182,7 +182,7 @@ WorkerCreateOrReplaceObject(List *sqlStatements) */ Node *parseTree = ParseTreeNode(linitial(sqlStatements)); List *addresses = GetObjectAddressListFromParseTree(parseTree, true); - Assert(list_length(viewAddresses) == 1); + Assert(list_length(addresses) == 1); /* We have already asserted that we have exactly 1 address in the addresses. 
*/ ObjectAddress *address = linitial(addresses); From 7d6410c838ff3ebb04ca24d0557efe4913be2f85 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Wed, 20 Jul 2022 17:49:36 +0300 Subject: [PATCH 07/10] Drop postgres 12 support (#6040) * Remove if conditions with PG_VERSION_NUM < 13 * Remove server_above_twelve(&eleven) checks from tests * Fix tests * Remove pg12 and pg11 alternative test output files * Remove pg12 specific normalization rules * Some more if conditions in the code * Change RemoteCollationIdExpression and some pg12/pg13 comments * Remove some more normalization rules --- src/backend/columnar/columnar_customscan.c | 7 - src/backend/columnar/columnar_tableam.c | 11 - src/backend/columnar/write_state_management.c | 4 - .../distributed/commands/alter_table.c | 4 - src/backend/distributed/commands/cluster.c | 7 +- .../commands/distribute_object_ops.c | 4 - src/backend/distributed/commands/function.c | 7 +- src/backend/distributed/commands/multi_copy.c | 13 - src/backend/distributed/commands/policy.c | 18 +- src/backend/distributed/commands/sequence.c | 6 +- src/backend/distributed/commands/statistics.c | 13 - .../distributed/commands/subscription.c | 6 - src/backend/distributed/commands/table.c | 6 +- src/backend/distributed/commands/vacuum.c | 15 +- .../locally_reserved_shared_connections.c | 4 - .../connection/placement_connection.c | 2 - .../connection/shared_connection_stats.c | 5 - .../distributed/deparser/citus_ruleutils.c | 4 +- .../deparser/deparse_statistics_stmts.c | 6 - .../deparser/qualify_statistics_stmt.c | 4 - .../distributed/deparser/ruleutils_12.c | 8033 ----------------- .../distributed/executor/adaptive_executor.c | 18 - src/backend/distributed/metadata/dependency.c | 2 - .../distributed/metadata/metadata_cache.c | 2 - .../distributed/metadata/metadata_sync.c | 20 +- .../distributed/metadata/metadata_utility.c | 3 - .../distributed/operations/shard_rebalancer.c | 3 - .../distributed/planner/distributed_planner.c | 4 +- .../planner/function_call_delegation.c | 2 +- .../planner/intermediate_result_pruning.c | 8 - .../distributed/planner/multi_explain.c | 39 +- .../planner/multi_logical_optimizer.c | 4 - .../planner/multi_logical_planner.c | 2 - .../planner/multi_physical_planner.c | 16 - .../relation_restriction_equivalence.c | 12 - .../distributed/relay/relay_event_utility.c | 2 - .../replication/multi_logical_replication.c | 6 - .../transaction/relation_access_tracking.c | 2 - .../distributed/utils/citus_outfuncs.c | 2 - .../utils/foreign_key_relationship.c | 2 - src/backend/distributed/utils/maintenanced.c | 2 - .../distributed/utils/task_execution_utils.c | 2 - .../distributed/worker/worker_drop_protocol.c | 75 - .../columnar/columnar_version_compat.h | 4 - src/include/distributed/distributed_planner.h | 6 - src/include/distributed/listutils.h | 8 +- .../distributed/multi_logical_planner.h | 2 - .../distributed/pg_version_constants.h | 1 - src/include/distributed/version_compat.h | 6 - src/include/pg_version_compat.h | 20 - src/test/regress/bin/normalize.sed | 33 - .../expected/alter_distributed_table.out | 10 - .../alter_table_set_access_method.out | 7 - .../regress/expected/citus_local_tables.out | 2 +- .../expected/citus_local_tables_mx.out | 4 +- .../regress/expected/columnar_truncate.out | 8 - .../columnar_types_without_comparison.out | 9 +- .../columnar_types_without_comparison_0.out | 159 - src/test/regress/expected/cte_inline.out | 9 - .../expected/distributed_functions.out | 2 +- 
.../distributed_types_xact_add_enum_value.out | 7 - .../regress/expected/follower_single_node.out | 7 - .../grant_on_foreign_server_propagation.out | 10 +- .../grant_on_foreign_server_propagation_0.out | 6 - .../regress/expected/intermediate_results.out | 2 +- .../expected/isolation_select_for_update.out | 4 +- .../expected/metadata_sync_helpers.out | 2 +- .../regress/expected/multi_create_shards.out | 2 +- src/test/regress/expected/multi_explain.out | 14 +- src/test/regress/expected/multi_extension.out | 14 +- .../regress/expected/multi_foreign_key.out | 8 +- .../regress/expected/multi_metadata_sync.out | 6 +- .../regress/expected/multi_partitioning.out | 2 +- .../expected/multi_partitioning_utils.out | 12 +- .../expected/multi_prune_shard_list.out | 2 +- .../expected/multi_tenant_isolation.out | 2 +- src/test/regress/expected/multi_truncate.out | 7 +- .../regress/expected/partition_wise_join.out | 36 +- .../expected/partition_wise_join_0.out | 36 +- src/test/regress/expected/pg12.out | 9 +- src/test/regress/expected/pg13.out | 7 - src/test/regress/expected/pg13_0.out | 6 - .../expected/pg13_propagate_statistics.out | 7 - .../expected/pg13_propagate_statistics_0.out | 6 - src/test/regress/expected/pg13_with_ties.out | 7 - .../regress/expected/pg13_with_ties_0.out | 6 - src/test/regress/expected/pg_dump.out | 18 +- .../expected/propagate_extension_commands.out | 8 - .../propagate_extension_commands_1.out | 643 -- .../regress/expected/sqlsmith_failures.out | 16 - .../expected/start_stop_metadata_sync.out | 12 +- src/test/regress/expected/tableam.out | 7 - .../expected/upgrade_columnar_after.out | 7 - .../expected/upgrade_columnar_after_0.out | 6 - .../expected/upgrade_columnar_before.out | 7 - .../expected/upgrade_list_citus_objects.out | 8 - src/test/regress/output/multi_copy.source | 8 +- .../regress/sql/alter_distributed_table.sql | 6 - .../sql/alter_table_set_access_method.sql | 8 - src/test/regress/sql/columnar_truncate.sql | 4 - .../sql/columnar_types_without_comparison.sql | 8 - src/test/regress/sql/cte_inline.sql | 5 - .../distributed_types_xact_add_enum_value.sql | 2 - src/test/regress/sql/follower_single_node.sql | 8 - .../grant_on_foreign_server_propagation.sql | 10 +- src/test/regress/sql/multi_extension.sql | 8 +- src/test/regress/sql/multi_truncate.sql | 3 +- src/test/regress/sql/pg12.sql | 8 - src/test/regress/sql/pg13.sql | 9 - .../regress/sql/pg13_propagate_statistics.sql | 8 - src/test/regress/sql/pg13_with_ties.sql | 9 - src/test/regress/sql/pg_dump.sql | 19 +- .../sql/propagate_extension_commands.sql | 4 - src/test/regress/sql/sqlsmith_failures.sql | 17 - src/test/regress/sql/tableam.sql | 8 - .../regress/sql/upgrade_columnar_after.sql | 8 - .../regress/sql/upgrade_columnar_before.sql | 8 - .../sql/upgrade_list_citus_objects.sql | 4 - 118 files changed, 126 insertions(+), 9702 deletions(-) delete mode 100644 src/backend/distributed/deparser/ruleutils_12.c delete mode 100644 src/test/regress/expected/columnar_types_without_comparison_0.out delete mode 100644 src/test/regress/expected/grant_on_foreign_server_propagation_0.out delete mode 100644 src/test/regress/expected/pg13_0.out delete mode 100644 src/test/regress/expected/pg13_propagate_statistics_0.out delete mode 100644 src/test/regress/expected/pg13_with_ties_0.out delete mode 100644 src/test/regress/expected/propagate_extension_commands_1.out delete mode 100644 src/test/regress/expected/upgrade_columnar_after_0.out diff --git a/src/backend/columnar/columnar_customscan.c b/src/backend/columnar/columnar_customscan.c 
index ddb2104f8..4d4ba63f0 100644 --- a/src/backend/columnar/columnar_customscan.c +++ b/src/backend/columnar/columnar_customscan.c @@ -121,10 +121,8 @@ static void ColumnarScan_ExplainCustomScan(CustomScanState *node, List *ancestor static const char * ColumnarPushdownClausesStr(List *context, List *clauses); static const char * ColumnarProjectedColumnsStr(List *context, List *projectedColumns); -#if PG_VERSION_NUM >= 130000 static List * set_deparse_context_planstate(List *dpcontext, Node *node, List *ancestors); -#endif /* other helpers */ static List * ColumnarVarNeeded(ColumnarScanState *columnarScanState); @@ -1986,8 +1984,6 @@ ColumnarVarNeeded(ColumnarScanState *columnarScanState) } -#if PG_VERSION_NUM >= 130000 - /* * set_deparse_context_planstate is a compatibility wrapper for versions 13+. */ @@ -1997,6 +1993,3 @@ set_deparse_context_planstate(List *dpcontext, Node *node, List *ancestors) PlanState *ps = (PlanState *) node; return set_deparse_context_plan(dpcontext, ps->plan, ancestors); } - - -#endif diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 333231dec..cef532d2d 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -12,11 +12,7 @@ #include "access/rewriteheap.h" #include "access/tableam.h" #include "access/tsmapi.h" -#if PG_VERSION_NUM >= 130000 #include "access/detoast.h" -#else -#include "access/tuptoaster.h" -#endif #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/index.h" @@ -1676,15 +1672,8 @@ ColumnarReadRowsIntoIndex(TableScanDesc scan, Relation indexRelation, /* currently, columnar tables can't have dead tuples */ bool tupleIsAlive = true; -#if PG_VERSION_NUM >= PG_VERSION_13 indexCallback(indexRelation, &itemPointerData, indexValues, indexNulls, tupleIsAlive, indexCallbackState); -#else - HeapTuple scanTuple = ExecCopySlotHeapTuple(slot); - scanTuple->t_self = itemPointerData; - indexCallback(indexRelation, scanTuple, indexValues, indexNulls, - tupleIsAlive, indexCallbackState); -#endif reltuples++; } diff --git a/src/backend/columnar/write_state_management.c b/src/backend/columnar/write_state_management.c index 201a1a479..5ad849cac 100644 --- a/src/backend/columnar/write_state_management.c +++ b/src/backend/columnar/write_state_management.c @@ -14,12 +14,8 @@ #include "access/multixact.h" #include "access/rewriteheap.h" #include "access/tsmapi.h" -#if PG_VERSION_NUM >= 130000 #include "access/heaptoast.h" #include "common/hashfn.h" -#else -#include "access/tuptoaster.h" -#endif #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/index.h" diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index 8258919f4..4aa6855cd 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -1567,11 +1567,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, ExecuteQueryViaSPI(query->data, SPI_OK_INSERT); } -#if PG_VERSION_NUM >= PG_VERSION_13 List *ownedSequences = getOwnedSequences(sourceId); -#else - List *ownedSequences = getOwnedSequences(sourceId, InvalidAttrNumber); -#endif Oid sequenceOid = InvalidOid; foreach_oid(sequenceOid, ownedSequences) { diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index 37fc7bf92..a773816de 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -36,7 +36,6 @@ 
PreprocessClusterStmt(Node *node, const char *clusterCommand, { ClusterStmt *clusterStmt = castNode(ClusterStmt, node); bool missingOK = false; - DDLJob *ddlJob = NULL; if (clusterStmt->relation == NULL) { @@ -67,18 +66,14 @@ PreprocessClusterStmt(Node *node, const char *clusterCommand, return NIL; } -#if PG_VERSION_NUM >= 120000 if (IsClusterStmtVerbose_compat(clusterStmt)) -#else - if (clusterStmt->verbose) -#endif { ereport(ERROR, (errmsg("cannot run CLUSTER command"), errdetail("VERBOSE option is currently unsupported " "for distributed tables."))); } - ddlJob = palloc0(sizeof(DDLJob)); + DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ObjectAddressSet(ddlJob->targetObjectAddress, RelationRelationId, relationId); ddlJob->metadataSyncCommand = clusterCommand; ddlJob->taskList = DDLTaskList(relationId, clusterCommand); diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 24afc0a0a..9c9fdb6e5 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -874,7 +874,6 @@ static DistributeObjectOps Schema_Rename = { .address = AlterSchemaRenameStmtObjectAddress, .markDistributed = false, }; -#if PG_VERSION_NUM >= PG_VERSION_13 static DistributeObjectOps Statistics_Alter = { .deparse = DeparseAlterStatisticsStmt, .qualify = QualifyAlterStatisticsStmt, @@ -883,7 +882,6 @@ static DistributeObjectOps Statistics_Alter = { .address = NULL, .markDistributed = false, }; -#endif static DistributeObjectOps Statistics_AlterObjectSchema = { .deparse = DeparseAlterStatisticsSchemaStmt, .qualify = QualifyAlterStatisticsSchemaStmt, @@ -1304,13 +1302,11 @@ GetDistributeObjectOps(Node *node) return &Sequence_Alter; } -#if PG_VERSION_NUM >= PG_VERSION_13 case T_AlterStatsStmt: { return &Statistics_Alter; } -#endif case T_AlterTableStmt: { AlterTableStmt *stmt = castNode(AlterTableStmt, node); diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 048c0a299..2d8a2e09a 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -920,11 +920,8 @@ GetFunctionAlterOwnerCommand(const RegProcedure funcOid) /* * GetAggregateDDLCommand returns a string for creating an aggregate. - * CREATE OR REPLACE AGGREGATE was only introduced in pg12, - * so a second parameter useCreateOrReplace signals whether to - * to create a plain CREATE AGGREGATE or not. In pg11 we return a string - * which is a call to worker_create_or_replace_object in lieu of - * CREATE OR REPLACE AGGREGATE. + * A second parameter useCreateOrReplace signals whether to + * to create a plain CREATE AGGREGATE or not. 
*/ static char * GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index c19d26729..89c30c6a7 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -106,9 +106,7 @@ #include "nodes/nodeFuncs.h" #include "parser/parse_func.h" #include "parser/parse_type.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "tcop/cmdtag.h" -#endif #include "tsearch/ts_locale.h" #include "utils/builtins.h" #include "utils/lsyscache.h" @@ -764,12 +762,7 @@ FindJsonbInputColumns(TupleDesc tupleDescriptor, List *inputColumnNameList) static void CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount) { - #if PG_VERSION_NUM >= PG_VERSION_13 SetQueryCompletion(completionTag, CMDTAG_COPY, processedRowCount); - #else - SafeSnprintf(completionTag, COMPLETION_TAG_BUFSIZE, - "COPY " UINT64_FORMAT, processedRowCount); - #endif } @@ -781,9 +774,6 @@ static List * RemoveOptionFromList(List *optionList, char *optionName) { ListCell *optionCell = NULL; - #if PG_VERSION_NUM < PG_VERSION_13 - ListCell *previousCell = NULL; - #endif foreach(optionCell, optionList) { DefElem *option = (DefElem *) lfirst(optionCell); @@ -792,9 +782,6 @@ RemoveOptionFromList(List *optionList, char *optionName) { return list_delete_cell_compat(optionList, optionCell, previousCell); } - #if PG_VERSION_NUM < PG_VERSION_13 - previousCell = optionCell; - #endif } return optionList; diff --git a/src/backend/distributed/commands/policy.c b/src/backend/distributed/commands/policy.c index 2cbf6f6f9..f3e7b7ec9 100644 --- a/src/backend/distributed/commands/policy.c +++ b/src/backend/distributed/commands/policy.c @@ -290,22 +290,10 @@ PostprocessCreatePolicyStmt(Node *node, const char *queryString) static void AddRangeTableEntryToQueryCompat(ParseState *parseState, Relation relation) { -#if PG_VERSION_NUM >= PG_VERSION_13 - ParseNamespaceItem *rte = NULL; -#else - RangeTblEntry *rte = NULL; -#endif - - rte = addRangeTableEntryForRelation(parseState, relation, -#if PG_VERSION_NUM >= PG_VERSION_12 - AccessShareLock, -#endif - NULL, false, false); -#if PG_VERSION_NUM >= PG_VERSION_13 + ParseNamespaceItem *rte = addRangeTableEntryForRelation(parseState, relation, + AccessShareLock, NULL, + false, false); addNSItemToQuery(parseState, rte, false, true, true); -#else - addRTEtoQuery(parseState, rte, false, true, true); -#endif } diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index b3ae475aa..6a51516b8 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -197,11 +197,7 @@ ExtractDefaultColumnsAndOwnedSequences(Oid relationId, List **columnNameList, if (list_length(columnOwnedSequences) != 0) { /* - * A column might only own one sequence. We intentionally use - * GetSequencesOwnedByColumn macro and pick initial oid from the - * list instead of using getOwnedSequence. This is both because - * getOwnedSequence is removed in pg13 and is also because it - * errors out if column does not have any sequences. + * A column might only own one sequence. 
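+ * The assertion below relies on that, which is why picking the first
+ * element of the owned-sequences list is sufficient.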
*/ Assert(list_length(columnOwnedSequences) == 1); ownedSequenceId = linitial_oid(columnOwnedSequences); diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index b93abcc79..5592c1df8 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -55,9 +55,7 @@ static char * GenerateAlterIndexColumnSetStatsCommand(char *indexNameWithSchema, int32 attstattarget); static Oid GetRelIdByStatsOid(Oid statsOid); static char * CreateAlterCommandIfOwnerNotDefault(Oid statsOid); -#if PG_VERSION_NUM >= PG_VERSION_13 static char * CreateAlterCommandIfTargetNotDefault(Oid statsOid); -#endif /* * PreprocessCreateStatisticsStmt is called during the planning phase for @@ -343,8 +341,6 @@ AlterStatisticsSchemaStmtObjectAddress(Node *node, bool missingOk) } -#if PG_VERSION_NUM >= PG_VERSION_13 - /* * PreprocessAlterStatisticsStmt is called during the planning phase for * ALTER STATISTICS .. SET STATISTICS. @@ -393,8 +389,6 @@ PreprocessAlterStatisticsStmt(Node *node, const char *queryString, } -#endif - /* * PreprocessAlterStatisticsOwnerStmt is called during the planning phase for * ALTER STATISTICS .. OWNER TO. @@ -507,7 +501,6 @@ GetExplicitStatisticsCommandList(Oid relationId) explicitStatisticsCommandList = lappend(explicitStatisticsCommandList, makeTableDDLCommandString(createStatisticsCommand)); -#if PG_VERSION_NUM >= PG_VERSION_13 /* we need to alter stats' target if it's getting distributed after creation */ char *alterStatisticsTargetCommand = @@ -519,7 +512,6 @@ GetExplicitStatisticsCommandList(Oid relationId) lappend(explicitStatisticsCommandList, makeTableDDLCommandString(alterStatisticsTargetCommand)); } -#endif /* we need to alter stats' owner if it's getting distributed after creation */ char *alterStatisticsOwnerCommand = @@ -709,8 +701,6 @@ CreateAlterCommandIfOwnerNotDefault(Oid statsOid) } -#if PG_VERSION_NUM >= PG_VERSION_13 - /* * CreateAlterCommandIfTargetNotDefault returns an ALTER STATISTICS .. SET STATISTICS * command if the stats object with given id has a target different than the default one. @@ -745,6 +735,3 @@ CreateAlterCommandIfTargetNotDefault(Oid statsOid) return DeparseAlterStatisticsStmt((Node *) alterStatsStmt); } - - -#endif diff --git a/src/backend/distributed/commands/subscription.c b/src/backend/distributed/commands/subscription.c index 09508ee8d..76d3bcb77 100644 --- a/src/backend/distributed/commands/subscription.c +++ b/src/backend/distributed/commands/subscription.c @@ -35,9 +35,6 @@ Node * ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt) { ListCell *currCell = NULL; - #if PG_VERSION_NUM < PG_VERSION_13 - ListCell *prevCell = NULL; - #endif bool useAuthinfo = false; foreach(currCell, createSubStmt->options) @@ -54,9 +51,6 @@ ProcessCreateSubscriptionStmt(CreateSubscriptionStmt *createSubStmt) break; } - #if PG_VERSION_NUM < PG_VERSION_13 - prevCell = currCell; - #endif } if (useAuthinfo) diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 1d33d10fa..9fbe1d993 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -757,9 +757,9 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand, { /* * We don't process subcommands generated by postgres. - * This is mainly because postgres started to issue ALTER TABLE commands - * for some set of objects that are defined via CREATE TABLE commands as - * of pg13. 
However, citus already has a separate logic for CREATE TABLE + * This is mainly because postgres issues ALTER TABLE commands + * for some set of objects that are defined via CREATE TABLE commands. + * However, citus already has a separate logic for CREATE TABLE * commands. * * To support foreign keys from/to postgres local tables to/from reference diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index 374e52bda..274aebb8f 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -41,10 +41,7 @@ typedef struct CitusVacuumParams int options; VacOptValue truncate; VacOptValue index_cleanup; - - #if PG_VERSION_NUM >= PG_VERSION_13 int nworkers; - #endif } CitusVacuumParams; /* Local functions forward declarations for processing distributed table commands */ @@ -323,10 +320,8 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams) /* if no flags remain, exit early */ if (vacuumFlags == 0 && vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED && - vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED -#if PG_VERSION_NUM >= PG_VERSION_13 - && vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET -#endif + vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED && + vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET ) { return vacuumPrefix->data; @@ -409,12 +404,10 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams) } } -#if PG_VERSION_NUM >= PG_VERSION_13 if (vacuumParams.nworkers != VACUUM_PARALLEL_NOTSET) { appendStringInfo(vacuumPrefix, "PARALLEL %d,", vacuumParams.nworkers); } -#endif vacuumPrefix->data[vacuumPrefix->len - 1] = ')'; @@ -515,9 +508,7 @@ VacuumStmtParams(VacuumStmt *vacstmt) /* Set default value */ params.index_cleanup = VACOPTVALUE_UNSPECIFIED; params.truncate = VACOPTVALUE_UNSPECIFIED; - #if PG_VERSION_NUM >= PG_VERSION_13 params.nworkers = VACUUM_PARALLEL_NOTSET; - #endif /* Parse options list */ DefElem *opt = NULL; @@ -596,7 +587,6 @@ VacuumStmtParams(VacuumStmt *vacstmt) params.truncate = defGetBoolean(opt) ? 
VACOPTVALUE_ENABLED : VACOPTVALUE_DISABLED; } - #if PG_VERSION_NUM >= PG_VERSION_13 else if (strcmp(opt->defname, "parallel") == 0) { if (opt->arg == NULL) @@ -620,7 +610,6 @@ VacuumStmtParams(VacuumStmt *vacstmt) params.nworkers = nworkers; } } - #endif else { ereport(ERROR, diff --git a/src/backend/distributed/connection/locally_reserved_shared_connections.c b/src/backend/distributed/connection/locally_reserved_shared_connections.c index a26983104..9f703dc65 100644 --- a/src/backend/distributed/connection/locally_reserved_shared_connections.c +++ b/src/backend/distributed/connection/locally_reserved_shared_connections.c @@ -48,11 +48,7 @@ #include "distributed/tuplestore.h" #include "distributed/worker_manager.h" #include "utils/builtins.h" -#if PG_VERSION_NUM < PG_VERSION_13 -#include "utils/hashutils.h" -#else #include "common/hashfn.h" -#endif #define RESERVED_CONNECTION_COLUMNS 4 diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c index 775e4b1cf..d3929f4b9 100644 --- a/src/backend/distributed/connection/placement_connection.c +++ b/src/backend/distributed/connection/placement_connection.c @@ -26,9 +26,7 @@ #include "distributed/placement_connection.h" #include "distributed/relation_access_tracking.h" #include "utils/hsearch.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif #include "utils/memutils.h" diff --git a/src/backend/distributed/connection/shared_connection_stats.c b/src/backend/distributed/connection/shared_connection_stats.c index 8602d23c2..82a354733 100644 --- a/src/backend/distributed/connection/shared_connection_stats.c +++ b/src/backend/distributed/connection/shared_connection_stats.c @@ -36,12 +36,7 @@ #include "distributed/time_constants.h" #include "distributed/tuplestore.h" #include "utils/builtins.h" -#if PG_VERSION_NUM < PG_VERSION_13 -#include "utils/hsearch.h" -#include "utils/hashutils.h" -#else #include "common/hashfn.h" -#endif #include "storage/ipc.h" diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 0cc3c1e0e..0a281be4d 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -526,7 +526,7 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults } /* - * Add table access methods for pg12 and higher when the table is configured with an + * Add table access methods when the table is configured with an * access method */ if (accessMethod) @@ -999,7 +999,6 @@ deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparse appendStringInfo(buffer, "%s ", NameListToQuotedString(indexElement->opclass)); } -#if PG_VERSION_NUM >= PG_VERSION_13 /* Commit on postgres: 911e70207703799605f5a0e8aad9f06cff067c63*/ if (indexElement->opclassopts != NIL) @@ -1008,7 +1007,6 @@ deparse_index_columns(StringInfo buffer, List *indexParameterList, List *deparse AppendStorageParametersToString(buffer, indexElement->opclassopts); appendStringInfoString(buffer, ") "); } -#endif if (indexElement->ordering != SORTBY_DEFAULT) { diff --git a/src/backend/distributed/deparser/deparse_statistics_stmts.c b/src/backend/distributed/deparser/deparse_statistics_stmts.c index 90828cc67..923af645e 100644 --- a/src/backend/distributed/deparser/deparse_statistics_stmts.c +++ b/src/backend/distributed/deparser/deparse_statistics_stmts.c @@ -27,9 +27,7 @@ static void AppendCreateStatisticsStmt(StringInfo buf, 
CreateStatsStmt *stmt); static void AppendDropStatisticsStmt(StringInfo buf, List *nameList, bool ifExists); static void AppendAlterStatisticsRenameStmt(StringInfo buf, RenameStmt *stmt); static void AppendAlterStatisticsSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt); -#if PG_VERSION_NUM >= PG_VERSION_13 static void AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt); -#endif static void AppendAlterStatisticsOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt); static void AppendStatisticsName(StringInfo buf, CreateStatsStmt *stmt); static void AppendStatTypes(StringInfo buf, CreateStatsStmt *stmt); @@ -90,7 +88,6 @@ DeparseAlterStatisticsSchemaStmt(Node *node) } -#if PG_VERSION_NUM >= PG_VERSION_13 char * DeparseAlterStatisticsStmt(Node *node) { @@ -105,7 +102,6 @@ DeparseAlterStatisticsStmt(Node *node) } -#endif char * DeparseAlterStatisticsOwnerStmt(Node *node) { @@ -177,7 +173,6 @@ AppendAlterStatisticsSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt) } -#if PG_VERSION_NUM >= PG_VERSION_13 static void AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt) { @@ -186,7 +181,6 @@ AppendAlterStatisticsStmt(StringInfo buf, AlterStatsStmt *stmt) } -#endif static void AppendAlterStatisticsOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt) { diff --git a/src/backend/distributed/deparser/qualify_statistics_stmt.c b/src/backend/distributed/deparser/qualify_statistics_stmt.c index 254005bd5..ce9443930 100644 --- a/src/backend/distributed/deparser/qualify_statistics_stmt.c +++ b/src/backend/distributed/deparser/qualify_statistics_stmt.c @@ -152,8 +152,6 @@ QualifyAlterStatisticsSchemaStmt(Node *node) } -#if PG_VERSION_NUM >= PG_VERSION_13 - /* * QualifyAlterStatisticsStmt qualifies AlterStatsStmt's with schema name for * ALTER STATISTICS .. SET STATISTICS statements. @@ -180,8 +178,6 @@ QualifyAlterStatisticsStmt(Node *node) } -#endif - /* * QualifyAlterStatisticsOwnerStmt qualifies AlterOwnerStmt's with schema * name for ALTER STATISTICS .. OWNER TO statements. diff --git a/src/backend/distributed/deparser/ruleutils_12.c b/src/backend/distributed/deparser/ruleutils_12.c deleted file mode 100644 index 513678df1..000000000 --- a/src/backend/distributed/deparser/ruleutils_12.c +++ /dev/null @@ -1,8033 +0,0 @@ -/*------------------------------------------------------------------------- - * - * ruleutils_12.c - * Functions to convert stored expressions/querytrees back to - * source text - * - * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/distributed/utils/ruleutils_12.c - * - * This needs to be closely in sync with the core code. 
- *------------------------------------------------------------------------- - */ -#include "distributed/pg_version_constants.h" - -#include "pg_config.h" - -#if (PG_VERSION_NUM >= PG_VERSION_12) && (PG_VERSION_NUM < PG_VERSION_13) - -#include "postgres.h" - -#include -#include -#include - -#include "access/amapi.h" -#include "access/htup_details.h" -#include "access/relation.h" -#include "access/sysattr.h" -#include "access/table.h" -#include "catalog/dependency.h" -#include "catalog/indexing.h" -#include "catalog/pg_aggregate.h" -#include "catalog/pg_am.h" -#include "catalog/pg_authid.h" -#include "catalog/pg_collation.h" -#include "catalog/pg_constraint.h" -#include "catalog/pg_depend.h" -#include "catalog/pg_extension.h" -#include "catalog/pg_foreign_data_wrapper.h" -#include "catalog/pg_language.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_operator.h" -#include "catalog/pg_partitioned_table.h" -#include "catalog/pg_proc.h" -#include "catalog/pg_statistic_ext.h" -#include "catalog/pg_trigger.h" -#include "catalog/pg_type.h" -#include "commands/defrem.h" -#include "commands/extension.h" -#include "commands/tablespace.h" -#include "common/keywords.h" -#include "distributed/citus_nodefuncs.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/namespace_utils.h" -#include "executor/spi.h" -#include "foreign/foreign.h" -#include "funcapi.h" -#include "mb/pg_wchar.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "optimizer/optimizer.h" -#include "parser/parse_node.h" -#include "parser/parse_agg.h" -#include "parser/parse_func.h" -#include "parser/parse_node.h" -#include "parser/parse_oper.h" -#include "parser/parser.h" -#include "parser/parsetree.h" -#include "rewrite/rewriteHandler.h" -#include "rewrite/rewriteManip.h" -#include "rewrite/rewriteSupport.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/hsearch.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/ruleutils.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" -#include "utils/typcache.h" -#include "utils/varlena.h" -#include "utils/xml.h" - - -/* ---------- - * Pretty formatting constants - * ---------- - */ - -/* Indent counts */ -#define PRETTYINDENT_STD 8 -#define PRETTYINDENT_JOIN 4 -#define PRETTYINDENT_VAR 4 - -#define PRETTYINDENT_LIMIT 40 /* wrap limit */ - -/* Pretty flags */ -#define PRETTYFLAG_PAREN 0x0001 -#define PRETTYFLAG_INDENT 0x0002 - -/* Default line length for pretty-print wrapping: 0 means wrap always */ -#define WRAP_COLUMN_DEFAULT 0 - -/* macros to test if pretty action needed */ -#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN) -#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT) - - -/* ---------- - * Local data types - * ---------- - */ - -/* Context info needed for invoking a recursive querytree display routine */ -typedef struct -{ - StringInfo buf; /* output buffer to append to */ - List *namespaces; /* List of deparse_namespace nodes */ - List *windowClause; /* Current query level's WINDOW clause */ - List *windowTList; /* targetlist for resolving WINDOW clause */ - int prettyFlags; /* enabling of pretty-print functions */ - int wrapColumn; /* max line length, or -1 for no limit */ - int indentLevel; /* current indent level for prettyprint */ - bool varprefix; /* true to print prefixes on Vars */ - Oid distrelid; /* the distributed table being modified, if valid */ - int64 shardid; /* a 
distributed table's shardid, if positive */ - ParseExprKind special_exprkind; /* set only for exprkinds needing special - * handling */ -} deparse_context; - -/* - * Each level of query context around a subtree needs a level of Var namespace. - * A Var having varlevelsup=N refers to the N'th item (counting from 0) in - * the current context's namespaces list. - * - * The rangetable is the list of actual RTEs from the query tree, and the - * cte list is the list of actual CTEs. - * - * rtable_names holds the alias name to be used for each RTE (either a C - * string, or NULL for nameless RTEs such as unnamed joins). - * rtable_columns holds the column alias names to be used for each RTE. - * - * In some cases we need to make names of merged JOIN USING columns unique - * across the whole query, not only per-RTE. If so, unique_using is true - * and using_names is a list of C strings representing names already assigned - * to USING columns. - * - * When deparsing plan trees, there is always just a single item in the - * deparse_namespace list (since a plan tree never contains Vars with - * varlevelsup > 0). We store the PlanState node that is the immediate - * parent of the expression to be deparsed, as well as a list of that - * PlanState's ancestors. In addition, we store its outer and inner subplan - * state nodes, as well as their plan nodes' targetlists, and the index tlist - * if the current plan node might contain INDEX_VAR Vars. (These fields could - * be derived on-the-fly from the current PlanState, but it seems notationally - * clearer to set them up as separate fields.) - */ -typedef struct -{ - List *rtable; /* List of RangeTblEntry nodes */ - List *rtable_names; /* Parallel list of names for RTEs */ - List *rtable_columns; /* Parallel list of deparse_columns structs */ - List *ctes; /* List of CommonTableExpr nodes */ - /* Workspace for column alias assignment: */ - bool unique_using; /* Are we making USING names globally unique */ - List *using_names; /* List of assigned names for USING columns */ - /* Remaining fields are used only when deparsing a Plan tree: */ - PlanState *planstate; /* immediate parent of current expression */ - List *ancestors; /* ancestors of planstate */ - PlanState *outer_planstate; /* outer subplan state, or NULL if none */ - PlanState *inner_planstate; /* inner subplan state, or NULL if none */ - List *outer_tlist; /* referent for OUTER_VAR Vars */ - List *inner_tlist; /* referent for INNER_VAR Vars */ - List *index_tlist; /* referent for INDEX_VAR Vars */ -} deparse_namespace; - -/* - * Per-relation data about column alias names. - * - * Selecting aliases is unreasonably complicated because of the need to dump - * rules/views whose underlying tables may have had columns added, deleted, or - * renamed since the query was parsed. We must nonetheless print the rule/view - * in a form that can be reloaded and will produce the same results as before. - * - * For each RTE used in the query, we must assign column aliases that are - * unique within that RTE. SQL does not require this of the original query, - * but due to factors such as *-expansion we need to be able to uniquely - * reference every column in a decompiled query. As long as we qualify all - * column references, per-RTE uniqueness is sufficient for that. - * - * However, we can't ensure per-column name uniqueness for unnamed join RTEs, - * since they just inherit column names from their input RTEs, and we can't - * rename the columns at the join level. 
Most of the time this isn't an issue - * because we don't need to reference the join's output columns as such; we - * can reference the input columns instead. That approach can fail for merged - * JOIN USING columns, however, so when we have one of those in an unnamed - * join, we have to make that column's alias globally unique across the whole - * query to ensure it can be referenced unambiguously. - * - * Another problem is that a JOIN USING clause requires the columns to be - * merged to have the same aliases in both input RTEs, and that no other - * columns in those RTEs or their children conflict with the USING names. - * To handle that, we do USING-column alias assignment in a recursive - * traversal of the query's jointree. When descending through a JOIN with - * USING, we preassign the USING column names to the child columns, overriding - * other rules for column alias assignment. We also mark each RTE with a list - * of all USING column names selected for joins containing that RTE, so that - * when we assign other columns' aliases later, we can avoid conflicts. - * - * Another problem is that if a JOIN's input tables have had columns added or - * deleted since the query was parsed, we must generate a column alias list - * for the join that matches the current set of input columns --- otherwise, a - * change in the number of columns in the left input would throw off matching - * of aliases to columns of the right input. Thus, positions in the printable - * column alias list are not necessarily one-for-one with varattnos of the - * JOIN, so we need a separate new_colnames[] array for printing purposes. - */ -typedef struct -{ - /* - * colnames is an array containing column aliases to use for columns that - * existed when the query was parsed. Dropped columns have NULL entries. - * This array can be directly indexed by varattno to get a Var's name. - * - * Non-NULL entries are guaranteed unique within the RTE, *except* when - * this is for an unnamed JOIN RTE. In that case we merely copy up names - * from the two input RTEs. - * - * During the recursive descent in set_using_names(), forcible assignment - * of a child RTE's column name is represented by pre-setting that element - * of the child's colnames array. So at that stage, NULL entries in this - * array just mean that no name has been preassigned, not necessarily that - * the column is dropped. - */ - int num_cols; /* length of colnames[] array */ - char **colnames; /* array of C strings and NULLs */ - - /* - * new_colnames is an array containing column aliases to use for columns - * that would exist if the query was re-parsed against the current - * definitions of its base tables. This is what to print as the column - * alias list for the RTE. This array does not include dropped columns, - * but it will include columns added since original parsing. Indexes in - * it therefore have little to do with current varattno values. As above, - * entries are unique unless this is for an unnamed JOIN RTE. (In such an - * RTE, we never actually print this array, but we must compute it anyway - * for possible use in computing column names of upper joins.) The - * parallel array is_new_col marks which of these columns are new since - * original parsing. Entries with is_new_col false must match the - * non-NULL colnames entries one-for-one. 
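 *
 * Editorial illustration (hedged sketch, not part of the original file):
 * suppose a view was parsed against t(a, b, c), after which column b was
 * dropped and column d was added. The arrays described above would hold:
 *     colnames     = { "a", NULL, "c" }       (NULL marks the dropped b)
 *     new_colnames = { "a", "c", "d" }        (d is new since parse time)
 *     is_new_col   = { false, false, true }
 * so colnames stays directly indexable by varattno, while new_colnames
 * matches the column list a re-parse of the query would see today.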
- */ - int num_new_cols; /* length of new_colnames[] array */ - char **new_colnames; /* array of C strings */ - bool *is_new_col; /* array of bool flags */ - - /* This flag tells whether we should actually print a column alias list */ - bool printaliases; - - /* This list has all names used as USING names in joins above this RTE */ - List *parentUsing; /* names assigned to parent merged columns */ - - /* - * If this struct is for a JOIN RTE, we fill these fields during the - * set_using_names() pass to describe its relationship to its child RTEs. - * - * leftattnos and rightattnos are arrays with one entry per existing - * output column of the join (hence, indexable by join varattno). For a - * simple reference to a column of the left child, leftattnos[i] is the - * child RTE's attno and rightattnos[i] is zero; and conversely for a - * column of the right child. But for merged columns produced by JOIN - * USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero. - * Also, if the column has been dropped, both are zero. - * - * If it's a JOIN USING, usingNames holds the alias names selected for the - * merged columns (these might be different from the original USING list, - * if we had to modify names to achieve uniqueness). - */ - int leftrti; /* rangetable index of left child */ - int rightrti; /* rangetable index of right child */ - int *leftattnos; /* left-child varattnos of join cols, or 0 */ - int *rightattnos; /* right-child varattnos of join cols, or 0 */ - List *usingNames; /* names assigned to merged columns */ -} deparse_columns; - -/* This macro is analogous to rt_fetch(), but for deparse_columns structs */ -#define deparse_columns_fetch(rangetable_index, dpns) \ - ((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1)) - -/* - * Entry in set_rtable_names' hash table - */ -typedef struct -{ - char name[NAMEDATALEN]; /* Hash key --- must be first */ - int counter; /* Largest addition used so far for name */ -} NameHashEntry; - - -/* ---------- - * Local functions - * - * Most of these functions used to use fixed-size buffers to build their - * results. Now, they take an (already initialized) StringInfo object - * as a parameter, and append their text output to its contents. 
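 *
 * Editorial sketch (hedged; this is the standard PostgreSQL StringInfo
 * pattern referred to above, not code from this file):
 *     StringInfo buf = makeStringInfo();
 *     appendStringInfoString(buf, "SELECT ");
 *     appendStringInfo(buf, "%d", 42);
 * after which buf->data holds "SELECT 42" and buf->len its length.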
- * ---------- - */ -static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, - Bitmapset *rels_used); -static void set_deparse_for_query(deparse_namespace *dpns, Query *query, - List *parent_namespaces); -static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode); -static void set_using_names(deparse_namespace *dpns, Node *jtnode, - List *parentUsing); -static void set_relation_column_names(deparse_namespace *dpns, - RangeTblEntry *rte, - deparse_columns *colinfo); -static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo); -static bool colname_is_unique(const char *colname, deparse_namespace *dpns, - deparse_columns *colinfo); -static char *make_colname_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo); -static void expand_colnames_array_to(deparse_columns *colinfo, int n); -static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, - deparse_columns *colinfo); -static void flatten_join_using_qual(Node *qual, - List **leftvars, List **rightvars); -static char *get_rtable_name(int rtindex, deparse_context *context); -static void set_deparse_planstate(deparse_namespace *dpns, PlanState *ps); -static void push_child_plan(deparse_namespace *dpns, PlanState *ps, - deparse_namespace *save_dpns); -static void pop_child_plan(deparse_namespace *dpns, - deparse_namespace *save_dpns); -static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, - deparse_namespace *save_dpns); -static void pop_ancestor_plan(deparse_namespace *dpns, - deparse_namespace *save_dpns); -static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, - TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent); -static void get_query_def_extended(Query *query, StringInfo buf, - List *parentnamespace, Oid distrelid, int64 shardid, - TupleDesc resultDesc, int prettyFlags, int wrapColumn, - int startIndent); -static void get_values_def(List *values_lists, deparse_context *context); -static void get_with_clause(Query *query, deparse_context *context); -static void get_select_query_def(Query *query, deparse_context *context, - TupleDesc resultDesc); -static void get_insert_query_def(Query *query, deparse_context *context); -static void get_update_query_def(Query *query, deparse_context *context); -static void get_update_query_targetlist_def(Query *query, List *targetList, - deparse_context *context, - RangeTblEntry *rte); -static void get_delete_query_def(Query *query, deparse_context *context); -static void get_utility_query_def(Query *query, deparse_context *context); -static void get_basic_select_query(Query *query, deparse_context *context, - TupleDesc resultDesc); -static void get_target_list(List *targetList, deparse_context *context, - TupleDesc resultDesc); -static void get_setop_query(Node *setOp, Query *query, - deparse_context *context, - TupleDesc resultDesc); -static Node *get_rule_sortgroupclause(Index ref, List *tlist, - bool force_colno, - deparse_context *context); -static void get_rule_groupingset(GroupingSet *gset, List *targetlist, - bool omit_parens, deparse_context *context); -static void get_rule_orderby(List *orderList, List *targetList, - bool force_colno, deparse_context *context); -static void get_rule_windowclause(Query *query, deparse_context *context); -static void get_rule_windowspec(WindowClause *wc, List *targetList, - deparse_context *context); -static char *get_variable(Var *var, int levelsup, bool istoplevel, - 
deparse_context *context); -static void get_special_variable(Node *node, deparse_context *context, - void *private); -static void resolve_special_varno(Node *node, deparse_context *context, - void *private, - void (*callback) (Node *, deparse_context *, void *)); -static Node *find_param_referent(Param *param, deparse_context *context, - deparse_namespace **dpns_p, ListCell **ancestor_cell_p); -static void get_parameter(Param *param, deparse_context *context); -static const char *get_simple_binary_op_name(OpExpr *expr); -static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags); -static void appendContextKeyword(deparse_context *context, const char *str, - int indentBefore, int indentAfter, int indentPlus); -static void removeStringInfoSpaces(StringInfo str); -static void get_rule_expr(Node *node, deparse_context *context, - bool showimplicit); -static void get_rule_expr_toplevel(Node *node, deparse_context *context, - bool showimplicit); -static void get_rule_expr_funccall(Node *node, deparse_context *context, - bool showimplicit); -static bool looks_like_function(Node *node); -static void get_oper_expr(OpExpr *expr, deparse_context *context); -static void get_func_expr(FuncExpr *expr, deparse_context *context, - bool showimplicit); -static void get_agg_expr(Aggref *aggref, deparse_context *context, - Aggref *original_aggref); -static void get_agg_combine_expr(Node *node, deparse_context *context, - void *private); -static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context); -static void get_coercion_expr(Node *arg, deparse_context *context, - Oid resulttype, int32 resulttypmod, - Node *parentNode); -static void get_const_expr(Const *constval, deparse_context *context, - int showtype); -static void get_const_collation(Const *constval, deparse_context *context); -static void simple_quote_literal(StringInfo buf, const char *val); -static void get_sublink_expr(SubLink *sublink, deparse_context *context); -static void get_tablefunc(TableFunc *tf, deparse_context *context, - bool showimplicit); -static void get_from_clause(Query *query, const char *prefix, - deparse_context *context); -static void get_from_clause_item(Node *jtnode, Query *query, - deparse_context *context); -static void get_column_alias_list(deparse_columns *colinfo, - deparse_context *context); -static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, - deparse_columns *colinfo, - deparse_context *context); -static void get_tablesample_def(TableSampleClause *tablesample, - deparse_context *context); -static void get_opclass_name(Oid opclass, Oid actual_datatype, - StringInfo buf); -static Node *processIndirection(Node *node, deparse_context *context); -static void printSubscripts(SubscriptingRef *aref, deparse_context *context); -static char *get_relation_name(Oid relid); -static char *generate_relation_or_shard_name(Oid relid, Oid distrelid, - int64 shardid, List *namespaces); -static char *generate_rte_shard_name(RangeTblEntry *rangeTableEntry); -static char *generate_fragment_name(char *schemaName, char *tableName); -static char *generate_function_name(Oid funcid, int nargs, - List *argnames, Oid *argtypes, - bool has_variadic, bool *use_variadic_p, - ParseExprKind special_exprkind); - -#define only_marker(rte) ((rte)->inh ? "" : "ONLY ") - - - -/* - * pg_get_query_def parses back one query tree, and outputs the resulting query - * string into given buffer. 
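 *
 * Editorial usage sketch (assumed caller, not from the original file):
 *     StringInfo buffer = makeStringInfo();
 *     pg_get_query_def(query, buffer);
 *     elog(DEBUG2, "deparsed query: %s", buffer->data);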
- */ -void -pg_get_query_def(Query *query, StringInfo buffer) -{ - get_query_def(query, buffer, NIL, NULL, 0, WRAP_COLUMN_DEFAULT, 0); -} - -/* - * get_merged_argument_list merges both IN and OUT arguments lists into one and also - * eliminates the INOUT duplicates(present in both the lists). - */ -bool -get_merged_argument_list(CallStmt *stmt, List **mergedNamedArgList, - Oid **mergedNamedArgTypes, - List **mergedArgumentList, - int *totalArguments) -{ - /* No OUT argument support in Postgres 12 */ - return false; -} - -/* - * pg_get_rule_expr deparses an expression and returns the result as a string. - */ -char * -pg_get_rule_expr(Node *expression) -{ - bool showImplicitCasts = true; - deparse_context context; - StringInfo buffer = makeStringInfo(); - - PushOverrideEmptySearchPath(CurrentMemoryContext); - - context.buf = buffer; - context.namespaces = NIL; - context.windowClause = NIL; - context.windowTList = NIL; - context.varprefix = false; - context.prettyFlags = 0; - context.wrapColumn = WRAP_COLUMN_DEFAULT; - context.indentLevel = 0; - context.special_exprkind = EXPR_KIND_NONE; - context.distrelid = InvalidOid; - context.shardid = INVALID_SHARD_ID; - - get_rule_expr(expression, &context, showImplicitCasts); - - /* revert back to original search_path */ - PopOverrideSearchPath(); - - return buffer->data; -} - - -/* - * set_rtable_names: select RTE aliases to be used in printing a query - * - * We fill in dpns->rtable_names with a list of names that is one-for-one with - * the already-filled dpns->rtable list. Each RTE name is unique among those - * in the new namespace plus any ancestor namespaces listed in - * parent_namespaces. - * - * If rels_used isn't NULL, only RTE indexes listed in it are given aliases. - * - * Note that this function is only concerned with relation names, not column - * names. - */ -static void -set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, - Bitmapset *rels_used) -{ - HASHCTL hash_ctl; - HTAB *names_hash; - NameHashEntry *hentry; - bool found; - int rtindex; - ListCell *lc; - - dpns->rtable_names = NIL; - /* nothing more to do if empty rtable */ - if (dpns->rtable == NIL) - return; - - /* - * We use a hash table to hold known names, so that this process is O(N) - * not O(N^2) for N names. - */ - MemSet(&hash_ctl, 0, sizeof(hash_ctl)); - hash_ctl.keysize = NAMEDATALEN; - hash_ctl.entrysize = sizeof(NameHashEntry); - hash_ctl.hcxt = CurrentMemoryContext; - names_hash = hash_create("set_rtable_names names", - list_length(dpns->rtable), - &hash_ctl, - HASH_ELEM | HASH_CONTEXT); - /* Preload the hash table with names appearing in parent_namespaces */ - foreach(lc, parent_namespaces) - { - deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc); - ListCell *lc2; - - foreach(lc2, olddpns->rtable_names) - { - char *oldname = (char *) lfirst(lc2); - - if (oldname == NULL) - continue; - hentry = (NameHashEntry *) hash_search(names_hash, - oldname, - HASH_ENTER, - &found); - /* we do not complain about duplicate names in parent namespaces */ - hentry->counter = 0; - } - } - - /* Now we can scan the rtable */ - rtindex = 1; - foreach(lc, dpns->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - char *refname; - - /* Just in case this takes an unreasonable amount of time ... 
*/ - CHECK_FOR_INTERRUPTS(); - - if (rels_used && !bms_is_member(rtindex, rels_used)) - { - /* Ignore unreferenced RTE */ - refname = NULL; - } - else if (rte->alias) - { - /* If RTE has a user-defined alias, prefer that */ - refname = rte->alias->aliasname; - } - else if (rte->rtekind == RTE_RELATION) - { - /* Use the current actual name of the relation */ - refname = get_rel_name(rte->relid); - } - else if (rte->rtekind == RTE_JOIN) - { - /* Unnamed join has no refname */ - refname = NULL; - } - else - { - /* Otherwise use whatever the parser assigned */ - refname = rte->eref->aliasname; - } - - /* - * If the selected name isn't unique, append digits to make it so, and - * make a new hash entry for it once we've got a unique name. For a - * very long input name, we might have to truncate to stay within - * NAMEDATALEN. - */ - if (refname) - { - hentry = (NameHashEntry *) hash_search(names_hash, - refname, - HASH_ENTER, - &found); - if (found) - { - /* Name already in use, must choose a new one */ - int refnamelen = strlen(refname); - char *modname = (char *) palloc(refnamelen + 16); - NameHashEntry *hentry2; - - do - { - hentry->counter++; - for (;;) - { - /* - * We avoid using %.*s here because it can misbehave - * if the data is not valid in what libc thinks is the - * prevailing encoding. - */ - memcpy(modname, refname, refnamelen); - sprintf(modname + refnamelen, "_%d", hentry->counter); - if (strlen(modname) < NAMEDATALEN) - break; - /* drop chars from refname to keep all the digits */ - refnamelen = pg_mbcliplen(refname, refnamelen, - refnamelen - 1); - } - hentry2 = (NameHashEntry *) hash_search(names_hash, - modname, - HASH_ENTER, - &found); - } while (found); - hentry2->counter = 0; /* init new hash entry */ - refname = modname; - } - else - { - /* Name not previously used, need only initialize hentry */ - hentry->counter = 0; - } - } - - dpns->rtable_names = lappend(dpns->rtable_names, refname); - rtindex++; - } - - hash_destroy(names_hash); -} - -/* - * set_deparse_for_query: set up deparse_namespace for deparsing a Query tree - * - * For convenience, this is defined to initialize the deparse_namespace struct - * from scratch. - */ -static void -set_deparse_for_query(deparse_namespace *dpns, Query *query, - List *parent_namespaces) -{ - ListCell *lc; - ListCell *lc2; - - /* Initialize *dpns and fill rtable/ctes links */ - memset(dpns, 0, sizeof(deparse_namespace)); - dpns->rtable = query->rtable; - dpns->ctes = query->cteList; - - /* Assign a unique relation alias to each RTE */ - set_rtable_names(dpns, parent_namespaces, NULL); - - /* Initialize dpns->rtable_columns to contain zeroed structs */ - dpns->rtable_columns = NIL; - while (list_length(dpns->rtable_columns) < list_length(dpns->rtable)) - dpns->rtable_columns = lappend(dpns->rtable_columns, - palloc0(sizeof(deparse_columns))); - - /* If it's a utility query, it won't have a jointree */ - if (query->jointree) - { - /* Detect whether global uniqueness of USING names is needed */ - dpns->unique_using = - has_dangerous_join_using(dpns, (Node *) query->jointree); - - /* - * Select names for columns merged by USING, via a recursive pass over - * the query jointree. - */ - set_using_names(dpns, (Node *) query->jointree, NIL); - } - - /* - * Now assign remaining column aliases for each RTE. We do this in a - * linear scan of the rtable, so as to process RTEs whether or not they - * are in the jointree (we mustn't miss NEW.*, INSERT target relations, - * etc). 
JOIN RTEs must be processed after their children, but this is - * okay because they appear later in the rtable list than their children - * (cf Asserts in identify_join_columns()). - */ - forboth(lc, dpns->rtable, lc2, dpns->rtable_columns) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - deparse_columns *colinfo = (deparse_columns *) lfirst(lc2); - - if (rte->rtekind == RTE_JOIN) - set_join_column_names(dpns, rte, colinfo); - else - set_relation_column_names(dpns, rte, colinfo); - } -} - -/* - * has_dangerous_join_using: search jointree for unnamed JOIN USING - * - * Merged columns of a JOIN USING may act differently from either of the input - * columns, either because they are merged with COALESCE (in a FULL JOIN) or - * because an implicit coercion of the underlying input column is required. - * In such a case the column must be referenced as a column of the JOIN not as - * a column of either input. And this is problematic if the join is unnamed - * (alias-less): we cannot qualify the column's name with an RTE name, since - * there is none. (Forcibly assigning an alias to the join is not a solution, - * since that will prevent legal references to tables below the join.) - * To ensure that every column in the query is unambiguously referenceable, - * we must assign such merged columns names that are globally unique across - * the whole query, aliasing other columns out of the way as necessary. - * - * Because the ensuing re-aliasing is fairly damaging to the readability of - * the query, we don't do this unless we have to. So, we must pre-scan - * the join tree to see if we have to, before starting set_using_names(). - */ -static bool -has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode) -{ - if (IsA(jtnode, RangeTblRef)) - { - /* nothing to do here */ - } - else if (IsA(jtnode, FromExpr)) - { - FromExpr *f = (FromExpr *) jtnode; - ListCell *lc; - - foreach(lc, f->fromlist) - { - if (has_dangerous_join_using(dpns, (Node *) lfirst(lc))) - return true; - } - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - - /* Is it an unnamed JOIN with USING? */ - if (j->alias == NULL && j->usingClause) - { - /* - * Yes, so check each join alias var to see if any of them are not - * simple references to underlying columns. If so, we have a - * dangerous situation and must pick unique aliases. - */ - RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable); - ListCell *lc; - - foreach(lc, jrte->joinaliasvars) - { - Var *aliasvar = (Var *) lfirst(lc); - - if (aliasvar != NULL && !IsA(aliasvar, Var)) - return true; - } - } - - /* Nope, but inspect children */ - if (has_dangerous_join_using(dpns, j->larg)) - return true; - if (has_dangerous_join_using(dpns, j->rarg)) - return true; - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); - return false; -} - -/* - * set_using_names: select column aliases to be used for merged USING columns - * - * We do this during a recursive descent of the query jointree. - * dpns->unique_using must already be set to determine the global strategy. - * - * Column alias info is saved in the dpns->rtable_columns list, which is - * assumed to be filled with pre-zeroed deparse_columns structs. - * - * parentUsing is a list of all USING aliases assigned in parent joins of - * the current jointree node. (The passed-in list must not be modified.) 
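 *
 * Editorial illustration (hedged, not from the original file): the
 * "dangerous" case motivating dpns->unique_using arises from, e.g.,
 *     SELECT x FROM a FULL JOIN b USING (x);
 * where the merged output column x is really COALESCE(a.x, b.x) rather
 * than a plain Var of either input, and the join carries no alias, so x
 * must be given a name that is unique across the whole query.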
- */ -static void -set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) -{ - if (IsA(jtnode, RangeTblRef)) - { - /* nothing to do now */ - } - else if (IsA(jtnode, FromExpr)) - { - FromExpr *f = (FromExpr *) jtnode; - ListCell *lc; - - foreach(lc, f->fromlist) - set_using_names(dpns, (Node *) lfirst(lc), parentUsing); - } - else if (IsA(jtnode, JoinExpr)) - { - JoinExpr *j = (JoinExpr *) jtnode; - RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable); - deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); - int *leftattnos; - int *rightattnos; - deparse_columns *leftcolinfo; - deparse_columns *rightcolinfo; - int i; - ListCell *lc; - - /* Get info about the shape of the join */ - identify_join_columns(j, rte, colinfo); - leftattnos = colinfo->leftattnos; - rightattnos = colinfo->rightattnos; - - /* Look up the not-yet-filled-in child deparse_columns structs */ - leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); - rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); - - /* - * If this join is unnamed, then we cannot substitute new aliases at - * this level, so any name requirements pushed down to here must be - * pushed down again to the children. - */ - if (rte->alias == NULL) - { - for (i = 0; i < colinfo->num_cols; i++) - { - char *colname = colinfo->colnames[i]; - - if (colname == NULL) - continue; - - /* Push down to left column, unless it's a system column */ - if (leftattnos[i] > 0) - { - expand_colnames_array_to(leftcolinfo, leftattnos[i]); - leftcolinfo->colnames[leftattnos[i] - 1] = colname; - } - - /* Same on the righthand side */ - if (rightattnos[i] > 0) - { - expand_colnames_array_to(rightcolinfo, rightattnos[i]); - rightcolinfo->colnames[rightattnos[i] - 1] = colname; - } - } - } - - /* - * If there's a USING clause, select the USING column names and push - * those names down to the children. We have two strategies: - * - * If dpns->unique_using is true, we force all USING names to be - * unique across the whole query level. In principle we'd only need - * the names of dangerous USING columns to be globally unique, but to - * safely assign all USING names in a single pass, we have to enforce - * the same uniqueness rule for all of them. However, if a USING - * column's name has been pushed down from the parent, we should use - * it as-is rather than making a uniqueness adjustment. This is - * necessary when we're at an unnamed join, and it creates no risk of - * ambiguity. Also, if there's a user-written output alias for a - * merged column, we prefer to use that rather than the input name; - * this simplifies the logic and seems likely to lead to less aliasing - * overall. - * - * If dpns->unique_using is false, we only need USING names to be - * unique within their own join RTE. We still need to honor - * pushed-down names, though. - * - * Though significantly different in results, these two strategies are - * implemented by the same code, with only the difference of whether - * to put assigned names into dpns->using_names. 
- */ - if (j->usingClause) - { - /* Copy the input parentUsing list so we don't modify it */ - parentUsing = list_copy(parentUsing); - - /* USING names must correspond to the first join output columns */ - expand_colnames_array_to(colinfo, list_length(j->usingClause)); - i = 0; - foreach(lc, j->usingClause) - { - char *colname = strVal(lfirst(lc)); - - /* Assert it's a merged column */ - Assert(leftattnos[i] != 0 && rightattnos[i] != 0); - - /* Adopt passed-down name if any, else select unique name */ - if (colinfo->colnames[i] != NULL) - colname = colinfo->colnames[i]; - else - { - /* Prefer user-written output alias if any */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - /* Make it appropriately unique */ - colname = make_colname_unique(colname, dpns, colinfo); - if (dpns->unique_using) - dpns->using_names = lappend(dpns->using_names, - colname); - /* Save it as output column name, too */ - colinfo->colnames[i] = colname; - } - - /* Remember selected names for use later */ - colinfo->usingNames = lappend(colinfo->usingNames, colname); - parentUsing = lappend(parentUsing, colname); - - /* Push down to left column, unless it's a system column */ - if (leftattnos[i] > 0) - { - expand_colnames_array_to(leftcolinfo, leftattnos[i]); - leftcolinfo->colnames[leftattnos[i] - 1] = colname; - } - - /* Same on the righthand side */ - if (rightattnos[i] > 0) - { - expand_colnames_array_to(rightcolinfo, rightattnos[i]); - rightcolinfo->colnames[rightattnos[i] - 1] = colname; - } - - i++; - } - } - - /* Mark child deparse_columns structs with correct parentUsing info */ - leftcolinfo->parentUsing = parentUsing; - rightcolinfo->parentUsing = parentUsing; - - /* Now recursively assign USING column names in children */ - set_using_names(dpns, j->larg, parentUsing); - set_using_names(dpns, j->rarg, parentUsing); - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); -} - -/* - * set_relation_column_names: select column aliases for a non-join RTE - * - * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. - * If any colnames entries are already filled in, those override local - * choices. - */ -static void -set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo) -{ - int ncolumns; - char **real_colnames; - bool changed_any; - bool has_anonymous; - int noldcolumns; - int i; - int j; - - /* - * Extract the RTE's "real" column names. This is comparable to - * get_rte_attribute_name, except that it's important to disregard dropped - * columns. We put NULL into the array for a dropped column. 
- */ - if (rte->rtekind == RTE_RELATION) - { - /* Relation --- look to the system catalogs for up-to-date info */ - Relation rel; - TupleDesc tupdesc; - - rel = relation_open(rte->relid, AccessShareLock); - tupdesc = RelationGetDescr(rel); - - ncolumns = tupdesc->natts; - real_colnames = (char **) palloc(ncolumns * sizeof(char *)); - - for (i = 0; i < ncolumns; i++) - { - Form_pg_attribute attr = TupleDescAttr(tupdesc, i); - - if (attr->attisdropped) - real_colnames[i] = NULL; - else - real_colnames[i] = pstrdup(NameStr(attr->attname)); - } - relation_close(rel, AccessShareLock); - } - else - { - /* Otherwise use the column names from eref */ - ListCell *lc; - - ncolumns = list_length(rte->eref->colnames); - real_colnames = (char **) palloc(ncolumns * sizeof(char *)); - - i = 0; - foreach(lc, rte->eref->colnames) - { - /* - * If the column name shown in eref is an empty string, then it's - * a column that was dropped at the time of parsing the query, so - * treat it as dropped. - */ - char *cname = strVal(lfirst(lc)); - - if (cname[0] == '\0') - cname = NULL; - real_colnames[i] = cname; - i++; - } - } - - /* - * Ensure colinfo->colnames has a slot for each column. (It could be long - * enough already, if we pushed down a name for the last column.) Note: - * it's possible that there are now more columns than there were when the - * query was parsed, ie colnames could be longer than rte->eref->colnames. - * We must assign unique aliases to the new columns too, else there could - * be unresolved conflicts when the view/rule is reloaded. - */ - expand_colnames_array_to(colinfo, ncolumns); - Assert(colinfo->num_cols == ncolumns); - - /* - * Make sufficiently large new_colnames and is_new_col arrays, too. - * - * Note: because we leave colinfo->num_new_cols zero until after the loop, - * colname_is_unique will not consult that array, which is fine because it - * would only be duplicate effort. - */ - colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *)); - colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool)); - - /* - * Scan the columns, select a unique alias for each one, and store it in - * colinfo->colnames and colinfo->new_colnames. The former array has NULL - * entries for dropped columns, the latter omits them. Also mark - * new_colnames entries as to whether they are new since parse time; this - * is the case for entries beyond the length of rte->eref->colnames. 
- */ - noldcolumns = list_length(rte->eref->colnames); - changed_any = false; - has_anonymous = false; - j = 0; - for (i = 0; i < ncolumns; i++) - { - char *real_colname = real_colnames[i]; - char *colname = colinfo->colnames[i]; - - /* Skip dropped columns */ - if (real_colname == NULL) - { - Assert(colname == NULL); /* colnames[i] is already NULL */ - continue; - } - - /* If alias already assigned, that's what to use */ - if (colname == NULL) - { - /* If user wrote an alias, prefer that over real column name */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - else - colname = real_colname; - - /* Unique-ify and insert into colinfo */ - colname = make_colname_unique(colname, dpns, colinfo); - - colinfo->colnames[i] = colname; - } - - /* Put names of non-dropped columns in new_colnames[] too */ - colinfo->new_colnames[j] = colname; - /* And mark them as new or not */ - colinfo->is_new_col[j] = (i >= noldcolumns); - j++; - - /* Remember if any assigned aliases differ from "real" name */ - if (!changed_any && strcmp(colname, real_colname) != 0) - changed_any = true; - - /* - * Remember if there is a reference to an anonymous column as named by - * char * FigureColname(Node *node) - */ - if (!has_anonymous && strcmp(real_colname, "?column?") == 0) - has_anonymous = true; - } - - /* - * Set correct length for new_colnames[] array. (Note: if columns have - * been added, colinfo->num_cols includes them, which is not really quite - * right but is harmless, since any new columns must be at the end where - * they won't affect varattnos of pre-existing columns.) - */ - colinfo->num_new_cols = j; - - /* - * For a relation RTE, we need only print the alias column names if any - * are different from the underlying "real" names. For a function RTE, - * always emit a complete column alias list; this is to protect against - * possible instability of the default column names (eg, from altering - * parameter names). For tablefunc RTEs, we never print aliases, because - * the column names are part of the clause itself. For other RTE types, - * print if we changed anything OR if there were user-written column - * aliases (since the latter would be part of the underlying "reality"). - */ - if (rte->rtekind == RTE_RELATION) - colinfo->printaliases = changed_any; - else if (rte->rtekind == RTE_FUNCTION) - colinfo->printaliases = true; - else if (rte->rtekind == RTE_TABLEFUNC) - colinfo->printaliases = false; - else if (rte->alias && rte->alias->colnames != NIL) - colinfo->printaliases = true; - else - colinfo->printaliases = changed_any || has_anonymous; -} - -/* - * set_join_column_names: select column aliases for a join RTE - * - * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. - * If any colnames entries are already filled in, those override local - * choices. Also, names for USING columns were already chosen by - * set_using_names(). We further expect that column alias selection has been - * completed for both input RTEs. 
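 *
 * Editorial illustration (hedged): for SELECT * FROM a JOIN b USING (x),
 * the join RTE's output columns come in parser order: the merged column x
 * first, then a's remaining columns, then b's remaining columns. The code
 * below relies on exactly that ordering when rebuilding new_colnames.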
- */ -static void -set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, - deparse_columns *colinfo) -{ - deparse_columns *leftcolinfo; - deparse_columns *rightcolinfo; - bool changed_any; - int noldcolumns; - int nnewcolumns; - Bitmapset *leftmerged = NULL; - Bitmapset *rightmerged = NULL; - int i; - int j; - int ic; - int jc; - - /* Look up the previously-filled-in child deparse_columns structs */ - leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); - rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); - - /* - * Ensure colinfo->colnames has a slot for each column. (It could be long - * enough already, if we pushed down a name for the last column.) Note: - * it's possible that one or both inputs now have more columns than there - * were when the query was parsed, but we'll deal with that below. We - * only need entries in colnames for pre-existing columns. - */ - noldcolumns = list_length(rte->eref->colnames); - expand_colnames_array_to(colinfo, noldcolumns); - Assert(colinfo->num_cols == noldcolumns); - - /* - * Scan the join output columns, select an alias for each one, and store - * it in colinfo->colnames. If there are USING columns, set_using_names() - * already selected their names, so we can start the loop at the first - * non-merged column. - */ - changed_any = false; - for (i = list_length(colinfo->usingNames); i < noldcolumns; i++) - { - char *colname = colinfo->colnames[i]; - char *real_colname; - - /* Ignore dropped column (only possible for non-merged column) */ - if (colinfo->leftattnos[i] == 0 && colinfo->rightattnos[i] == 0) - { - Assert(colname == NULL); - continue; - } - - /* Get the child column name */ - if (colinfo->leftattnos[i] > 0) - real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1]; - else if (colinfo->rightattnos[i] > 0) - real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1]; - else - { - /* We're joining system columns --- use eref name */ - real_colname = strVal(list_nth(rte->eref->colnames, i)); - } - Assert(real_colname != NULL); - - /* In an unnamed join, just report child column names as-is */ - if (rte->alias == NULL) - { - colinfo->colnames[i] = real_colname; - continue; - } - - /* If alias already assigned, that's what to use */ - if (colname == NULL) - { - /* If user wrote an alias, prefer that over real column name */ - if (rte->alias && i < list_length(rte->alias->colnames)) - colname = strVal(list_nth(rte->alias->colnames, i)); - else - colname = real_colname; - - /* Unique-ify and insert into colinfo */ - colname = make_colname_unique(colname, dpns, colinfo); - - colinfo->colnames[i] = colname; - } - - /* Remember if any assigned aliases differ from "real" name */ - if (!changed_any && strcmp(colname, real_colname) != 0) - changed_any = true; - } - - /* - * Calculate number of columns the join would have if it were re-parsed - * now, and create storage for the new_colnames and is_new_col arrays. - * - * Note: colname_is_unique will be consulting new_colnames[] during the - * loops below, so its not-yet-filled entries must be zeroes. - */ - nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols - - list_length(colinfo->usingNames); - colinfo->num_new_cols = nnewcolumns; - colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *)); - colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool)); - - /* - * Generating the new_colnames array is a bit tricky since any new columns - * added since parse time must be inserted in the right places. 
This code - * must match the parser, which will order a join's columns as merged - * columns first (in USING-clause order), then non-merged columns from the - * left input (in attnum order), then non-merged columns from the right - * input (ditto). If one of the inputs is itself a join, its columns will - * be ordered according to the same rule, which means newly-added columns - * might not be at the end. We can figure out what's what by consulting - * the leftattnos and rightattnos arrays plus the input is_new_col arrays. - * - * In these loops, i indexes leftattnos/rightattnos (so it's join varattno - * less one), j indexes new_colnames/is_new_col, and ic/jc have similar - * meanings for the current child RTE. - */ - - /* Handle merged columns; they are first and can't be new */ - i = j = 0; - while (i < noldcolumns && - colinfo->leftattnos[i] != 0 && - colinfo->rightattnos[i] != 0) - { - /* column name is already determined and known unique */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - colinfo->is_new_col[j] = false; - - /* build bitmapsets of child attnums of merged columns */ - if (colinfo->leftattnos[i] > 0) - leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]); - if (colinfo->rightattnos[i] > 0) - rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]); - - i++, j++; - } - - /* Handle non-merged left-child columns */ - ic = 0; - for (jc = 0; jc < leftcolinfo->num_new_cols; jc++) - { - char *child_colname = leftcolinfo->new_colnames[jc]; - - if (!leftcolinfo->is_new_col[jc]) - { - /* Advance ic to next non-dropped old column of left child */ - while (ic < leftcolinfo->num_cols && - leftcolinfo->colnames[ic] == NULL) - ic++; - Assert(ic < leftcolinfo->num_cols); - ic++; - /* If it is a merged column, we already processed it */ - if (bms_is_member(ic, leftmerged)) - continue; - /* Else, advance i to the corresponding existing join column */ - while (i < colinfo->num_cols && - colinfo->colnames[i] == NULL) - i++; - Assert(i < colinfo->num_cols); - Assert(ic == colinfo->leftattnos[i]); - /* Use the already-assigned name of this column */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - i++; - } - else - { - /* - * Unique-ify the new child column name and assign, unless we're - * in an unnamed join, in which case just copy - */ - if (rte->alias != NULL) - { - colinfo->new_colnames[j] = - make_colname_unique(child_colname, dpns, colinfo); - if (!changed_any && - strcmp(colinfo->new_colnames[j], child_colname) != 0) - changed_any = true; - } - else - colinfo->new_colnames[j] = child_colname; - } - - colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc]; - j++; - } - - /* Handle non-merged right-child columns in exactly the same way */ - ic = 0; - for (jc = 0; jc < rightcolinfo->num_new_cols; jc++) - { - char *child_colname = rightcolinfo->new_colnames[jc]; - - if (!rightcolinfo->is_new_col[jc]) - { - /* Advance ic to next non-dropped old column of right child */ - while (ic < rightcolinfo->num_cols && - rightcolinfo->colnames[ic] == NULL) - ic++; - Assert(ic < rightcolinfo->num_cols); - ic++; - /* If it is a merged column, we already processed it */ - if (bms_is_member(ic, rightmerged)) - continue; - /* Else, advance i to the corresponding existing join column */ - while (i < colinfo->num_cols && - colinfo->colnames[i] == NULL) - i++; - Assert(i < colinfo->num_cols); - Assert(ic == colinfo->rightattnos[i]); - /* Use the already-assigned name of this column */ - colinfo->new_colnames[j] = colinfo->colnames[i]; - i++; - } - else - { - /* - * Unique-ify the 
new child column name and assign, unless we're - * in an unnamed join, in which case just copy - */ - if (rte->alias != NULL) - { - colinfo->new_colnames[j] = - make_colname_unique(child_colname, dpns, colinfo); - if (!changed_any && - strcmp(colinfo->new_colnames[j], child_colname) != 0) - changed_any = true; - } - else - colinfo->new_colnames[j] = child_colname; - } - - colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc]; - j++; - } - - /* Assert we processed the right number of columns */ -#ifdef USE_ASSERT_CHECKING - while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) - i++; - Assert(i == colinfo->num_cols); - Assert(j == nnewcolumns); -#endif - - /* - * For a named join, print column aliases if we changed any from the child - * names. Unnamed joins cannot print aliases. - */ - if (rte->alias != NULL) - colinfo->printaliases = changed_any; - else - colinfo->printaliases = false; -} - -/* - * colname_is_unique: is colname distinct from already-chosen column names? - * - * dpns is query-wide info, colinfo is for the column's RTE - */ -static bool -colname_is_unique(const char *colname, deparse_namespace *dpns, - deparse_columns *colinfo) -{ - int i; - ListCell *lc; - - /* Check against already-assigned column aliases within RTE */ - for (i = 0; i < colinfo->num_cols; i++) - { - char *oldname = colinfo->colnames[i]; - - if (oldname && strcmp(oldname, colname) == 0) - return false; - } - - /* - * If we're building a new_colnames array, check that too (this will be - * partially but not completely redundant with the previous checks) - */ - for (i = 0; i < colinfo->num_new_cols; i++) - { - char *oldname = colinfo->new_colnames[i]; - - if (oldname && strcmp(oldname, colname) == 0) - return false; - } - - /* Also check against USING-column names that must be globally unique */ - foreach(lc, dpns->using_names) - { - char *oldname = (char *) lfirst(lc); - - if (strcmp(oldname, colname) == 0) - return false; - } - - /* Also check against names already assigned for parent-join USING cols */ - foreach(lc, colinfo->parentUsing) - { - char *oldname = (char *) lfirst(lc); - - if (strcmp(oldname, colname) == 0) - return false; - } - - return true; -} - -/* - * make_colname_unique: modify colname if necessary to make it unique - * - * dpns is query-wide info, colinfo is for the column's RTE - */ -static char * -make_colname_unique(char *colname, deparse_namespace *dpns, - deparse_columns *colinfo) -{ - /* - * If the selected name isn't unique, append digits to make it so. For a - * very long input name, we might have to truncate to stay within - * NAMEDATALEN. - */ - if (!colname_is_unique(colname, dpns, colinfo)) - { - int colnamelen = strlen(colname); - char *modname = (char *) palloc(colnamelen + 16); - int i = 0; - - do - { - i++; - for (;;) - { - /* - * We avoid using %.*s here because it can misbehave if the - * data is not valid in what libc thinks is the prevailing - * encoding. - */ - memcpy(modname, colname, colnamelen); - sprintf(modname + colnamelen, "_%d", i); - if (strlen(modname) < NAMEDATALEN) - break; - /* drop chars from colname to keep all the digits */ - colnamelen = pg_mbcliplen(colname, colnamelen, - colnamelen - 1); - } - } while (!colname_is_unique(modname, dpns, colinfo)); - colname = modname; - } - return colname; -} - -/* - * expand_colnames_array_to: make colinfo->colnames at least n items long - * - * Any added array entries are initialized to zero. 
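 *
 * Editorial illustration (hedged, names invented): make_colname_unique()
 * above resolves a collision by suffixing a counter, so a second "id"
 * becomes "id_1", a third "id_2", and so on; pg_mbcliplen() trims very
 * long names first so the suffixed result still fits within NAMEDATALEN.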
- */ -static void -expand_colnames_array_to(deparse_columns *colinfo, int n) -{ - if (n > colinfo->num_cols) - { - if (colinfo->colnames == NULL) - colinfo->colnames = (char **) palloc0(n * sizeof(char *)); - else - { - colinfo->colnames = (char **) repalloc(colinfo->colnames, - n * sizeof(char *)); - memset(colinfo->colnames + colinfo->num_cols, 0, - (n - colinfo->num_cols) * sizeof(char *)); - } - colinfo->num_cols = n; - } -} - -/* - * identify_join_columns: figure out where columns of a join come from - * - * Fills the join-specific fields of the colinfo struct, except for - * usingNames which is filled later. - */ -static void -identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, - deparse_columns *colinfo) -{ - int numjoincols; - int i; - ListCell *lc; - - /* Extract left/right child RT indexes */ - if (IsA(j->larg, RangeTblRef)) - colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex; - else if (IsA(j->larg, JoinExpr)) - colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex; - else - elog(ERROR, "unrecognized node type in jointree: %d", - (int) nodeTag(j->larg)); - if (IsA(j->rarg, RangeTblRef)) - colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex; - else if (IsA(j->rarg, JoinExpr)) - colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex; - else - elog(ERROR, "unrecognized node type in jointree: %d", - (int) nodeTag(j->rarg)); - - /* Assert children will be processed earlier than join in second pass */ - Assert(colinfo->leftrti < j->rtindex); - Assert(colinfo->rightrti < j->rtindex); - - /* Initialize result arrays with zeroes */ - numjoincols = list_length(jrte->joinaliasvars); - Assert(numjoincols == list_length(jrte->eref->colnames)); - colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int)); - colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int)); - - /* Scan the joinaliasvars list to identify simple column references */ - i = 0; - foreach(lc, jrte->joinaliasvars) - { - Var *aliasvar = (Var *) lfirst(lc); - - /* get rid of any implicit coercion above the Var */ - aliasvar = (Var *) strip_implicit_coercions((Node *) aliasvar); - - if (aliasvar == NULL) - { - /* It's a dropped column; nothing to do here */ - } - else if (IsA(aliasvar, Var)) - { - Assert(aliasvar->varlevelsup == 0); - Assert(aliasvar->varattno != 0); - if (aliasvar->varno == colinfo->leftrti) - colinfo->leftattnos[i] = aliasvar->varattno; - else if (aliasvar->varno == colinfo->rightrti) - colinfo->rightattnos[i] = aliasvar->varattno; - else - elog(ERROR, "unexpected varno %d in JOIN RTE", - aliasvar->varno); - } - else if (IsA(aliasvar, CoalesceExpr)) - { - /* - * It's a merged column in FULL JOIN USING. Ignore it for now and - * let the code below identify the merged columns. - */ - } - else - elog(ERROR, "unrecognized node type in join alias vars: %d", - (int) nodeTag(aliasvar)); - - i++; - } - - /* - * If there's a USING clause, deconstruct the join quals to identify the - * merged columns. This is a tad painful but if we cannot rely on the - * column names, there is no other representation of which columns were - * joined by USING. (Unless the join type is FULL, we can't tell from the - * joinaliasvars list which columns are merged.) Note: we assume that the - * merged columns are the first output column(s) of the join. 
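 *
 * Editorial illustration (hedged): for a JOIN ... USING (x, y), the stored
 * qual is an AND tree of equalities, roughly a.x = b.x AND a.y = b.y;
 * flatten_join_using_qual() below returns leftvars = (a.x, a.y) and
 * rightvars = (b.x, b.y), matching the USING list position by position.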
- */ - if (j->usingClause) - { - List *leftvars = NIL; - List *rightvars = NIL; - ListCell *lc2; - - /* Extract left- and right-side Vars from the qual expression */ - flatten_join_using_qual(j->quals, &leftvars, &rightvars); - Assert(list_length(leftvars) == list_length(j->usingClause)); - Assert(list_length(rightvars) == list_length(j->usingClause)); - - /* Mark the output columns accordingly */ - i = 0; - forboth(lc, leftvars, lc2, rightvars) - { - Var *leftvar = (Var *) lfirst(lc); - Var *rightvar = (Var *) lfirst(lc2); - - Assert(leftvar->varlevelsup == 0); - Assert(leftvar->varattno != 0); - if (leftvar->varno != colinfo->leftrti) - elog(ERROR, "unexpected varno %d in JOIN USING qual", - leftvar->varno); - colinfo->leftattnos[i] = leftvar->varattno; - - Assert(rightvar->varlevelsup == 0); - Assert(rightvar->varattno != 0); - if (rightvar->varno != colinfo->rightrti) - elog(ERROR, "unexpected varno %d in JOIN USING qual", - rightvar->varno); - colinfo->rightattnos[i] = rightvar->varattno; - - i++; - } - } -} - -/* - * flatten_join_using_qual: extract Vars being joined from a JOIN/USING qual - * - * We assume that transformJoinUsingClause won't have produced anything except - * AND nodes, equality operator nodes, and possibly implicit coercions, and - * that the AND node inputs match left-to-right with the original USING list. - * - * Caller must initialize the result lists to NIL. - */ -static void -flatten_join_using_qual(Node *qual, List **leftvars, List **rightvars) -{ - if (IsA(qual, BoolExpr)) - { - /* Handle AND nodes by recursion */ - BoolExpr *b = (BoolExpr *) qual; - ListCell *lc; - - Assert(b->boolop == AND_EXPR); - foreach(lc, b->args) - { - flatten_join_using_qual((Node *) lfirst(lc), - leftvars, rightvars); - } - } - else if (IsA(qual, OpExpr)) - { - /* Otherwise we should have an equality operator */ - OpExpr *op = (OpExpr *) qual; - Var *var; - - if (list_length(op->args) != 2) - elog(ERROR, "unexpected unary operator in JOIN/USING qual"); - /* Arguments should be Vars with perhaps implicit coercions */ - var = (Var *) strip_implicit_coercions((Node *) linitial(op->args)); - if (!IsA(var, Var)) - elog(ERROR, "unexpected node type in JOIN/USING qual: %d", - (int) nodeTag(var)); - *leftvars = lappend(*leftvars, var); - var = (Var *) strip_implicit_coercions((Node *) lsecond(op->args)); - if (!IsA(var, Var)) - elog(ERROR, "unexpected node type in JOIN/USING qual: %d", - (int) nodeTag(var)); - *rightvars = lappend(*rightvars, var); - } - else - { - /* Perhaps we have an implicit coercion to boolean? */ - Node *q = strip_implicit_coercions(qual); - - if (q != qual) - flatten_join_using_qual(q, leftvars, rightvars); - else - elog(ERROR, "unexpected node type in JOIN/USING qual: %d", - (int) nodeTag(qual)); - } -} - -/* - * get_rtable_name: convenience function to get a previously assigned RTE alias - * - * The RTE must belong to the topmost namespace level in "context". - */ -static char * -get_rtable_name(int rtindex, deparse_context *context) -{ - deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); - - Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names)); - return (char *) list_nth(dpns->rtable_names, rtindex - 1); -} - -/* - * set_deparse_planstate: set up deparse_namespace to parse subexpressions - * of a given PlanState node - * - * This sets the planstate, outer_planstate, inner_planstate, outer_tlist, - * inner_tlist, and index_tlist fields. Caller is responsible for adjusting - * the ancestors list if necessary. 
Note that the rtable and ctes fields do - * not need to change when shifting attention to different plan nodes in a - * single plan tree. - */ -static void -set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) -{ - dpns->planstate = ps; - - /* - * We special-case Append and MergeAppend to pretend that the first child - * plan is the OUTER referent; we have to interpret OUTER Vars in their - * tlists according to one of the children, and the first one is the most - * natural choice. Likewise special-case ModifyTable to pretend that the - * first child plan is the OUTER referent; this is to support RETURNING - * lists containing references to non-target relations. - */ - if (IsA(ps, AppendState)) - dpns->outer_planstate = ((AppendState *) ps)->appendplans[0]; - else if (IsA(ps, MergeAppendState)) - dpns->outer_planstate = ((MergeAppendState *) ps)->mergeplans[0]; - else if (IsA(ps, ModifyTableState)) - dpns->outer_planstate = ((ModifyTableState *) ps)->mt_plans[0]; - else - dpns->outer_planstate = outerPlanState(ps); - - if (dpns->outer_planstate) - dpns->outer_tlist = dpns->outer_planstate->plan->targetlist; - else - dpns->outer_tlist = NIL; - - /* - * For a SubqueryScan, pretend the subplan is INNER referent. (We don't - * use OUTER because that could someday conflict with the normal meaning.) - * Likewise, for a CteScan, pretend the subquery's plan is INNER referent. - * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the - * excluded expression's tlist. (Similar to the SubqueryScan we don't want - * to reuse OUTER, it's used for RETURNING in some modify table cases, - * although not INSERT .. CONFLICT). - */ - if (IsA(ps, SubqueryScanState)) - dpns->inner_planstate = ((SubqueryScanState *) ps)->subplan; - else if (IsA(ps, CteScanState)) - dpns->inner_planstate = ((CteScanState *) ps)->cteplanstate; - else if (IsA(ps, ModifyTableState)) - dpns->inner_planstate = ps; - else - dpns->inner_planstate = innerPlanState(ps); - - if (IsA(ps, ModifyTableState)) - dpns->inner_tlist = ((ModifyTableState *) ps)->mt_excludedtlist; - else if (dpns->inner_planstate) - dpns->inner_tlist = dpns->inner_planstate->plan->targetlist; - else - dpns->inner_tlist = NIL; - - /* Set up referent for INDEX_VAR Vars, if needed */ - if (IsA(ps->plan, IndexOnlyScan)) - dpns->index_tlist = ((IndexOnlyScan *) ps->plan)->indextlist; - else if (IsA(ps->plan, ForeignScan)) - dpns->index_tlist = ((ForeignScan *) ps->plan)->fdw_scan_tlist; - else if (IsA(ps->plan, CustomScan)) - dpns->index_tlist = ((CustomScan *) ps->plan)->custom_scan_tlist; - else - dpns->index_tlist = NIL; -} - -/* - * push_child_plan: temporarily transfer deparsing attention to a child plan - * - * When expanding an OUTER_VAR or INNER_VAR reference, we must adjust the - * deparse context in case the referenced expression itself uses - * OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid - * affecting levelsup issues (although in a Plan tree there really shouldn't - * be any). - * - * Caller must provide a local deparse_namespace variable to save the - * previous state for pop_child_plan. 
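 *
 * Editorial usage sketch (assumed caller, not from the original file):
 *     deparse_namespace save_dpns;
 *     push_child_plan(dpns, dpns->outer_planstate, &save_dpns);
 *     ... deparse the expression the OUTER_VAR referred to ...
 *     pop_child_plan(dpns, &save_dpns);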
- */ -static void -push_child_plan(deparse_namespace *dpns, PlanState *ps, - deparse_namespace *save_dpns) -{ - /* Save state for restoration later */ - *save_dpns = *dpns; - - /* Link current plan node into ancestors list */ - dpns->ancestors = lcons(dpns->planstate, dpns->ancestors); - - /* Set attention on selected child */ - set_deparse_planstate(dpns, ps); -} - -/* - * pop_child_plan: undo the effects of push_child_plan - */ -static void -pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) -{ - List *ancestors; - - /* Get rid of ancestors list cell added by push_child_plan */ - ancestors = list_delete_first(dpns->ancestors); - - /* Restore fields changed by push_child_plan */ - *dpns = *save_dpns; - - /* Make sure dpns->ancestors is right (may be unnecessary) */ - dpns->ancestors = ancestors; -} - -/* - * push_ancestor_plan: temporarily transfer deparsing attention to an - * ancestor plan - * - * When expanding a Param reference, we must adjust the deparse context - * to match the plan node that contains the expression being printed; - * otherwise we'd fail if that expression itself contains a Param or - * OUTER_VAR/INNER_VAR/INDEX_VAR variable. - * - * The target ancestor is conveniently identified by the ListCell holding it - * in dpns->ancestors. - * - * Caller must provide a local deparse_namespace variable to save the - * previous state for pop_ancestor_plan. - */ -static void -push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, - deparse_namespace *save_dpns) -{ - PlanState *ps = (PlanState *) lfirst(ancestor_cell); - List *ancestors; - - /* Save state for restoration later */ - *save_dpns = *dpns; - - /* Build a new ancestor list with just this node's ancestors */ - ancestors = NIL; - while ((ancestor_cell = lnext(ancestor_cell)) != NULL) - ancestors = lappend(ancestors, lfirst(ancestor_cell)); - dpns->ancestors = ancestors; - - /* Set attention on selected ancestor */ - set_deparse_planstate(dpns, ps); -} - -/* - * pop_ancestor_plan: undo the effects of push_ancestor_plan - */ -static void -pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) -{ - /* Free the ancestor list made in push_ancestor_plan */ - list_free(dpns->ancestors); - - /* Restore fields changed by push_ancestor_plan */ - *dpns = *save_dpns; -} - - -/* ---------- - * deparse_shard_query - Parse back a query for execution on a shard - * - * Builds an SQL string to perform the provided query on a specific shard and - * places this string into the provided buffer. - * ---------- - */ -void -deparse_shard_query(Query *query, Oid distrelid, int64 shardid, - StringInfo buffer) -{ - get_query_def_extended(query, buffer, NIL, distrelid, shardid, NULL, 0, - WRAP_COLUMN_DEFAULT, 0); -} - - -/* ---------- - * get_query_def - Parse back one query parsetree - * - * If resultDesc is not NULL, then it is the output tuple descriptor for - * the view represented by a SELECT query. - * ---------- - */ -static void -get_query_def(Query *query, StringInfo buf, List *parentnamespace, - TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent) -{ - get_query_def_extended(query, buf, parentnamespace, InvalidOid, 0, resultDesc, - prettyFlags, wrapColumn, startIndent); -} - - -/* ---------- - * get_query_def_extended - Parse back one query parsetree, optionally - * with extension using a shard identifier. 
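- *
- * For example (with a hypothetical shard ID), deparsing
- * SELECT * FROM orders with distrelid = 'orders'::regclass and
- * shardid = 102008 yields a query referencing orders_102008
- * (schema-qualified as needed), since the shard identifier is appended
- * to the relation name wherever that relation is deparsed.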
- * - * If distrelid is valid and shardid is positive, the provided shardid is added - * any time the provided relid is deparsed, so that the query may be executed - * on a placement for the given shard. - * ---------- - */ -static void -get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, - Oid distrelid, int64 shardid, TupleDesc resultDesc, - int prettyFlags, int wrapColumn, int startIndent) -{ - deparse_context context; - deparse_namespace dpns; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - /* - * Before we begin to examine the query, acquire locks on referenced - * relations, and fix up deleted columns in JOIN RTEs. This ensures - * consistent results. Note we assume it's OK to scribble on the passed - * querytree! - * - * We are only deparsing the query (we are not about to execute it), so we - * only need AccessShareLock on the relations it mentions. - */ - AcquireRewriteLocks(query, false, false); - - PushOverrideEmptySearchPath(CurrentMemoryContext); - - context.buf = buf; - context.namespaces = lcons(&dpns, list_copy(parentnamespace)); - context.windowClause = NIL; - context.windowTList = NIL; - context.varprefix = (parentnamespace != NIL || - list_length(query->rtable) != 1); - context.prettyFlags = prettyFlags; - context.wrapColumn = wrapColumn; - context.indentLevel = startIndent; - context.special_exprkind = EXPR_KIND_NONE; - context.distrelid = distrelid; - context.shardid = shardid; - - set_deparse_for_query(&dpns, query, parentnamespace); - - switch (query->commandType) - { - case CMD_SELECT: - get_select_query_def(query, &context, resultDesc); - break; - - case CMD_UPDATE: - get_update_query_def(query, &context); - break; - - case CMD_INSERT: - get_insert_query_def(query, &context); - break; - - case CMD_DELETE: - get_delete_query_def(query, &context); - break; - - case CMD_NOTHING: - appendStringInfoString(buf, "NOTHING"); - break; - - case CMD_UTILITY: - get_utility_query_def(query, &context); - break; - - default: - elog(ERROR, "unrecognized query command type: %d", - query->commandType); - break; - } - - /* revert back to original search_path */ - PopOverrideSearchPath(); -} - -/* ---------- - * get_values_def - Parse back a VALUES list - * ---------- - */ -static void -get_values_def(List *values_lists, deparse_context *context) -{ - StringInfo buf = context->buf; - bool first_list = true; - ListCell *vtl; - - appendStringInfoString(buf, "VALUES "); - - foreach(vtl, values_lists) - { - List *sublist = (List *) lfirst(vtl); - bool first_col = true; - ListCell *lc; - - if (first_list) - first_list = false; - else - appendStringInfoString(buf, ", "); - - appendStringInfoChar(buf, '('); - foreach(lc, sublist) - { - Node *col = (Node *) lfirst(lc); - - if (first_col) - first_col = false; - else - appendStringInfoChar(buf, ','); - - /* - * Print the value. Whole-row Vars need special treatment. 
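- * A whole-row Var is printed as "tab.*::typename" rather than bare
- * "tab.*", so that the parser cannot expand it into multiple columns
- * when the statement is read back (see get_variable below).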
- */ - get_rule_expr_toplevel(col, context, false); - } - appendStringInfoChar(buf, ')'); - } -} - -/* ---------- - * get_with_clause - Parse back a WITH clause - * ---------- - */ -static void -get_with_clause(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - if (query->cteList == NIL) - return; - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - - if (query->hasRecursive) - sep = "WITH RECURSIVE "; - else - sep = "WITH "; - foreach(l, query->cteList) - { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(l); - - appendStringInfoString(buf, sep); - appendStringInfoString(buf, quote_identifier(cte->ctename)); - if (cte->aliascolnames) - { - bool first = true; - ListCell *col; - - appendStringInfoChar(buf, '('); - foreach(col, cte->aliascolnames) - { - if (first) - first = false; - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, - quote_identifier(strVal(lfirst(col)))); - } - appendStringInfoChar(buf, ')'); - } - appendStringInfoString(buf, " AS "); - switch (cte->ctematerialized) - { - case CTEMaterializeDefault: - break; - case CTEMaterializeAlways: - appendStringInfoString(buf, "MATERIALIZED "); - break; - case CTEMaterializeNever: - appendStringInfoString(buf, "NOT MATERIALIZED "); - break; - } - appendStringInfoChar(buf, '('); - if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", 0, 0, 0); - get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", 0, 0, 0); - appendStringInfoChar(buf, ')'); - sep = ", "; - } - - if (PRETTY_INDENT(context)) - { - context->indentLevel -= PRETTYINDENT_STD; - appendContextKeyword(context, "", 0, 0, 0); - } - else - appendStringInfoChar(buf, ' '); -} - -/* ---------- - * get_select_query_def - Parse back a SELECT parsetree - * ---------- - */ -static void -get_select_query_def(Query *query, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - List *save_windowclause; - List *save_windowtlist; - bool force_colno; - ListCell *l; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* Set up context for possible window functions */ - save_windowclause = context->windowClause; - context->windowClause = query->windowClause; - save_windowtlist = context->windowTList; - context->windowTList = query->targetList; - - /* - * If the Query node has a setOperations tree, then it's the top level of - * a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT - * fields are interesting in the top query itself. 
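- *
- * For instance, (SELECT a FROM t1 UNION SELECT a FROM t2) ORDER BY 1
- * is represented as a top-level Query holding just the set-operation
- * tree plus the ORDER BY; the component SELECTs live in subquery RTEs
- * referenced by the leaf RangeTblRefs.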
- */ - if (query->setOperations) - { - get_setop_query(query->setOperations, query, context, resultDesc); - /* ORDER BY clauses must be simple in this case */ - force_colno = true; - } - else - { - get_basic_select_query(query, context, resultDesc); - force_colno = false; - } - - /* Add the ORDER BY clause if given */ - if (query->sortClause != NIL) - { - appendContextKeyword(context, " ORDER BY ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_orderby(query->sortClause, query->targetList, - force_colno, context); - } - - /* Add the LIMIT clause if given */ - if (query->limitOffset != NULL) - { - appendContextKeyword(context, " OFFSET ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->limitOffset, context, false); - } - if (query->limitCount != NULL) - { - appendContextKeyword(context, " LIMIT ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - if (IsA(query->limitCount, Const) && - ((Const *) query->limitCount)->constisnull) - appendStringInfoString(buf, "ALL"); - else - get_rule_expr(query->limitCount, context, false); - } - - /* Add FOR [KEY] UPDATE/SHARE clauses if present */ - if (query->hasForUpdate) - { - foreach(l, query->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(l); - - /* don't print implicit clauses */ - if (rc->pushedDown) - continue; - - switch (rc->strength) - { - case LCS_NONE: - /* we intentionally throw an error for LCS_NONE */ - elog(ERROR, "unrecognized LockClauseStrength %d", - (int) rc->strength); - break; - case LCS_FORKEYSHARE: - appendContextKeyword(context, " FOR KEY SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORSHARE: - appendContextKeyword(context, " FOR SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORNOKEYUPDATE: - appendContextKeyword(context, " FOR NO KEY UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - case LCS_FORUPDATE: - appendContextKeyword(context, " FOR UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - break; - } - - appendStringInfo(buf, " OF %s", - quote_identifier(get_rtable_name(rc->rti, - context))); - if (rc->waitPolicy == LockWaitError) - appendStringInfoString(buf, " NOWAIT"); - else if (rc->waitPolicy == LockWaitSkip) - appendStringInfoString(buf, " SKIP LOCKED"); - } - } - - context->windowClause = save_windowclause; - context->windowTList = save_windowtlist; -} - -/* - * Detect whether query looks like SELECT ... FROM VALUES(); - * if so, return the VALUES RTE. Otherwise return NULL. - */ -static RangeTblEntry * -get_simple_values_rte(Query *query) -{ - RangeTblEntry *result = NULL; - ListCell *lc; - - /* - * We want to return true even if the Query also contains OLD or NEW rule - * RTEs. So the idea is to scan the rtable and see if there is only one - * inFromCl RTE that is a VALUES RTE. - */ - foreach(lc, query->rtable) - { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); - - if (rte->rtekind == RTE_VALUES && rte->inFromCl) - { - if (result) - return NULL; /* multiple VALUES (probably not possible) */ - result = rte; - } - else if (rte->rtekind == RTE_RELATION && !rte->inFromCl) - continue; /* ignore rule entries */ - else - return NULL; /* something else -> not simple VALUES */ - } - - /* - * We don't need to check the targetlist in any great detail, because - * parser/analyze.c will never generate a "bare" VALUES RTE --- they only - * appear inside auto-generated sub-queries with very restricted - * structure. 
However, DefineView might have modified the tlist by - * injecting new column aliases; so compare tlist resnames against the - * RTE's names to detect that. - */ - if (result) - { - ListCell *lcn; - - if (list_length(query->targetList) != list_length(result->eref->colnames)) - return NULL; /* this probably cannot happen */ - forboth(lc, query->targetList, lcn, result->eref->colnames) - { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - char *cname = strVal(lfirst(lcn)); - - if (tle->resjunk) - return NULL; /* this probably cannot happen */ - if (tle->resname == NULL || strcmp(tle->resname, cname) != 0) - return NULL; /* column name has been changed */ - } - } - - return result; -} - -static void -get_basic_select_query(Query *query, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - RangeTblEntry *values_rte; - char *sep; - ListCell *l; - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - - /* - * If the query looks like SELECT * FROM (VALUES ...), then print just the - * VALUES part. This reverses what transformValuesClause() did at parse - * time. - */ - values_rte = get_simple_values_rte(query); - if (values_rte) - { - get_values_def(values_rte->values_lists, context); - return; - } - - /* - * Build up the query string - first we say SELECT - */ - appendStringInfoString(buf, "SELECT"); - - /* Add the DISTINCT clause if given */ - if (query->distinctClause != NIL) - { - if (query->hasDistinctOn) - { - appendStringInfoString(buf, " DISTINCT ON ("); - sep = ""; - foreach(l, query->distinctClause) - { - SortGroupClause *srt = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList, - false, context); - sep = ", "; - } - appendStringInfoChar(buf, ')'); - } - else - appendStringInfoString(buf, " DISTINCT"); - } - - /* Then we tell what to select (the targetlist) */ - get_target_list(query->targetList, context, resultDesc); - - /* Add the FROM clause if needed */ - get_from_clause(query, " FROM ", context); - - /* Add the WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add the GROUP BY clause if given */ - if (query->groupClause != NULL || query->groupingSets != NULL) - { - ParseExprKind save_exprkind; - - appendContextKeyword(context, " GROUP BY ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - - save_exprkind = context->special_exprkind; - context->special_exprkind = EXPR_KIND_GROUP_BY; - - if (query->groupingSets == NIL) - { - sep = ""; - foreach(l, query->groupClause) - { - SortGroupClause *grp = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList, - false, context); - sep = ", "; - } - } - else - { - sep = ""; - foreach(l, query->groupingSets) - { - GroupingSet *grp = lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_groupingset(grp, query->targetList, true, context); - sep = ", "; - } - } - - context->special_exprkind = save_exprkind; - } - - /* Add the HAVING clause if given */ - if (query->havingQual != NULL) - { - appendContextKeyword(context, " HAVING ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); - get_rule_expr(query->havingQual, context, false); - } - - /* Add the WINDOW clause if needed */ - if (query->windowClause != NIL) - 
get_rule_windowclause(query, context); -} - -/* ---------- - * get_target_list - Parse back a SELECT target list - * - * This is also used for RETURNING lists in INSERT/UPDATE/DELETE. - * ---------- - */ -static void -get_target_list(List *targetList, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - StringInfoData targetbuf; - bool last_was_multiline = false; - char *sep; - int colno; - ListCell *l; - - /* we use targetbuf to hold each TLE's text temporarily */ - initStringInfo(&targetbuf); - - sep = " "; - colno = 0; - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - char *colname; - char *attname; - - if (tle->resjunk) - continue; /* ignore junk entries */ - - appendStringInfoString(buf, sep); - sep = ", "; - colno++; - - /* - * Put the new field text into targetbuf so we can decide after we've - * got it whether or not it needs to go on a new line. - */ - resetStringInfo(&targetbuf); - context->buf = &targetbuf; - - /* - * We special-case Var nodes rather than using get_rule_expr. This is - * needed because get_rule_expr will display a whole-row Var as - * "foo.*", which is the preferred notation in most contexts, but at - * the top level of a SELECT list it's not right (the parser will - * expand that notation into multiple columns, yielding behavior - * different from a whole-row Var). We need to call get_variable - * directly so that we can tell it to do the right thing, and so that - * we can get the attribute name which is the default AS label. - */ - if (tle->expr && (IsA(tle->expr, Var))) - { - attname = get_variable((Var *) tle->expr, 0, true, context); - } - else - { - get_rule_expr((Node *) tle->expr, context, true); - /* We'll show the AS name unless it's this: */ - attname = "?column?"; - } - - /* - * Figure out what the result column should be called. In the context - * of a view, use the view's tuple descriptor (so as to pick up the - * effects of any column RENAME that's been done on the view). - * Otherwise, just use what we can find in the TLE. - */ - if (resultDesc && colno <= resultDesc->natts) - colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname); - else - colname = tle->resname; - - /* Show AS unless the column's name is correct as-is */ - if (colname) /* resname could be NULL */ - { - if (attname == NULL || strcmp(attname, colname) != 0) - appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname)); - } - - /* Restore context's output buffer */ - context->buf = buf; - - /* Consider line-wrapping if enabled */ - if (PRETTY_INDENT(context) && context->wrapColumn >= 0) - { - int leading_nl_pos; - - /* Does the new field start with a new line? */ - if (targetbuf.len > 0 && targetbuf.data[0] == '\n') - leading_nl_pos = 0; - else - leading_nl_pos = -1; - - /* If so, we shouldn't add anything */ - if (leading_nl_pos >= 0) - { - /* instead, remove any trailing spaces currently in buf */ - removeStringInfoSpaces(buf); - } - else - { - char *trailing_nl; - - /* Locate the start of the current line in the output buffer */ - trailing_nl = strrchr(buf->data, '\n'); - if (trailing_nl == NULL) - trailing_nl = buf->data; - else - trailing_nl++; - - /* - * Add a newline, plus some indentation, if the new field is - * not the first and either the new field would cause an - * overflow or the last field used more than one line. 
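- * (In other words: the first field never triggers a wrap, a field that
- * would overflow the wrap column starts on a fresh line, and so does
- * any field that directly follows a multi-line field.)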
- */ - if (colno > 1 && - ((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) || - last_was_multiline)) - appendContextKeyword(context, "", -PRETTYINDENT_STD, - PRETTYINDENT_STD, PRETTYINDENT_VAR); - } - - /* Remember this field's multiline status for next iteration */ - last_was_multiline = - (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL); - } - - /* Add the new field */ - appendStringInfoString(buf, targetbuf.data); - } - - /* clean up */ - pfree(targetbuf.data); -} - -static void -get_setop_query(Node *setOp, Query *query, deparse_context *context, - TupleDesc resultDesc) -{ - StringInfo buf = context->buf; - bool need_paren; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - if (IsA(setOp, RangeTblRef)) - { - RangeTblRef *rtr = (RangeTblRef *) setOp; - RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable); - Query *subquery = rte->subquery; - - Assert(subquery != NULL); - Assert(subquery->setOperations == NULL); - /* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */ - need_paren = (subquery->cteList || - subquery->sortClause || - subquery->rowMarks || - subquery->limitOffset || - subquery->limitCount); - if (need_paren) - appendStringInfoChar(buf, '('); - get_query_def(subquery, buf, context->namespaces, resultDesc, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - if (need_paren) - appendStringInfoChar(buf, ')'); - } - else if (IsA(setOp, SetOperationStmt)) - { - SetOperationStmt *op = (SetOperationStmt *) setOp; - int subindent; - - /* - * We force parens when nesting two SetOperationStmts, except when the - * lefthand input is another setop of the same kind. Syntactically, - * we could omit parens in rather more cases, but it seems best to use - * parens to flag cases where the setop operator changes. If we use - * parens, we also increase the indentation level for the child query. - * - * There are some cases in which parens are needed around a leaf query - * too, but those are more easily handled at the next level down (see - * code above). - */ - if (IsA(op->larg, SetOperationStmt)) - { - SetOperationStmt *lop = (SetOperationStmt *) op->larg; - - if (op->op == lop->op && op->all == lop->all) - need_paren = false; - else - need_paren = true; - } - else - need_paren = false; - - if (need_paren) - { - appendStringInfoChar(buf, '('); - subindent = PRETTYINDENT_STD; - appendContextKeyword(context, "", subindent, 0, 0); - } - else - subindent = 0; - - get_setop_query(op->larg, query, context, resultDesc); - - if (need_paren) - appendContextKeyword(context, ") ", -subindent, 0, 0); - else if (PRETTY_INDENT(context)) - appendContextKeyword(context, "", -subindent, 0, 0); - else - appendStringInfoChar(buf, ' '); - - switch (op->op) - { - case SETOP_UNION: - appendStringInfoString(buf, "UNION "); - break; - case SETOP_INTERSECT: - appendStringInfoString(buf, "INTERSECT "); - break; - case SETOP_EXCEPT: - appendStringInfoString(buf, "EXCEPT "); - break; - default: - elog(ERROR, "unrecognized set op: %d", - (int) op->op); - } - if (op->all) - appendStringInfoString(buf, "ALL "); - - /* Always parenthesize if RHS is another setop */ - need_paren = IsA(op->rarg, SetOperationStmt); - - /* - * The indentation code here is deliberately a bit different from that - * for the lefthand input, because we want the line breaks in - * different places. 
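- * (E.g. SELECT 1 UNION SELECT 2 UNION SELECT 3 reparses without any
- * parentheses, but in SELECT 1 UNION (SELECT 2 UNION ALL SELECT 3) the
- * right-hand input is itself a set operation and therefore always gets
- * them.)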
- */ - if (need_paren) - { - appendStringInfoChar(buf, '('); - subindent = PRETTYINDENT_STD; - } - else - subindent = 0; - appendContextKeyword(context, "", subindent, 0, 0); - - get_setop_query(op->rarg, query, context, resultDesc); - - if (PRETTY_INDENT(context)) - context->indentLevel -= subindent; - if (need_paren) - appendContextKeyword(context, ")", 0, 0, 0); - } - else - { - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(setOp)); - } -} - -/* - * Display a sort/group clause. - * - * Also returns the expression tree, so caller need not find it again. - */ -static Node * -get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, - deparse_context *context) -{ - StringInfo buf = context->buf; - TargetEntry *tle; - Node *expr; - - tle = get_sortgroupref_tle(ref, tlist); - expr = (Node *) tle->expr; - - /* - * Use column-number form if requested by caller. Otherwise, if - * expression is a constant, force it to be dumped with an explicit cast - * as decoration --- this is because a simple integer constant is - * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we - * dump it without any decoration. If it's anything more complex than a - * simple Var, then force extra parens around it, to ensure it can't be - * misinterpreted as a cube() or rollup() construct. - */ - if (force_colno) - { - Assert(!tle->resjunk); - appendStringInfo(buf, "%d", tle->resno); - } - else if (expr && IsA(expr, Const)) - get_const_expr((Const *) expr, context, 1); - else if (!expr || IsA(expr, Var)) - get_rule_expr(expr, context, true); - else - { - /* - * We must force parens for function-like expressions even if - * PRETTY_PAREN is off, since those are the ones in danger of - * misparsing. For other expressions we need to force them only if - * PRETTY_PAREN is on, since otherwise the expression will output them - * itself. (We can't skip the parens.) - */ - bool need_paren = (PRETTY_PAREN(context) - || IsA(expr, FuncExpr) - ||IsA(expr, Aggref) - ||IsA(expr, WindowFunc)); - - if (need_paren) - appendStringInfoChar(context->buf, '('); - get_rule_expr(expr, context, true); - if (need_paren) - appendStringInfoChar(context->buf, ')'); - } - - return expr; -} - -/* - * Display a GroupingSet - */ -static void -get_rule_groupingset(GroupingSet *gset, List *targetlist, - bool omit_parens, deparse_context *context) -{ - ListCell *l; - StringInfo buf = context->buf; - bool omit_child_parens = true; - char *sep = ""; - - switch (gset->kind) - { - case GROUPING_SET_EMPTY: - appendStringInfoString(buf, "()"); - return; - - case GROUPING_SET_SIMPLE: - { - if (!omit_parens || list_length(gset->content) != 1) - appendStringInfoChar(buf, '('); - - foreach(l, gset->content) - { - Index ref = lfirst_int(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(ref, targetlist, - false, context); - sep = ", "; - } - - if (!omit_parens || list_length(gset->content) != 1) - appendStringInfoChar(buf, ')'); - } - return; - - case GROUPING_SET_ROLLUP: - appendStringInfoString(buf, "ROLLUP("); - break; - case GROUPING_SET_CUBE: - appendStringInfoString(buf, "CUBE("); - break; - case GROUPING_SET_SETS: - appendStringInfoString(buf, "GROUPING SETS ("); - omit_child_parens = false; - break; - } - - foreach(l, gset->content) - { - appendStringInfoString(buf, sep); - get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context); - sep = ", "; - } - - appendStringInfoChar(buf, ')'); -} - -/* - * Display an ORDER BY list. 
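- *
- * The sort operator is compared against the datatype's default "<" and
- * ">" operators: an ascending sort with default null ordering emits no
- * decoration at all, a descending one emits DESC (adding NULLS LAST
- * only when it departs from DESC's implicit NULLS FIRST), and any
- * other operator is spelled out as USING <operator> with an explicit
- * NULLS FIRST/LAST.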
- */ -static void -get_rule_orderby(List *orderList, List *targetList, - bool force_colno, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - sep = ""; - foreach(l, orderList) - { - SortGroupClause *srt = (SortGroupClause *) lfirst(l); - Node *sortexpr; - Oid sortcoltype; - TypeCacheEntry *typentry; - - appendStringInfoString(buf, sep); - sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, - force_colno, context); - sortcoltype = exprType(sortexpr); - /* See whether operator is default < or > for datatype */ - typentry = lookup_type_cache(sortcoltype, - TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); - if (srt->sortop == typentry->lt_opr) - { - /* ASC is default, so emit nothing for it */ - if (srt->nulls_first) - appendStringInfoString(buf, " NULLS FIRST"); - } - else if (srt->sortop == typentry->gt_opr) - { - appendStringInfoString(buf, " DESC"); - /* DESC defaults to NULLS FIRST */ - if (!srt->nulls_first) - appendStringInfoString(buf, " NULLS LAST"); - } - else - { - appendStringInfo(buf, " USING %s", - generate_operator_name(srt->sortop, - sortcoltype, - sortcoltype)); - /* be specific to eliminate ambiguity */ - if (srt->nulls_first) - appendStringInfoString(buf, " NULLS FIRST"); - else - appendStringInfoString(buf, " NULLS LAST"); - } - sep = ", "; - } -} - -/* - * Display a WINDOW clause. - * - * Note that the windowClause list might contain only anonymous window - * specifications, in which case we should print nothing here. - */ -static void -get_rule_windowclause(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - const char *sep; - ListCell *l; - - sep = NULL; - foreach(l, query->windowClause) - { - WindowClause *wc = (WindowClause *) lfirst(l); - - if (wc->name == NULL) - continue; /* ignore anonymous windows */ - - if (sep == NULL) - appendContextKeyword(context, " WINDOW ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - else - appendStringInfoString(buf, sep); - - appendStringInfo(buf, "%s AS ", quote_identifier(wc->name)); - - get_rule_windowspec(wc, query->targetList, context); - - sep = ", "; - } -} - -/* - * Display a window definition - */ -static void -get_rule_windowspec(WindowClause *wc, List *targetList, - deparse_context *context) -{ - StringInfo buf = context->buf; - bool needspace = false; - const char *sep; - ListCell *l; - - appendStringInfoChar(buf, '('); - if (wc->refname) - { - appendStringInfoString(buf, quote_identifier(wc->refname)); - needspace = true; - } - /* partition clauses are always inherited, so only print if no refname */ - if (wc->partitionClause && !wc->refname) - { - if (needspace) - appendStringInfoChar(buf, ' '); - appendStringInfoString(buf, "PARTITION BY "); - sep = ""; - foreach(l, wc->partitionClause) - { - SortGroupClause *grp = (SortGroupClause *) lfirst(l); - - appendStringInfoString(buf, sep); - get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, - false, context); - sep = ", "; - } - needspace = true; - } - /* print ordering clause only if not inherited */ - if (wc->orderClause && !wc->copiedOrder) - { - if (needspace) - appendStringInfoChar(buf, ' '); - appendStringInfoString(buf, "ORDER BY "); - get_rule_orderby(wc->orderClause, targetList, false, context); - needspace = true; - } - /* framing clause is never inherited, so print unless it's default */ - if (wc->frameOptions & FRAMEOPTION_NONDEFAULT) - { - if (needspace) - appendStringInfoChar(buf, ' '); - if (wc->frameOptions & FRAMEOPTION_RANGE) - appendStringInfoString(buf, "RANGE "); - else 
if (wc->frameOptions & FRAMEOPTION_ROWS) - appendStringInfoString(buf, "ROWS "); - else if (wc->frameOptions & FRAMEOPTION_GROUPS) - appendStringInfoString(buf, "GROUPS "); - else - Assert(false); - if (wc->frameOptions & FRAMEOPTION_BETWEEN) - appendStringInfoString(buf, "BETWEEN "); - if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) - appendStringInfoString(buf, "UNBOUNDED PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW) - appendStringInfoString(buf, "CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_START_OFFSET) - { - get_rule_expr(wc->startOffset, context, false); - if (wc->frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING) - appendStringInfoString(buf, " PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_START_OFFSET_FOLLOWING) - appendStringInfoString(buf, " FOLLOWING "); - else - Assert(false); - } - else - Assert(false); - if (wc->frameOptions & FRAMEOPTION_BETWEEN) - { - appendStringInfoString(buf, "AND "); - if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING) - appendStringInfoString(buf, "UNBOUNDED FOLLOWING "); - else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW) - appendStringInfoString(buf, "CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_END_OFFSET) - { - get_rule_expr(wc->endOffset, context, false); - if (wc->frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING) - appendStringInfoString(buf, " PRECEDING "); - else if (wc->frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING) - appendStringInfoString(buf, " FOLLOWING "); - else - Assert(false); - } - else - Assert(false); - } - if (wc->frameOptions & FRAMEOPTION_EXCLUDE_CURRENT_ROW) - appendStringInfoString(buf, "EXCLUDE CURRENT ROW "); - else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_GROUP) - appendStringInfoString(buf, "EXCLUDE GROUP "); - else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_TIES) - appendStringInfoString(buf, "EXCLUDE TIES "); - /* we will now have a trailing space; remove it */ - buf->len--; - } - appendStringInfoChar(buf, ')'); -} - -/* ---------- - * get_insert_query_def - Parse back an INSERT parsetree - * ---------- - */ -static void -get_insert_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *select_rte = NULL; - RangeTblEntry *values_rte = NULL; - RangeTblEntry *rte; - char *sep; - ListCell *l; - List *strippedexprs; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * If it's an INSERT ... SELECT or multi-row VALUES, there will be a - * single RTE for the SELECT or VALUES. Plain VALUES has neither. 
- */ - foreach(l, query->rtable) - { - rte = (RangeTblEntry *) lfirst(l); - - if (rte->rtekind == RTE_SUBQUERY) - { - if (select_rte) - elog(ERROR, "too many subquery RTEs in INSERT"); - select_rte = rte; - } - - if (rte->rtekind == RTE_VALUES) - { - if (values_rte) - elog(ERROR, "too many values RTEs in INSERT"); - values_rte = rte; - } - } - if (select_rte && values_rte) - elog(ERROR, "both subquery and values RTEs in INSERT"); - - /* - * Start the query with INSERT INTO relname - */ - rte = rt_fetch(query->resultRelation, query->rtable); - Assert(rte->rtekind == RTE_RELATION); - - if (PRETTY_INDENT(context)) - { - context->indentLevel += PRETTYINDENT_STD; - appendStringInfoChar(buf, ' '); - } - appendStringInfo(buf, "INSERT INTO %s ", - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - /* INSERT requires AS keyword for target alias */ - if (rte->alias != NULL) - appendStringInfo(buf, "AS %s ", - quote_identifier(get_rtable_name(query->resultRelation, context))); - - /* - * Add the insert-column-names list. Any indirection decoration needed on - * the column names can be inferred from the top targetlist. - */ - strippedexprs = NIL; - sep = ""; - if (query->targetList) - appendStringInfoChar(buf, '('); - foreach(l, query->targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk) - continue; /* ignore junk entries */ - - appendStringInfoString(buf, sep); - sep = ", "; - - /* - * Put out name of target column; look in the catalogs, not at - * tle->resname, since resname will fail to track RENAME. - */ - appendStringInfoString(buf, - quote_identifier(get_attname(rte->relid, - tle->resno, - false))); - - /* - * Print any indirection needed (subfields or subscripts), and strip - * off the top-level nodes representing the indirection assignments. - * Add the stripped expressions to strippedexprs. (If it's a - * single-VALUES statement, the stripped expressions are the VALUES to - * print below. Otherwise they're just Vars and not really - * interesting.) 
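- * For example, INSERT INTO t (a[2], b.f) VALUES (7, 0) carries
- * SubscriptingRef and FieldStore assignments in its targetlist: the
- * "[2]" and ".f" decoration is printed after the column names here,
- * while processIndirection() hands back the bare 7 and 0 to print in
- * the VALUES list.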
- */
- strippedexprs = lappend(strippedexprs,
- processIndirection((Node *) tle->expr,
- context));
- }
- if (query->targetList)
- appendStringInfoString(buf, ") ");
-
- if (query->override)
- {
- if (query->override == OVERRIDING_SYSTEM_VALUE)
- appendStringInfoString(buf, "OVERRIDING SYSTEM VALUE ");
- else if (query->override == OVERRIDING_USER_VALUE)
- appendStringInfoString(buf, "OVERRIDING USER VALUE ");
- }
-
- if (select_rte)
- {
- /* Add the SELECT */
- get_query_def(select_rte->subquery, buf, NIL, NULL,
- context->prettyFlags, context->wrapColumn,
- context->indentLevel);
- }
- else if (values_rte)
- {
- /* Add the multi-VALUES expression lists */
- get_values_def(values_rte->values_lists, context);
- }
- else if (strippedexprs)
- {
- /* Add the single-VALUES expression list */
- appendContextKeyword(context, "VALUES (",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
- get_rule_expr((Node *) strippedexprs, context, false);
- appendStringInfoChar(buf, ')');
- }
- else
- {
- /* No expressions, so it must be DEFAULT VALUES */
- appendStringInfoString(buf, "DEFAULT VALUES");
- }
-
- /* Add ON CONFLICT if present */
- if (query->onConflict)
- {
- OnConflictExpr *confl = query->onConflict;
-
- appendStringInfoString(buf, " ON CONFLICT");
-
- if (confl->arbiterElems)
- {
- /* Add the arbiter index column/expression list */
- appendStringInfoChar(buf, '(');
- get_rule_expr((Node *) confl->arbiterElems, context, false);
- appendStringInfoChar(buf, ')');
-
- /* Add a WHERE clause (for partial indexes) if given */
- if (confl->arbiterWhere != NULL)
- {
- bool save_varprefix;
-
- /*
- * Force non-prefixing of Vars, since parser assumes that they
- * belong to target relation. WHERE clause does not use
- * InferenceElem, so this is separately required.
- */
- save_varprefix = context->varprefix;
- context->varprefix = false;
-
- appendContextKeyword(context, " WHERE ",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_rule_expr(confl->arbiterWhere, context, false);
-
- context->varprefix = save_varprefix;
- }
- }
- else if (OidIsValid(confl->constraint))
- {
- char *constraint = get_constraint_name(confl->constraint);
- int64 shardId = context->shardid;
-
- /* check the lookup result before appending the shard ID to it */
- if (!constraint)
- elog(ERROR, "cache lookup failed for constraint %u",
- confl->constraint);
-
- if (shardId > 0)
- {
- AppendShardIdToName(&constraint, shardId);
- }
-
- appendStringInfo(buf, " ON CONSTRAINT %s",
- quote_identifier(constraint));
- }
-
- if (confl->action == ONCONFLICT_NOTHING)
- {
- appendStringInfoString(buf, " DO NOTHING");
- }
- else
- {
- appendStringInfoString(buf, " DO UPDATE SET ");
- /* Deparse targetlist */
- get_update_query_targetlist_def(query, confl->onConflictSet,
- context, rte);
-
- /* Add a WHERE clause if given */
- if (confl->onConflictWhere != NULL)
- {
- appendContextKeyword(context, " WHERE ",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_rule_expr(confl->onConflictWhere, context, false);
- }
- }
- }
-
- /* Add RETURNING if present */
- if (query->returningList)
- {
- appendContextKeyword(context, " RETURNING",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
- get_target_list(query->returningList, context, NULL);
- }
-}
-
-
-/* ----------
- * get_update_query_def - Parse back an UPDATE parsetree
- * ----------
- */
-static void
-get_update_query_def(Query *query, deparse_context *context)
-{
- StringInfo buf = context->buf;
- RangeTblEntry *rte;
-
- /* Insert the WITH clause if given */
- get_with_clause(query, context);
-
- /*
- * Start the query with UPDATE relname SET
- */
- rte = rt_fetch(query->resultRelation,
query->rtable); - - if (PRETTY_INDENT(context)) - { - appendStringInfoChar(buf, ' '); - context->indentLevel += PRETTYINDENT_STD; - } - - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "UPDATE %s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, fragmentTableName)); - - if(rte->eref != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - else - { - appendStringInfo(buf, "UPDATE %s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - - if (rte->alias != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - - appendStringInfoString(buf, " SET "); - - /* Deparse targetlist */ - get_update_query_targetlist_def(query, query->targetList, context, rte); - - /* Add the FROM clause if needed */ - get_from_clause(query, " FROM ", context); - - /* Add a WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add RETURNING if present */ - if (query->returningList) - { - appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_target_list(query->returningList, context, NULL); - } -} - - -/* ---------- - * get_update_query_targetlist_def - Parse back an UPDATE targetlist - * ---------- - */ -static void -get_update_query_targetlist_def(Query *query, List *targetList, - deparse_context *context, RangeTblEntry *rte) -{ - StringInfo buf = context->buf; - ListCell *l; - ListCell *next_ma_cell; - int remaining_ma_columns; - const char *sep; - SubLink *cur_ma_sublink; - List *ma_sublinks; - - /* - * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks - * into a list. We expect them to appear, in ID order, in resjunk tlist - * entries. - */ - ma_sublinks = NIL; - if (query->hasSubLinks) /* else there can't be any */ - { - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - - if (tle->resjunk && IsA(tle->expr, SubLink)) - { - SubLink *sl = (SubLink *) tle->expr; - - if (sl->subLinkType == MULTIEXPR_SUBLINK) - { - ma_sublinks = lappend(ma_sublinks, sl); - Assert(sl->subLinkId == list_length(ma_sublinks)); - } - } - } - } - next_ma_cell = list_head(ma_sublinks); - cur_ma_sublink = NULL; - remaining_ma_columns = 0; - - /* Add the comma separated list of 'attname = value' */ - sep = ""; - foreach(l, targetList) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - Node *expr; - - if (tle->resjunk) - continue; /* ignore junk entries */ - - /* Emit separator (OK whether we're in multiassignment or not) */ - appendStringInfoString(buf, sep); - sep = ", "; - - /* - * Check to see if we're starting a multiassignment group: if so, - * output a left paren. - */ - if (next_ma_cell != NULL && cur_ma_sublink == NULL) - { - /* - * We must dig down into the expr to see if it's a PARAM_MULTIEXPR - * Param. That could be buried under FieldStores and - * SubscriptingRefs and CoerceToDomains (cf processIndirection()), - * and underneath those there could be an implicit type coercion. 
- * Because we would ignore implicit type coercions anyway, we - * don't need to be as careful as processIndirection() is about - * descending past implicit CoerceToDomains. - */ - expr = (Node *) tle->expr; - while (expr) - { - if (IsA(expr, FieldStore)) - { - FieldStore *fstore = (FieldStore *) expr; - - expr = (Node *) linitial(fstore->newvals); - } - else if (IsA(expr, SubscriptingRef)) - { - SubscriptingRef *sbsref = (SubscriptingRef *) expr; - - if (sbsref->refassgnexpr == NULL) - break; - expr = (Node *) sbsref->refassgnexpr; - } - else if (IsA(expr, CoerceToDomain)) - { - CoerceToDomain *cdomain = (CoerceToDomain *) expr; - - if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) - break; - expr = (Node *) cdomain->arg; - } - else - break; - } - expr = strip_implicit_coercions(expr); - - if (expr && IsA(expr, Param) && - ((Param *) expr)->paramkind == PARAM_MULTIEXPR) - { - cur_ma_sublink = (SubLink *) lfirst(next_ma_cell); - next_ma_cell = lnext(next_ma_cell); - remaining_ma_columns = count_nonjunk_tlist_entries( - ((Query *) cur_ma_sublink->subselect)->targetList); - Assert(((Param *) expr)->paramid == - ((cur_ma_sublink->subLinkId << 16) | 1)); - appendStringInfoChar(buf, '('); - } - } - - /* - * Put out name of target column; look in the catalogs, not at - * tle->resname, since resname will fail to track RENAME. - */ - appendStringInfoString(buf, - quote_identifier(get_attname(rte->relid, - tle->resno, - false))); - - /* - * Print any indirection needed (subfields or subscripts), and strip - * off the top-level nodes representing the indirection assignments. - */ - expr = processIndirection((Node *) tle->expr, context); - - /* - * If we're in a multiassignment, skip printing anything more, unless - * this is the last column; in which case, what we print should be the - * sublink, not the Param. 
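- * For example, UPDATE t SET (a, b) = (SELECT 1, 2) is represented with
- * a junk MULTIEXPR SubLink plus one PARAM_MULTIEXPR Param per column;
- * we open the paren when we hit the first such Param and print the
- * SubLink itself in place of the last one, reproducing
- * "SET (a, b) = (SELECT 1, 2)".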
- */ - if (cur_ma_sublink != NULL) - { - if (--remaining_ma_columns > 0) - continue; /* not the last column of multiassignment */ - appendStringInfoChar(buf, ')'); - expr = (Node *) cur_ma_sublink; - cur_ma_sublink = NULL; - } - - appendStringInfoString(buf, " = "); - - get_rule_expr(expr, context, false); - } -} - - -/* ---------- - * get_delete_query_def - Parse back a DELETE parsetree - * ---------- - */ -static void -get_delete_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - RangeTblEntry *rte; - - /* Insert the WITH clause if given */ - get_with_clause(query, context); - - /* - * Start the query with DELETE FROM relname - */ - rte = rt_fetch(query->resultRelation, query->rtable); - - if (PRETTY_INDENT(context)) - { - appendStringInfoChar(buf, ' '); - context->indentLevel += PRETTYINDENT_STD; - } - - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "DELETE FROM %s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, fragmentTableName)); - - if(rte->eref != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - else - { - appendStringInfo(buf, "DELETE FROM %s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, NIL)); - - if (rte->alias != NULL) - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(query->resultRelation, context))); - } - - /* Add the USING clause if given */ - get_from_clause(query, " USING ", context); - - /* Add a WHERE clause if given */ - if (query->jointree->quals != NULL) - { - appendContextKeyword(context, " WHERE ", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_rule_expr(query->jointree->quals, context, false); - } - - /* Add RETURNING if present */ - if (query->returningList) - { - appendContextKeyword(context, " RETURNING", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); - get_target_list(query->returningList, context, NULL); - } -} - - -/* ---------- - * get_utility_query_def - Parse back a UTILITY parsetree - * ---------- - */ -static void -get_utility_query_def(Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - - if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt)) - { - NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt; - - appendContextKeyword(context, "", - 0, PRETTYINDENT_STD, 1); - appendStringInfo(buf, "NOTIFY %s", - quote_identifier(stmt->conditionname)); - if (stmt->payload) - { - appendStringInfoString(buf, ", "); - simple_quote_literal(buf, stmt->payload); - } - } - else if (query->utilityStmt && IsA(query->utilityStmt, TruncateStmt)) - { - TruncateStmt *stmt = (TruncateStmt *) query->utilityStmt; - List *relationList = stmt->relations; - ListCell *relationCell = NULL; - - appendContextKeyword(context, "", - 0, PRETTYINDENT_STD, 1); - - appendStringInfo(buf, "TRUNCATE TABLE"); - - foreach(relationCell, relationList) - { - RangeVar *relationVar = (RangeVar *) lfirst(relationCell); - Oid relationId = RangeVarGetRelid(relationVar, NoLock, false); - char *relationName = generate_relation_or_shard_name(relationId, - context->distrelid, - context->shardid, NIL); - appendStringInfo(buf, " %s", relationName); - - if (lnext(relationCell) != NULL) - { - 
appendStringInfo(buf, ",");
- }
- }
-
- if (stmt->restart_seqs)
- {
- appendStringInfo(buf, " RESTART IDENTITY");
- }
-
- if (stmt->behavior == DROP_CASCADE)
- {
- appendStringInfo(buf, " CASCADE");
- }
- }
- else
- {
- /* Currently only NOTIFY and TRUNCATE utility statements are expected here */
- elog(ERROR, "unexpected utility statement type");
- }
-}
-
-/*
- * Display a Var appropriately.
- *
- * In some cases (currently only when recursing into an unnamed join)
- * the Var's varlevelsup has to be interpreted with respect to a context
- * above the current one; levelsup indicates the offset.
- *
- * If istoplevel is true, the Var is at the top level of a SELECT's
- * targetlist, which means we need special treatment of whole-row Vars.
- * Instead of the normal "tab.*", we'll print "tab.*::typename", which is a
- * dirty hack to prevent "tab.*" from being expanded into multiple columns.
- * (The parser will strip the useless coercion, so no inefficiency is added in
- * dump and reload.) We used to print just "tab" in such cases, but that is
- * ambiguous and will yield the wrong result if "tab" is also a plain column
- * name in the query.
- *
- * Returns the attname of the Var, or NULL if the Var has no attname (because
- * it is a whole-row Var or a subplan output reference).
- */
-static char *
-get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
-{
- StringInfo buf = context->buf;
- RangeTblEntry *rte;
- AttrNumber attnum;
- int netlevelsup;
- deparse_namespace *dpns;
- deparse_columns *colinfo;
- char *refname;
- char *attname;
-
- /* Find appropriate nesting depth */
- netlevelsup = var->varlevelsup + levelsup;
- if (netlevelsup >= list_length(context->namespaces))
- elog(ERROR, "bogus varlevelsup: %d offset %d",
- var->varlevelsup, levelsup);
- dpns = (deparse_namespace *) list_nth(context->namespaces,
- netlevelsup);
-
- /*
- * Try to find the relevant RTE in this rtable. In a plan tree, it's
- * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig
- * down into the subplans, or INDEX_VAR, which is resolved similarly. Also
- * find the aliases previously assigned for this RTE.
- */
- if (var->varno >= 1 && var->varno <= list_length(dpns->rtable))
- {
- rte = rt_fetch(var->varno, dpns->rtable);
- refname = (char *) list_nth(dpns->rtable_names, var->varno - 1);
- colinfo = deparse_columns_fetch(var->varno, dpns);
- attnum = var->varattno;
- }
- else
- {
- resolve_special_varno((Node *) var, context, NULL,
- get_special_variable);
- return NULL;
- }
-
- /*
- * The planner will sometimes emit Vars referencing resjunk elements of a
- * subquery's target list (this is currently only possible if it chooses
- * to generate a "physical tlist" for a SubqueryScan or CteScan node).
- * Although we prefer to print subquery-referencing Vars using the
- * subquery's alias, that's not possible for resjunk items since they have
- * no alias. So in that case, drill down to the subplan and print the
- * contents of the referenced tlist item. This works because in a plan
- * tree, such Vars can only occur in a SubqueryScan or CteScan node, and
- * we'll have set dpns->inner_planstate to reference the child plan node.
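- * (Such a resjunk column arises, e.g., for an ORDER BY expression used
- * only inside the subquery and absent from its SELECT list; it has no
- * entry in the RTE's eref->colnames, hence no alias we could print.)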
- */ - if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) && - attnum > list_length(rte->eref->colnames) && - dpns->inner_planstate) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); - if (!tle) - elog(ERROR, "invalid attnum %d for relation \"%s\"", - var->varattno, rte->eref->aliasname); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - - /* - * Force parentheses because our caller probably assumed a Var is a - * simple expression. - */ - if (!IsA(tle->expr, Var)) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) tle->expr, context, true); - if (!IsA(tle->expr, Var)) - appendStringInfoChar(buf, ')'); - - pop_child_plan(dpns, &save_dpns); - return NULL; - } - - /* - * If it's an unnamed join, look at the expansion of the alias variable. - * If it's a simple reference to one of the input vars, then recursively - * print the name of that var instead. When it's not a simple reference, - * we have to just print the unqualified join column name. (This can only - * happen with "dangerous" merged columns in a JOIN USING; we took pains - * previously to make the unqualified column name unique in such cases.) - * - * This wouldn't work in decompiling plan trees, because we don't store - * joinaliasvars lists after planning; but a plan tree should never - * contain a join alias variable. - */ - if (rte->rtekind == RTE_JOIN && rte->alias == NULL) - { - if (rte->joinaliasvars == NIL) - elog(ERROR, "cannot decompile join alias var in plan tree"); - if (attnum > 0) - { - Var *aliasvar; - - aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1); - /* we intentionally don't strip implicit coercions here */ - if (aliasvar && IsA(aliasvar, Var)) - { - return get_variable(aliasvar, var->varlevelsup + levelsup, - istoplevel, context); - } - } - - /* - * Unnamed join has no refname. (Note: since it's unnamed, there is - * no way the user could have referenced it to create a whole-row Var - * for it. So we don't have to cover that case below.) - */ - Assert(refname == NULL); - } - - if (attnum == InvalidAttrNumber) - attname = NULL; - else if (attnum > 0) - { - /* Get column name to use from the colinfo struct */ - if (attnum > colinfo->num_cols) - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - attname = colinfo->colnames[attnum - 1]; - if (attname == NULL) /* dropped column? */ - elog(ERROR, "invalid attnum %d for relation \"%s\"", - attnum, rte->eref->aliasname); - } - else if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* System column on a Citus shard */ - attname = get_attname(rte->relid, attnum, false); - } - else - { - /* System column - name is fixed, get it from the catalog */ - attname = get_rte_attribute_name(rte, attnum); - } - - if (refname && (context->varprefix || attname == NULL)) - { - appendStringInfoString(buf, quote_identifier(refname)); - appendStringInfoChar(buf, '.'); - } - if (attname) - appendStringInfoString(buf, quote_identifier(attname)); - else - { - appendStringInfoChar(buf, '*'); - - if (istoplevel) - { - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - /* use rel.*::shard_name instead of rel.*::table_name */ - appendStringInfo(buf, "::%s", - generate_rte_shard_name(rte)); - } - else - { - appendStringInfo(buf, "::%s", - format_type_with_typemod(var->vartype, - var->vartypmod)); - } - } - } - - return attname; -} - -/* - * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. 
This - * routine is actually a callback for get_special_varno, which handles finding - * the correct TargetEntry. We get the expression contained in that - * TargetEntry and just need to deparse it, a job we can throw back on - * get_rule_expr. - */ -static void -get_special_variable(Node *node, deparse_context *context, void *private) -{ - StringInfo buf = context->buf; - - /* - * Force parentheses because our caller probably assumed a Var is a simple - * expression. - */ - if (!IsA(node, Var)) - appendStringInfoChar(buf, '('); - get_rule_expr(node, context, true); - if (!IsA(node, Var)) - appendStringInfoChar(buf, ')'); -} - -/* - * Chase through plan references to special varnos (OUTER_VAR, INNER_VAR, - * INDEX_VAR) until we find a real Var or some kind of non-Var node; then, - * invoke the callback provided. - */ -static void -resolve_special_varno(Node *node, deparse_context *context, void *private, - void (*callback) (Node *, deparse_context *, void *)) -{ - Var *var; - deparse_namespace *dpns; - - /* If it's not a Var, invoke the callback. */ - if (!IsA(node, Var)) - { - callback(node, context, private); - return; - } - - /* Find appropriate nesting depth */ - var = (Var *) node; - dpns = (deparse_namespace *) list_nth(context->namespaces, - var->varlevelsup); - - /* - * It's a special RTE, so recurse. - */ - if (var->varno == OUTER_VAR && dpns->outer_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); - - push_child_plan(dpns, dpns->outer_planstate, &save_dpns); - resolve_special_varno((Node *) tle->expr, context, private, callback); - pop_child_plan(dpns, &save_dpns); - return; - } - else if (var->varno == INNER_VAR && dpns->inner_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - - tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); - - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - resolve_special_varno((Node *) tle->expr, context, private, callback); - pop_child_plan(dpns, &save_dpns); - return; - } - else if (var->varno == INDEX_VAR && dpns->index_tlist) - { - TargetEntry *tle; - - tle = get_tle_by_resno(dpns->index_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); - - resolve_special_varno((Node *) tle->expr, context, private, callback); - return; - } - else if (var->varno < 1 || var->varno > list_length(dpns->rtable)) - elog(ERROR, "bogus varno: %d", var->varno); - - /* Not special. Just invoke the callback. */ - callback(node, context, private); -} - -/* - * Get the name of a field of an expression of composite type. The - * expression is usually a Var, but we handle other cases too. - * - * levelsup is an extra offset to interpret the Var's varlevelsup correctly. - * - * This is fairly straightforward when the expression has a named composite - * type; we need only look up the type in the catalogs. However, the type - * could also be RECORD. Since no actual table or view column is allowed to - * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE - * or to a subquery output. We drill down to find the ultimate defining - * expression and attempt to infer the field name from it. We ereport if we - * can't determine the name. - * - * Similarly, a PARAM of type RECORD has to refer to some expression of - * a determinable composite type. 
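- *
- * For instance, in SELECT (ss.r).f1 FROM (SELECT ROW(1, 2) AS r) ss
- * the Var for ss.r has type RECORD; we drill into the subquery, find
- * the RowExpr, and recover "f1" from its colnames list.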
- */ -static const char * -get_name_for_var_field(Var *var, int fieldno, - int levelsup, deparse_context *context) -{ - RangeTblEntry *rte; - AttrNumber attnum; - int netlevelsup; - deparse_namespace *dpns; - TupleDesc tupleDesc; - Node *expr; - - /* - * If it's a RowExpr that was expanded from a whole-row Var, use the - * column names attached to it. - */ - if (IsA(var, RowExpr)) - { - RowExpr *r = (RowExpr *) var; - - if (fieldno > 0 && fieldno <= list_length(r->colnames)) - return strVal(list_nth(r->colnames, fieldno - 1)); - } - - /* - * If it's a Param of type RECORD, try to find what the Param refers to. - */ - if (IsA(var, Param)) - { - Param *param = (Param *) var; - ListCell *ancestor_cell; - - expr = find_param_referent(param, context, &dpns, &ancestor_cell); - if (expr) - { - /* Found a match, so recurse to decipher the field name */ - deparse_namespace save_dpns; - const char *result; - - push_ancestor_plan(dpns, ancestor_cell, &save_dpns); - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - pop_ancestor_plan(dpns, &save_dpns); - return result; - } - } - - /* - * If it's a Var of type RECORD, we have to find what the Var refers to; - * if not, we can use get_expr_result_tupdesc(). - */ - if (!IsA(var, Var) || - var->vartype != RECORDOID) - { - tupleDesc = get_expr_result_tupdesc((Node *) var, false); - /* Got the tupdesc, so we can extract the field name */ - Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); - return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname); - } - - /* Find appropriate nesting depth */ - netlevelsup = var->varlevelsup + levelsup; - if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", - var->varlevelsup, levelsup); - dpns = (deparse_namespace *) list_nth(context->namespaces, - netlevelsup); - - /* - * Try to find the relevant RTE in this rtable. In a plan tree, it's - * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig - * down into the subplans, or INDEX_VAR, which is resolved similarly. 
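- *
- * Each special-varno branch below follows the same save/restore shape;
- * sketched here for the OUTER_VAR case:
- *
- *     TargetEntry *tle = get_tle_by_resno(dpns->outer_tlist, var->varattno);
- *
- *     push_child_plan(dpns, dpns->outer_planstate, &save_dpns);
- *     result = get_name_for_var_field((Var *) tle->expr, fieldno,
- *                                     levelsup, context);
- *     pop_child_plan(dpns, &save_dpns);
- *
- * INDEX_VAR is the one exception: it needs no plan-level switch.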
- */ - if (var->varno >= 1 && var->varno <= list_length(dpns->rtable)) - { - rte = rt_fetch(var->varno, dpns->rtable); - attnum = var->varattno; - } - else if (var->varno == OUTER_VAR && dpns->outer_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->outer_planstate, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - else if (var->varno == INNER_VAR && dpns->inner_tlist) - { - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); - - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - else if (var->varno == INDEX_VAR && dpns->index_tlist) - { - TargetEntry *tle; - const char *result; - - tle = get_tle_by_resno(dpns->index_tlist, var->varattno); - if (!tle) - elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); - - Assert(netlevelsup == 0); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - return result; - } - else - { - elog(ERROR, "bogus varno: %d", var->varno); - return NULL; /* keep compiler quiet */ - } - - if (attnum == InvalidAttrNumber) - { - /* Var is whole-row reference to RTE, so select the right field */ - return get_rte_attribute_name(rte, fieldno); - } - - /* - * This part has essentially the same logic as the parser's - * expandRecordVariable() function, but we are dealing with a different - * representation of the input context, and we only need one field name - * not a TupleDesc. Also, we need special cases for finding subquery and - * CTE subplans when deparsing Plan trees. - */ - expr = (Node *) var; /* default if we can't drill down */ - - switch (rte->rtekind) - { - case RTE_RELATION: - case RTE_VALUES: - case RTE_NAMEDTUPLESTORE: - case RTE_RESULT: - - /* - * This case should not occur: a column of a table or values list - * shouldn't have type RECORD. Fall through and fail (most - * likely) at the bottom. - */ - break; - case RTE_SUBQUERY: - /* Subselect-in-FROM: examine sub-select's output expr */ - { - if (rte->subquery) - { - TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList, - attnum); - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - expr = (Node *) ste->expr; - if (IsA(expr, Var)) - { - /* - * Recurse into the sub-select to see what its Var - * refers to. We have to build an additional level of - * namespace to keep in step with varlevelsup in the - * subselect. 
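- *
- * The bookkeeping is just a push/pop around the recursion; in outline:
- *
- *     context->namespaces = lcons(&mydpns, context->namespaces);
- *     result = get_name_for_var_field((Var *) expr, fieldno, 0, context);
- *     context->namespaces = list_delete_first(context->namespaces);
- *
- * so that the sub-select's varlevelsup values index the stack correctly.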
- */ - deparse_namespace mydpns; - const char *result; - - set_deparse_for_query(&mydpns, rte->subquery, - context->namespaces); - - context->namespaces = lcons(&mydpns, - context->namespaces); - - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - - context->namespaces = - list_delete_first(context->namespaces); - - return result; - } - /* else fall through to inspect the expression */ - } - else - { - /* - * We're deparsing a Plan tree so we don't have complete - * RTE entries (in particular, rte->subquery is NULL). But - * the only place we'd see a Var directly referencing a - * SUBQUERY RTE is in a SubqueryScan plan node, and we can - * look into the child plan's tlist instead. - */ - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - if (!dpns->inner_planstate) - elog(ERROR, "failed to find plan for subquery %s", - rte->eref->aliasname); - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "bogus varattno for subquery var: %d", - attnum); - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - } - break; - case RTE_JOIN: - /* Join RTE --- recursively inspect the alias variable */ - if (rte->joinaliasvars == NIL) - elog(ERROR, "cannot decompile join alias var in plan tree"); - Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars)); - expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1); - Assert(expr != NULL); - /* we intentionally don't strip implicit coercions here */ - if (IsA(expr, Var)) - return get_name_for_var_field((Var *) expr, fieldno, - var->varlevelsup + levelsup, - context); - /* else fall through to inspect the expression */ - break; - case RTE_FUNCTION: - case RTE_TABLEFUNC: - - /* - * We couldn't get here unless a function is declared with one of - * its result columns as RECORD, which is not allowed. - */ - break; - case RTE_CTE: - /* CTE reference: examine subquery's output expr */ - { - CommonTableExpr *cte = NULL; - Index ctelevelsup; - ListCell *lc; - - /* - * Try to find the referenced CTE using the namespace stack. - */ - ctelevelsup = rte->ctelevelsup + netlevelsup; - if (ctelevelsup >= list_length(context->namespaces)) - lc = NULL; - else - { - deparse_namespace *ctedpns; - - ctedpns = (deparse_namespace *) - list_nth(context->namespaces, ctelevelsup); - foreach(lc, ctedpns->ctes) - { - cte = (CommonTableExpr *) lfirst(lc); - if (strcmp(cte->ctename, rte->ctename) == 0) - break; - } - } - if (lc != NULL) - { - Query *ctequery = (Query *) cte->ctequery; - TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte), - attnum); - - if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", - rte->eref->aliasname, attnum); - expr = (Node *) ste->expr; - if (IsA(expr, Var)) - { - /* - * Recurse into the CTE to see what its Var refers to. - * We have to build an additional level of namespace - * to keep in step with varlevelsup in the CTE. - * Furthermore it could be an outer CTE, so we may - * have to delete some levels of namespace. 
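- *
- * Concretely (hypothetical stack): with namespaces [n0, n1, n2, n3] and
- * ctelevelsup = 2, the CTE's own level must line up with n2, so we keep
- * only the tail before pushing:
- *
- *     new_nslist = list_copy_tail(context->namespaces, ctelevelsup);
- *     context->namespaces = lcons(&mydpns, new_nslist);
- *
- * which leaves [mydpns, n2, n3] for the recursive call.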
- */ - List *save_nslist = context->namespaces; - List *new_nslist; - deparse_namespace mydpns; - const char *result; - - set_deparse_for_query(&mydpns, ctequery, - context->namespaces); - - new_nslist = list_copy_tail(context->namespaces, - ctelevelsup); - context->namespaces = lcons(&mydpns, new_nslist); - - result = get_name_for_var_field((Var *) expr, fieldno, - 0, context); - - context->namespaces = save_nslist; - - return result; - } - /* else fall through to inspect the expression */ - } - else - { - /* - * We're deparsing a Plan tree so we don't have a CTE - * list. But the only place we'd see a Var directly - * referencing a CTE RTE is in a CteScan plan node, and we - * can look into the subplan's tlist instead. - */ - TargetEntry *tle; - deparse_namespace save_dpns; - const char *result; - - if (!dpns->inner_planstate) - elog(ERROR, "failed to find plan for CTE %s", - rte->eref->aliasname); - tle = get_tle_by_resno(dpns->inner_tlist, attnum); - if (!tle) - elog(ERROR, "bogus varattno for subquery var: %d", - attnum); - Assert(netlevelsup == 0); - push_child_plan(dpns, dpns->inner_planstate, &save_dpns); - - result = get_name_for_var_field((Var *) tle->expr, fieldno, - levelsup, context); - - pop_child_plan(dpns, &save_dpns); - return result; - } - } - break; - } - - /* - * We now have an expression we can't expand any more, so see if - * get_expr_result_tupdesc() can do anything with it. - */ - tupleDesc = get_expr_result_tupdesc(expr, false); - /* Got the tupdesc, so we can extract the field name */ - Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); - return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname); -} - -/* - * Try to find the referenced expression for a PARAM_EXEC Param that might - * reference a parameter supplied by an upper NestLoop or SubPlan plan node. - * - * If successful, return the expression and set *dpns_p and *ancestor_cell_p - * appropriately for calling push_ancestor_plan(). If no referent can be - * found, return NULL. - */ -static Node * -find_param_referent(Param *param, deparse_context *context, - deparse_namespace **dpns_p, ListCell **ancestor_cell_p) -{ - /* Initialize output parameters to prevent compiler warnings */ - *dpns_p = NULL; - *ancestor_cell_p = NULL; - - /* - * If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or - * SubPlan argument. This will necessarily be in some ancestor of the - * current expression's PlanState. - */ - if (param->paramkind == PARAM_EXEC) - { - deparse_namespace *dpns; - PlanState *child_ps; - bool in_same_plan_level; - ListCell *lc; - - dpns = (deparse_namespace *) linitial(context->namespaces); - child_ps = dpns->planstate; - in_same_plan_level = true; - - foreach(lc, dpns->ancestors) - { - PlanState *ps = (PlanState *) lfirst(lc); - ListCell *lc2; - - /* - * NestLoops transmit params to their inner child only; also, once - * we've crawled up out of a subplan, this couldn't possibly be - * the right match. - */ - if (IsA(ps, NestLoopState) && - child_ps == innerPlanState(ps) && - in_same_plan_level) - { - NestLoop *nl = (NestLoop *) ps->plan; - - foreach(lc2, nl->nestParams) - { - NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2); - - if (nlp->paramno == param->paramid) - { - /* Found a match, so return it */ - *dpns_p = dpns; - *ancestor_cell_p = lc; - return (Node *) nlp->paramval; - } - } - } - - /* - * Check to see if we're crawling up from a subplan. 
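- *
- * A SubPlan keeps two parallel lists, parParam (the param IDs) and args
- * (the outer-level expressions that feed them), so the matching below
- * walks them in lockstep, roughly:
- *
- *     forboth(lc3, subplan->parParam, lc4, subplan->args)
- *     {
- *         if (lfirst_int(lc3) == param->paramid)
- *             return (Node *) lfirst(lc4);
- *     }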
- */
-            foreach(lc2, ps->subPlan)
-            {
-                SubPlanState *sstate = (SubPlanState *) lfirst(lc2);
-                SubPlan *subplan = sstate->subplan;
-                ListCell *lc3;
-                ListCell *lc4;
-
-                if (child_ps != sstate->planstate)
-                    continue;
-
-                /* Matched subplan, so check its arguments */
-                forboth(lc3, subplan->parParam, lc4, subplan->args)
-                {
-                    int paramid = lfirst_int(lc3);
-                    Node *arg = (Node *) lfirst(lc4);
-
-                    if (paramid == param->paramid)
-                    {
-                        /* Found a match, so return it */
-                        *dpns_p = dpns;
-                        *ancestor_cell_p = lc;
-                        return arg;
-                    }
-                }
-
-                /* Keep looking, but we are emerging from a subplan. */
-                in_same_plan_level = false;
-                break;
-            }
-
-            /*
-             * Likewise check to see if we're emerging from an initplan.
-             * Initplans never have any parParams, so no need to search that
-             * list, but we need to know if we should reset
-             * in_same_plan_level.
-             */
-            foreach(lc2, ps->initPlan)
-            {
-                SubPlanState *sstate = (SubPlanState *) lfirst(lc2);
-
-                if (child_ps != sstate->planstate)
-                    continue;
-
-                /* No parameters to be had here. */
-                Assert(sstate->subplan->parParam == NIL);
-
-                /* Keep looking, but we are emerging from an initplan. */
-                in_same_plan_level = false;
-                break;
-            }
-
-            /* No luck, crawl up to next ancestor */
-            child_ps = ps;
-        }
-    }
-
-    /* No referent found */
-    return NULL;
-}
-
-/*
- * Display a Param appropriately.
- */
-static void
-get_parameter(Param *param, deparse_context *context)
-{
-    Node *expr;
-    deparse_namespace *dpns;
-    ListCell *ancestor_cell;
-
-    /*
-     * If it's a PARAM_EXEC parameter, try to locate the expression from which
-     * the parameter was computed. Note that failing to find a referent isn't
-     * an error, since the Param might well be a subplan output rather than an
-     * input.
-     */
-    expr = find_param_referent(param, context, &dpns, &ancestor_cell);
-    if (expr)
-    {
-        /* Found a match, so print it */
-        deparse_namespace save_dpns;
-        bool save_varprefix;
-        bool need_paren;
-
-        /* Switch attention to the ancestor plan node */
-        push_ancestor_plan(dpns, ancestor_cell, &save_dpns);
-
-        /*
-         * Force prefixing of Vars, since they won't belong to the relation
-         * being scanned in the original plan node.
-         */
-        save_varprefix = context->varprefix;
-        context->varprefix = true;
-
-        /*
-         * A Param's expansion is typically a Var, Aggref, or upper-level
-         * Param, which wouldn't need extra parentheses. Otherwise, insert
-         * parens to ensure the expression looks atomic.
-         */
-        need_paren = !(IsA(expr, Var) ||
-                       IsA(expr, Aggref) ||
-                       IsA(expr, Param));
-        if (need_paren)
-            appendStringInfoChar(context->buf, '(');
-
-        get_rule_expr(expr, context, false);
-
-        if (need_paren)
-            appendStringInfoChar(context->buf, ')');
-
-        context->varprefix = save_varprefix;
-
-        pop_ancestor_plan(dpns, &save_dpns);
-
-        return;
-    }
-
-    /*
-     * Not PARAM_EXEC, or couldn't find referent: for base types, just print
-     * $N. For composite types, add a cast to the parameter so that the
-     * remote node can detect its type.
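-     *
-     * For example (hypothetical type name): a parameter of a user-defined
-     * type, say an enum "my_color", satisfies
-     *
-     *     param->paramtype >= FirstNormalObjectId
-     *
-     * and deparses as "$1::my_color", while a built-in type such as text,
-     * whose OID is far below FirstNormalObjectId, stays a bare "$1".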
- */ - if (param->paramtype >= FirstNormalObjectId) - { - char *typeName = format_type_with_typemod(param->paramtype, param->paramtypmod); - - appendStringInfo(context->buf, "$%d::%s", param->paramid, typeName); - } - else - { - appendStringInfo(context->buf, "$%d", param->paramid); - } -} - -/* - * get_simple_binary_op_name - * - * helper function for isSimpleNode - * will return single char binary operator name, or NULL if it's not - */ -static const char * -get_simple_binary_op_name(OpExpr *expr) -{ - List *args = expr->args; - - if (list_length(args) == 2) - { - /* binary operator */ - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - const char *op; - - op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2)); - if (strlen(op) == 1) - return op; - } - return NULL; -} - - -/* - * isSimpleNode - check if given node is simple (doesn't need parenthesizing) - * - * true : simple in the context of parent node's type - * false : not simple - */ -static bool -isSimpleNode(Node *node, Node *parentNode, int prettyFlags) -{ - if (!node) - return false; - - switch (nodeTag(node)) - { - case T_Var: - case T_Const: - case T_Param: - case T_CoerceToDomainValue: - case T_SetToDefault: - case T_CurrentOfExpr: - /* single words: always simple */ - return true; - - case T_SubscriptingRef: - case T_ArrayExpr: - case T_RowExpr: - case T_CoalesceExpr: - case T_MinMaxExpr: - case T_SQLValueFunction: - case T_XmlExpr: - case T_NextValueExpr: - case T_NullIfExpr: - case T_Aggref: - case T_WindowFunc: - case T_FuncExpr: - /* function-like: name(..) or name[..] */ - return true; - - /* CASE keywords act as parentheses */ - case T_CaseExpr: - return true; - - case T_FieldSelect: - - /* - * appears simple since . has top precedence, unless parent is - * T_FieldSelect itself! - */ - return (IsA(parentNode, FieldSelect) ? false : true); - - case T_FieldStore: - - /* - * treat like FieldSelect (probably doesn't matter) - */ - return (IsA(parentNode, FieldStore) ? 
false : true); - - case T_CoerceToDomain: - /* maybe simple, check args */ - return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg, - node, prettyFlags); - case T_RelabelType: - return isSimpleNode((Node *) ((RelabelType *) node)->arg, - node, prettyFlags); - case T_CoerceViaIO: - return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg, - node, prettyFlags); - case T_ArrayCoerceExpr: - return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg, - node, prettyFlags); - case T_ConvertRowtypeExpr: - return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg, - node, prettyFlags); - - case T_OpExpr: - { - /* depends on parent node type; needs further checking */ - if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr)) - { - const char *op; - const char *parentOp; - bool is_lopriop; - bool is_hipriop; - bool is_lopriparent; - bool is_hipriparent; - - op = get_simple_binary_op_name((OpExpr *) node); - if (!op) - return false; - - /* We know only the basic operators + - and * / % */ - is_lopriop = (strchr("+-", *op) != NULL); - is_hipriop = (strchr("*/%", *op) != NULL); - if (!(is_lopriop || is_hipriop)) - return false; - - parentOp = get_simple_binary_op_name((OpExpr *) parentNode); - if (!parentOp) - return false; - - is_lopriparent = (strchr("+-", *parentOp) != NULL); - is_hipriparent = (strchr("*/%", *parentOp) != NULL); - if (!(is_lopriparent || is_hipriparent)) - return false; - - if (is_hipriop && is_lopriparent) - return true; /* op binds tighter than parent */ - - if (is_lopriop && is_hipriparent) - return false; - - /* - * Operators are same priority --- can skip parens only if - * we have (a - b) - c, not a - (b - c). - */ - if (node == (Node *) linitial(((OpExpr *) parentNode)->args)) - return true; - - return false; - } - /* else do the same stuff as for T_SubLink et al. 
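- *
- * In other words, with hypothetical columns a, b, c, the test above lets
- * "a - b - c" stay unparenthesized only for the left-deep tree:
- *
- *     (a - b) - c    node == linitial(parent->args)  =>  parens skippable
- *     a - (b - c)    node == lsecond(parent->args)   =>  parens required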
*/ - } - /* FALLTHROUGH */ - - case T_SubLink: - case T_NullTest: - case T_BooleanTest: - case T_DistinctExpr: - switch (nodeTag(parentNode)) - { - case T_FuncExpr: - { - /* special handling for casts */ - CoercionForm type = ((FuncExpr *) parentNode)->funcformat; - - if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) - return false; - return true; /* own parentheses */ - } - case T_BoolExpr: /* lower precedence */ - case T_SubscriptingRef: /* other separators */ - case T_ArrayExpr: /* other separators */ - case T_RowExpr: /* other separators */ - case T_CoalesceExpr: /* own parentheses */ - case T_MinMaxExpr: /* own parentheses */ - case T_XmlExpr: /* own parentheses */ - case T_NullIfExpr: /* other separators */ - case T_Aggref: /* own parentheses */ - case T_WindowFunc: /* own parentheses */ - case T_CaseExpr: /* other separators */ - return true; - default: - return false; - } - - case T_BoolExpr: - switch (nodeTag(parentNode)) - { - case T_BoolExpr: - if (prettyFlags & PRETTYFLAG_PAREN) - { - BoolExprType type; - BoolExprType parentType; - - type = ((BoolExpr *) node)->boolop; - parentType = ((BoolExpr *) parentNode)->boolop; - switch (type) - { - case NOT_EXPR: - case AND_EXPR: - if (parentType == AND_EXPR || parentType == OR_EXPR) - return true; - break; - case OR_EXPR: - if (parentType == OR_EXPR) - return true; - break; - } - } - return false; - case T_FuncExpr: - { - /* special handling for casts */ - CoercionForm type = ((FuncExpr *) parentNode)->funcformat; - - if (type == COERCE_EXPLICIT_CAST || - type == COERCE_IMPLICIT_CAST) - return false; - return true; /* own parentheses */ - } - case T_SubscriptingRef: /* other separators */ - case T_ArrayExpr: /* other separators */ - case T_RowExpr: /* other separators */ - case T_CoalesceExpr: /* own parentheses */ - case T_MinMaxExpr: /* own parentheses */ - case T_XmlExpr: /* own parentheses */ - case T_NullIfExpr: /* other separators */ - case T_Aggref: /* own parentheses */ - case T_WindowFunc: /* own parentheses */ - case T_CaseExpr: /* other separators */ - return true; - default: - return false; - } - - default: - break; - } - /* those we don't know: in dubio complexo */ - return false; -} - - -/* - * appendContextKeyword - append a keyword to buffer - * - * If prettyPrint is enabled, perform a line break, and adjust indentation. - * Otherwise, just append the keyword. - */ -static void -appendContextKeyword(deparse_context *context, const char *str, - int indentBefore, int indentAfter, int indentPlus) -{ - StringInfo buf = context->buf; - - if (PRETTY_INDENT(context)) - { - int indentAmount; - - context->indentLevel += indentBefore; - - /* remove any trailing spaces currently in the buffer ... */ - removeStringInfoSpaces(buf); - /* ... then add a newline and some spaces */ - appendStringInfoChar(buf, '\n'); - - if (context->indentLevel < PRETTYINDENT_LIMIT) - indentAmount = Max(context->indentLevel, 0) + indentPlus; - else - { - /* - * If we're indented more than PRETTYINDENT_LIMIT characters, try - * to conserve horizontal space by reducing the per-level - * indentation. For best results the scale factor here should - * divide all the indent amounts that get added to indentLevel - * (PRETTYINDENT_STD, etc). It's important that the indentation - * not grow unboundedly, else deeply-nested trees use O(N^2) - * whitespace; so we also wrap modulo PRETTYINDENT_LIMIT. 
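- *
- * A worked example, using the usual values PRETTYINDENT_STD = 8 and
- * PRETTYINDENT_LIMIT = 40: at indentLevel = 120,
- *
- *     indentAmount = (40 + (120 - 40) / (8 / 2)) % 40 + indentPlus
- *                  = 60 % 40 + indentPlus
- *                  = 20 + indentPlus
- *
- * so the emitted indentation stays bounded no matter how deep the tree.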
- */ - indentAmount = PRETTYINDENT_LIMIT + - (context->indentLevel - PRETTYINDENT_LIMIT) / - (PRETTYINDENT_STD / 2); - indentAmount %= PRETTYINDENT_LIMIT; - /* scale/wrap logic affects indentLevel, but not indentPlus */ - indentAmount += indentPlus; - } - appendStringInfoSpaces(buf, indentAmount); - - appendStringInfoString(buf, str); - - context->indentLevel += indentAfter; - if (context->indentLevel < 0) - context->indentLevel = 0; - } - else - appendStringInfoString(buf, str); -} - -/* - * removeStringInfoSpaces - delete trailing spaces from a buffer. - * - * Possibly this should move to stringinfo.c at some point. - */ -static void -removeStringInfoSpaces(StringInfo str) -{ - while (str->len > 0 && str->data[str->len - 1] == ' ') - str->data[--(str->len)] = '\0'; -} - - -/* - * get_rule_expr_paren - deparse expr using get_rule_expr, - * embracing the string with parentheses if necessary for prettyPrint. - * - * Never embrace if prettyFlags=0, because it's done in the calling node. - * - * Any node that does *not* embrace its argument node by sql syntax (with - * parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should - * use get_rule_expr_paren instead of get_rule_expr so parentheses can be - * added. - */ -static void -get_rule_expr_paren(Node *node, deparse_context *context, - bool showimplicit, Node *parentNode) -{ - bool need_paren; - - need_paren = PRETTY_PAREN(context) && - !isSimpleNode(node, parentNode, context->prettyFlags); - - if (need_paren) - appendStringInfoChar(context->buf, '('); - - get_rule_expr(node, context, showimplicit); - - if (need_paren) - appendStringInfoChar(context->buf, ')'); -} - - -/* ---------- - * get_rule_expr - Parse back an expression - * - * Note: showimplicit determines whether we display any implicit cast that - * is present at the top of the expression tree. It is a passed argument, - * not a field of the context struct, because we change the value as we - * recurse down into the expression. In general we suppress implicit casts - * when the result type is known with certainty (eg, the arguments of an - * OR must be boolean). We display implicit casts for arguments of functions - * and operators, since this is needed to be certain that the same function - * or operator will be chosen when the expression is re-parsed. - * ---------- - */ -static void -get_rule_expr(Node *node, deparse_context *context, - bool showimplicit) -{ - StringInfo buf = context->buf; - - if (node == NULL) - return; - - /* Guard against excessively long or deeply-nested queries */ - CHECK_FOR_INTERRUPTS(); - check_stack_depth(); - - /* - * Each level of get_rule_expr must emit an indivisible term - * (parenthesized if necessary) to ensure result is reparsed into the same - * expression tree. The only exception is that when the input is a List, - * we emit the component items comma-separated with no surrounding - * decoration; this is convenient for most callers. 
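- *
- * The List exception is handled by the T_List arm at the bottom of the
- * switch; in outline:
- *
- *     sep = "";
- *     foreach(l, (List *) node)
- *     {
- *         appendStringInfoString(buf, sep);
- *         get_rule_expr((Node *) lfirst(l), context, showimplicit);
- *         sep = ", ";
- *     }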
- */ - switch (nodeTag(node)) - { - case T_Var: - (void) get_variable((Var *) node, 0, false, context); - break; - - case T_Const: - get_const_expr((Const *) node, context, 0); - break; - - case T_Param: - get_parameter((Param *) node, context); - break; - - case T_Aggref: - get_agg_expr((Aggref *) node, context, (Aggref *) node); - break; - - case T_GroupingFunc: - { - GroupingFunc *gexpr = (GroupingFunc *) node; - - appendStringInfoString(buf, "GROUPING("); - get_rule_expr((Node *) gexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_WindowFunc: - get_windowfunc_expr((WindowFunc *) node, context); - break; - - case T_SubscriptingRef: - { - SubscriptingRef *sbsref = (SubscriptingRef *) node; - bool need_parens; - - /* - * If the argument is a CaseTestExpr, we must be inside a - * FieldStore, ie, we are assigning to an element of an array - * within a composite column. Since we already punted on - * displaying the FieldStore's target information, just punt - * here too, and display only the assignment source - * expression. - */ - if (IsA(sbsref->refexpr, CaseTestExpr)) - { - Assert(sbsref->refassgnexpr); - get_rule_expr((Node *) sbsref->refassgnexpr, - context, showimplicit); - break; - } - - /* - * Parenthesize the argument unless it's a simple Var or a - * FieldSelect. (In particular, if it's another - * SubscriptingRef, we *must* parenthesize to avoid - * confusion.) - */ - need_parens = !IsA(sbsref->refexpr, Var) && - !IsA(sbsref->refexpr, FieldSelect); - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) sbsref->refexpr, context, showimplicit); - if (need_parens) - appendStringInfoChar(buf, ')'); - - /* - * If there's a refassgnexpr, we want to print the node in the - * format "container[subscripts] := refassgnexpr". This is - * not legal SQL, so decompilation of INSERT or UPDATE - * statements should always use processIndirection as part of - * the statement-level syntax. We should only see this when - * EXPLAIN tries to print the targetlist of a plan resulting - * from such a statement. - */ - if (sbsref->refassgnexpr) - { - Node *refassgnexpr; - - /* - * Use processIndirection to print this node's subscripts - * as well as any additional field selections or - * subscripting in immediate descendants. It returns the - * RHS expr that is actually being "assigned". 
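- *
- * For instance (hypothetical column), an UPDATE that assigns to one
- * array element can surface in EXPLAIN output as
- *
- *     arr[2] := 42
- *
- * where processIndirection() printed the "[2]" subscript and handed
- * back the "42" expression for us to deparse after the ":=".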
- */ - refassgnexpr = processIndirection(node, context); - appendStringInfoString(buf, " := "); - get_rule_expr(refassgnexpr, context, showimplicit); - } - else - { - /* Just an ordinary container fetch, so print subscripts */ - printSubscripts(sbsref, context); - } - } - break; - - case T_FuncExpr: - get_func_expr((FuncExpr *) node, context, showimplicit); - break; - - case T_NamedArgExpr: - { - NamedArgExpr *na = (NamedArgExpr *) node; - - appendStringInfo(buf, "%s => ", quote_identifier(na->name)); - get_rule_expr((Node *) na->arg, context, showimplicit); - } - break; - - case T_OpExpr: - get_oper_expr((OpExpr *) node, context); - break; - - case T_DistinctExpr: - { - DistinctExpr *expr = (DistinctExpr *) node; - List *args = expr->args; - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg1, context, true, node); - appendStringInfoString(buf, " IS DISTINCT FROM "); - get_rule_expr_paren(arg2, context, true, node); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_NullIfExpr: - { - NullIfExpr *nullifexpr = (NullIfExpr *) node; - - appendStringInfoString(buf, "NULLIF("); - get_rule_expr((Node *) nullifexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_ScalarArrayOpExpr: - { - ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; - List *args = expr->args; - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg1, context, true, node); - appendStringInfo(buf, " %s %s (", - generate_operator_name(expr->opno, - exprType(arg1), - get_base_element_type(exprType(arg2))), - expr->useOr ? "ANY" : "ALL"); - get_rule_expr_paren(arg2, context, true, node); - - /* - * There's inherent ambiguity in "x op ANY/ALL (y)" when y is - * a bare sub-SELECT. Since we're here, the sub-SELECT must - * be meant as a scalar sub-SELECT yielding an array value to - * be used in ScalarArrayOpExpr; but the grammar will - * preferentially interpret such a construct as an ANY/ALL - * SubLink. To prevent misparsing the output that way, insert - * a dummy coercion (which will be stripped by parse analysis, - * so no inefficiency is added in dump and reload). This is - * indeed most likely what the user wrote to get the construct - * accepted in the first place. 
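- *
- * So, with hypothetical names, the deparsed form looks like
- *
- *     x = ANY ((SELECT array_agg(y) FROM t)::integer[])
- *
- * where the trailing "::integer[]" is the dummy coercion that keeps
- * the grammar from reading the sub-SELECT as an ANY-SubLink.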
- */ - if (IsA(arg2, SubLink) && - ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK) - appendStringInfo(buf, "::%s", - format_type_with_typemod(exprType(arg2), - exprTypmod(arg2))); - appendStringInfoChar(buf, ')'); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_BoolExpr: - { - BoolExpr *expr = (BoolExpr *) node; - Node *first_arg = linitial(expr->args); - ListCell *arg = lnext(list_head(expr->args)); - - switch (expr->boolop) - { - case AND_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(first_arg, context, - false, node); - while (arg) - { - appendStringInfoString(buf, " AND "); - get_rule_expr_paren((Node *) lfirst(arg), context, - false, node); - arg = lnext(arg); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - case OR_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(first_arg, context, - false, node); - while (arg) - { - appendStringInfoString(buf, " OR "); - get_rule_expr_paren((Node *) lfirst(arg), context, - false, node); - arg = lnext(arg); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - case NOT_EXPR: - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - appendStringInfoString(buf, "NOT "); - get_rule_expr_paren(first_arg, context, - false, node); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - break; - - default: - elog(ERROR, "unrecognized boolop: %d", - (int) expr->boolop); - } - } - break; - - case T_SubLink: - get_sublink_expr((SubLink *) node, context); - break; - - case T_SubPlan: - { - SubPlan *subplan = (SubPlan *) node; - - /* - * We cannot see an already-planned subplan in rule deparsing, - * only while EXPLAINing a query plan. We don't try to - * reconstruct the original SQL, just reference the subplan - * that appears elsewhere in EXPLAIN's result. - */ - if (subplan->useHashTable) - appendStringInfo(buf, "(hashed %s)", subplan->plan_name); - else - appendStringInfo(buf, "(%s)", subplan->plan_name); - } - break; - - case T_AlternativeSubPlan: - { - AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; - ListCell *lc; - - /* As above, this can only happen during EXPLAIN */ - appendStringInfoString(buf, "(alternatives: "); - foreach(lc, asplan->subplans) - { - SubPlan *splan = lfirst_node(SubPlan, lc); - - if (splan->useHashTable) - appendStringInfo(buf, "hashed %s", splan->plan_name); - else - appendStringInfoString(buf, splan->plan_name); - if (lnext(lc)) - appendStringInfoString(buf, " or "); - } - appendStringInfoChar(buf, ')'); - } - break; - - case T_FieldSelect: - { - FieldSelect *fselect = (FieldSelect *) node; - Node *arg = (Node *) fselect->arg; - int fno = fselect->fieldnum; - const char *fieldname; - bool need_parens; - - /* - * Parenthesize the argument unless it's an SubscriptingRef or - * another FieldSelect. Note in particular that it would be - * WRONG to not parenthesize a Var argument; simplicity is not - * the issue here, having the right number of names is. - */ - need_parens = !IsA(arg, SubscriptingRef) && - !IsA(arg, FieldSelect); - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr(arg, context, true); - if (need_parens) - appendStringInfoChar(buf, ')'); - - /* - * Get and print the field name. 
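- *
- * The parentheses matter for reparsing: given a hypothetical composite
- * column c, "(c).f" selects field f from c, whereas a bare "c.f" would
- * be parsed as column f of a table named c.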
- */
-                fieldname = get_name_for_var_field((Var *) arg, fno,
-                                                   0, context);
-                appendStringInfo(buf, ".%s", quote_identifier(fieldname));
-            }
-            break;
-
-        case T_FieldStore:
-            {
-                FieldStore *fstore = (FieldStore *) node;
-                bool need_parens;
-
-                /*
-                 * There is no good way to represent a FieldStore as real SQL,
-                 * so decompilation of INSERT or UPDATE statements should
-                 * always use processIndirection as part of the
-                 * statement-level syntax. We should only get here when
-                 * EXPLAIN tries to print the targetlist of a plan resulting
-                 * from such a statement. The plan case is even harder than
-                 * ordinary rules would be, because the planner tries to
-                 * collapse multiple assignments to the same field or subfield
-                 * into one FieldStore; so we can see a list of target fields
-                 * not just one, and the arguments could be FieldStores
-                 * themselves. We don't bother to try to print the target
-                 * field names; we just print the source arguments, with a
-                 * ROW() around them if there's more than one. This isn't
-                 * terribly complete, but it's probably good enough for
-                 * EXPLAIN's purposes; especially since anything more would be
-                 * either hopelessly confusing or an even poorer
-                 * representation of what the plan is actually doing.
-                 */
-                need_parens = (list_length(fstore->newvals) != 1);
-                if (need_parens)
-                    appendStringInfoString(buf, "ROW(");
-                get_rule_expr((Node *) fstore->newvals, context, showimplicit);
-                if (need_parens)
-                    appendStringInfoChar(buf, ')');
-            }
-            break;
-
-        case T_RelabelType:
-            {
-                RelabelType *relabel = (RelabelType *) node;
-
-                /*
-                 * This is a Citus-specific modification: the planner converts
-                 * CollateExpr to RelabelType, and here we convert it back.
-                 */
-                if (relabel->resultcollid != InvalidOid)
-                {
-                    CollateExpr *collate = RelabelTypeToCollateExpr(relabel);
-                    Node *arg = (Node *) collate->arg;
-
-                    if (!PRETTY_PAREN(context))
-                        appendStringInfoChar(buf, '(');
-                    get_rule_expr_paren(arg, context, showimplicit, node);
-                    appendStringInfo(buf, " COLLATE %s",
-                                     generate_collation_name(collate->collOid));
-                    if (!PRETTY_PAREN(context))
-                        appendStringInfoChar(buf, ')');
-                }
-                else
-                {
-                    Node *arg = (Node *) relabel->arg;
-
-                    if (relabel->relabelformat == COERCE_IMPLICIT_CAST &&
-                        !showimplicit)
-                    {
-                        /* don't show the implicit cast */
-                        get_rule_expr_paren(arg, context, false, node);
-                    }
-                    else
-                    {
-                        get_coercion_expr(arg, context,
-                                          relabel->resulttype,
-                                          relabel->resulttypmod,
-                                          node);
-                    }
-                }
-            }
-            break;
-
-        case T_CoerceViaIO:
-            {
-                CoerceViaIO *iocoerce = (CoerceViaIO *) node;
-                Node *arg = (Node *) iocoerce->arg;
-
-                if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST &&
-                    !showimplicit)
-                {
-                    /* don't show the implicit cast */
-                    get_rule_expr_paren(arg, context, false, node);
-                }
-                else
-                {
-                    get_coercion_expr(arg, context,
-                                      iocoerce->resulttype,
-                                      -1,
-                                      node);
-                }
-            }
-            break;
-
-        case T_ArrayCoerceExpr:
-            {
-                ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
-                Node *arg = (Node *) acoerce->arg;
-
-                if (acoerce->coerceformat == COERCE_IMPLICIT_CAST &&
-                    !showimplicit)
-                {
-                    /* don't show the implicit cast */
-                    get_rule_expr_paren(arg, context, false, node);
-                }
-                else
-                {
-                    get_coercion_expr(arg, context,
-                                      acoerce->resulttype,
-                                      acoerce->resulttypmod,
-                                      node);
-                }
-            }
-            break;
-
-        case T_ConvertRowtypeExpr:
-            {
-                ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node;
-                Node *arg = (Node *) convert->arg;
-
-                if (convert->convertformat == COERCE_IMPLICIT_CAST &&
-                    !showimplicit)
-                {
-                    /* don't show the implicit cast */
-                    get_rule_expr_paren(arg, context, false,
node); - } - else - { - get_coercion_expr(arg, context, - convert->resulttype, -1, - node); - } - } - break; - - case T_CollateExpr: - { - CollateExpr *collate = (CollateExpr *) node; - Node *arg = (Node *) collate->arg; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg, context, showimplicit, node); - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(collate->collOid)); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_CaseExpr: - { - CaseExpr *caseexpr = (CaseExpr *) node; - ListCell *temp; - - appendContextKeyword(context, "CASE", - 0, PRETTYINDENT_VAR, 0); - if (caseexpr->arg) - { - appendStringInfoChar(buf, ' '); - get_rule_expr((Node *) caseexpr->arg, context, true); - } - foreach(temp, caseexpr->args) - { - CaseWhen *when = (CaseWhen *) lfirst(temp); - Node *w = (Node *) when->expr; - - if (caseexpr->arg) - { - /* - * The parser should have produced WHEN clauses of the - * form "CaseTestExpr = RHS", possibly with an - * implicit coercion inserted above the CaseTestExpr. - * For accurate decompilation of rules it's essential - * that we show just the RHS. However in an - * expression that's been through the optimizer, the - * WHEN clause could be almost anything (since the - * equality operator could have been expanded into an - * inline function). If we don't recognize the form - * of the WHEN clause, just punt and display it as-is. - */ - if (IsA(w, OpExpr)) - { - List *args = ((OpExpr *) w)->args; - - if (list_length(args) == 2 && - IsA(strip_implicit_coercions(linitial(args)), - CaseTestExpr)) - w = (Node *) lsecond(args); - } - } - - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "WHEN ", - 0, 0, 0); - get_rule_expr(w, context, false); - appendStringInfoString(buf, " THEN "); - get_rule_expr((Node *) when->result, context, true); - } - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "ELSE ", - 0, 0, 0); - get_rule_expr((Node *) caseexpr->defresult, context, true); - if (!PRETTY_INDENT(context)) - appendStringInfoChar(buf, ' '); - appendContextKeyword(context, "END", - -PRETTYINDENT_VAR, 0, 0); - } - break; - - case T_CaseTestExpr: - { - /* - * Normally we should never get here, since for expressions - * that can contain this node type we attempt to avoid - * recursing to it. But in an optimized expression we might - * be unable to avoid that (see comments for CaseExpr). If we - * do see one, print it as CASE_TEST_EXPR. - */ - appendStringInfoString(buf, "CASE_TEST_EXPR"); - } - break; - - case T_ArrayExpr: - { - ArrayExpr *arrayexpr = (ArrayExpr *) node; - - appendStringInfoString(buf, "ARRAY["); - get_rule_expr((Node *) arrayexpr->elements, context, true); - appendStringInfoChar(buf, ']'); - - /* - * If the array isn't empty, we assume its elements are - * coerced to the desired type. If it's empty, though, we - * need an explicit coercion to the array type. - */ - if (arrayexpr->elements == NIL) - appendStringInfo(buf, "::%s", - format_type_with_typemod(arrayexpr->array_typeid, -1)); - } - break; - - case T_RowExpr: - { - RowExpr *rowexpr = (RowExpr *) node; - TupleDesc tupdesc = NULL; - ListCell *arg; - int i; - char *sep; - - /* - * If it's a named type and not RECORD, we may have to skip - * dropped columns and/or claim there are NULLs for added - * columns. 
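- *
- * For example (hypothetical rowtype): if attribute 2 of the named type
- * has been dropped, the matching element of rowexpr->args is skipped,
- * and if an attribute was added to the type after this expression was
- * stored, the trailing while-loop pads it out, yielding something like
- *
- *     ROW(a_expr, c_expr, NULL)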
- */ - if (rowexpr->row_typeid != RECORDOID) - { - tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1); - Assert(list_length(rowexpr->args) <= tupdesc->natts); - } - - /* - * SQL99 allows "ROW" to be omitted when there is more than - * one column, but for simplicity we always print it. - */ - appendStringInfoString(buf, "ROW("); - sep = ""; - i = 0; - foreach(arg, rowexpr->args) - { - Node *e = (Node *) lfirst(arg); - - if (tupdesc == NULL || - !TupleDescAttr(tupdesc, i)->attisdropped) - { - appendStringInfoString(buf, sep); - /* Whole-row Vars need special treatment here */ - get_rule_expr_toplevel(e, context, true); - sep = ", "; - } - i++; - } - if (tupdesc != NULL) - { - while (i < tupdesc->natts) - { - if (!TupleDescAttr(tupdesc, i)->attisdropped) - { - appendStringInfoString(buf, sep); - appendStringInfoString(buf, "NULL"); - sep = ", "; - } - i++; - } - - ReleaseTupleDesc(tupdesc); - } - appendStringInfoChar(buf, ')'); - if (rowexpr->row_format == COERCE_EXPLICIT_CAST) - appendStringInfo(buf, "::%s", - format_type_with_typemod(rowexpr->row_typeid, -1)); - } - break; - - case T_RowCompareExpr: - { - RowCompareExpr *rcexpr = (RowCompareExpr *) node; - ListCell *arg; - char *sep; - - /* - * SQL99 allows "ROW" to be omitted when there is more than - * one column, but for simplicity we always print it. - */ - appendStringInfoString(buf, "(ROW("); - sep = ""; - foreach(arg, rcexpr->largs) - { - Node *e = (Node *) lfirst(arg); - - appendStringInfoString(buf, sep); - get_rule_expr(e, context, true); - sep = ", "; - } - - /* - * We assume that the name of the first-column operator will - * do for all the rest too. This is definitely open to - * failure, eg if some but not all operators were renamed - * since the construct was parsed, but there seems no way to - * be perfect. - */ - appendStringInfo(buf, ") %s ROW(", - generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs)))); - sep = ""; - foreach(arg, rcexpr->rargs) - { - Node *e = (Node *) lfirst(arg); - - appendStringInfoString(buf, sep); - get_rule_expr(e, context, true); - sep = ", "; - } - appendStringInfoString(buf, "))"); - } - break; - - case T_CoalesceExpr: - { - CoalesceExpr *coalesceexpr = (CoalesceExpr *) node; - - appendStringInfoString(buf, "COALESCE("); - get_rule_expr((Node *) coalesceexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_MinMaxExpr: - { - MinMaxExpr *minmaxexpr = (MinMaxExpr *) node; - - switch (minmaxexpr->op) - { - case IS_GREATEST: - appendStringInfoString(buf, "GREATEST("); - break; - case IS_LEAST: - appendStringInfoString(buf, "LEAST("); - break; - } - get_rule_expr((Node *) minmaxexpr->args, context, true); - appendStringInfoChar(buf, ')'); - } - break; - - case T_SQLValueFunction: - { - SQLValueFunction *svf = (SQLValueFunction *) node; - - /* - * Note: this code knows that typmod for time, timestamp, and - * timestamptz just prints as integer. 
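- *
- * That is, the "_N" variants interpolate the typmod directly, e.g.
- *
- *     appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod);
- *
- * which round-trips an expression such as CURRENT_TIME(3) unchanged.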
- */ - switch (svf->op) - { - case SVFOP_CURRENT_DATE: - appendStringInfoString(buf, "CURRENT_DATE"); - break; - case SVFOP_CURRENT_TIME: - appendStringInfoString(buf, "CURRENT_TIME"); - break; - case SVFOP_CURRENT_TIME_N: - appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod); - break; - case SVFOP_CURRENT_TIMESTAMP: - appendStringInfoString(buf, "CURRENT_TIMESTAMP"); - break; - case SVFOP_CURRENT_TIMESTAMP_N: - appendStringInfo(buf, "CURRENT_TIMESTAMP(%d)", - svf->typmod); - break; - case SVFOP_LOCALTIME: - appendStringInfoString(buf, "LOCALTIME"); - break; - case SVFOP_LOCALTIME_N: - appendStringInfo(buf, "LOCALTIME(%d)", svf->typmod); - break; - case SVFOP_LOCALTIMESTAMP: - appendStringInfoString(buf, "LOCALTIMESTAMP"); - break; - case SVFOP_LOCALTIMESTAMP_N: - appendStringInfo(buf, "LOCALTIMESTAMP(%d)", - svf->typmod); - break; - case SVFOP_CURRENT_ROLE: - appendStringInfoString(buf, "CURRENT_ROLE"); - break; - case SVFOP_CURRENT_USER: - appendStringInfoString(buf, "CURRENT_USER"); - break; - case SVFOP_USER: - appendStringInfoString(buf, "USER"); - break; - case SVFOP_SESSION_USER: - appendStringInfoString(buf, "SESSION_USER"); - break; - case SVFOP_CURRENT_CATALOG: - appendStringInfoString(buf, "CURRENT_CATALOG"); - break; - case SVFOP_CURRENT_SCHEMA: - appendStringInfoString(buf, "CURRENT_SCHEMA"); - break; - } - } - break; - - case T_XmlExpr: - { - XmlExpr *xexpr = (XmlExpr *) node; - bool needcomma = false; - ListCell *arg; - ListCell *narg; - Const *con; - - switch (xexpr->op) - { - case IS_XMLCONCAT: - appendStringInfoString(buf, "XMLCONCAT("); - break; - case IS_XMLELEMENT: - appendStringInfoString(buf, "XMLELEMENT("); - break; - case IS_XMLFOREST: - appendStringInfoString(buf, "XMLFOREST("); - break; - case IS_XMLPARSE: - appendStringInfoString(buf, "XMLPARSE("); - break; - case IS_XMLPI: - appendStringInfoString(buf, "XMLPI("); - break; - case IS_XMLROOT: - appendStringInfoString(buf, "XMLROOT("); - break; - case IS_XMLSERIALIZE: - appendStringInfoString(buf, "XMLSERIALIZE("); - break; - case IS_DOCUMENT: - break; - } - if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE) - { - if (xexpr->xmloption == XMLOPTION_DOCUMENT) - appendStringInfoString(buf, "DOCUMENT "); - else - appendStringInfoString(buf, "CONTENT "); - } - if (xexpr->name) - { - appendStringInfo(buf, "NAME %s", - quote_identifier(map_xml_name_to_sql_identifier(xexpr->name))); - needcomma = true; - } - if (xexpr->named_args) - { - if (xexpr->op != IS_XMLFOREST) - { - if (needcomma) - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, "XMLATTRIBUTES("); - needcomma = false; - } - forboth(arg, xexpr->named_args, narg, xexpr->arg_names) - { - Node *e = (Node *) lfirst(arg); - char *argname = strVal(lfirst(narg)); - - if (needcomma) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) e, context, true); - appendStringInfo(buf, " AS %s", - quote_identifier(map_xml_name_to_sql_identifier(argname))); - needcomma = true; - } - if (xexpr->op != IS_XMLFOREST) - appendStringInfoChar(buf, ')'); - } - if (xexpr->args) - { - if (needcomma) - appendStringInfoString(buf, ", "); - switch (xexpr->op) - { - case IS_XMLCONCAT: - case IS_XMLELEMENT: - case IS_XMLFOREST: - case IS_XMLPI: - case IS_XMLSERIALIZE: - /* no extra decoration needed */ - get_rule_expr((Node *) xexpr->args, context, true); - break; - case IS_XMLPARSE: - Assert(list_length(xexpr->args) == 2); - - get_rule_expr((Node *) linitial(xexpr->args), - context, true); - - con = lsecond_node(Const, xexpr->args); - 
Assert(!con->constisnull); - if (DatumGetBool(con->constvalue)) - appendStringInfoString(buf, - " PRESERVE WHITESPACE"); - else - appendStringInfoString(buf, - " STRIP WHITESPACE"); - break; - case IS_XMLROOT: - Assert(list_length(xexpr->args) == 3); - - get_rule_expr((Node *) linitial(xexpr->args), - context, true); - - appendStringInfoString(buf, ", VERSION "); - con = (Const *) lsecond(xexpr->args); - if (IsA(con, Const) && - con->constisnull) - appendStringInfoString(buf, "NO VALUE"); - else - get_rule_expr((Node *) con, context, false); - - con = lthird_node(Const, xexpr->args); - if (con->constisnull) - /* suppress STANDALONE NO VALUE */ ; - else - { - switch (DatumGetInt32(con->constvalue)) - { - case XML_STANDALONE_YES: - appendStringInfoString(buf, - ", STANDALONE YES"); - break; - case XML_STANDALONE_NO: - appendStringInfoString(buf, - ", STANDALONE NO"); - break; - case XML_STANDALONE_NO_VALUE: - appendStringInfoString(buf, - ", STANDALONE NO VALUE"); - break; - default: - break; - } - } - break; - case IS_DOCUMENT: - get_rule_expr_paren((Node *) xexpr->args, context, false, node); - break; - } - - } - if (xexpr->op == IS_XMLSERIALIZE) - appendStringInfo(buf, " AS %s", - format_type_with_typemod(xexpr->type, - xexpr->typmod)); - if (xexpr->op == IS_DOCUMENT) - appendStringInfoString(buf, " IS DOCUMENT"); - else - appendStringInfoChar(buf, ')'); - } - break; - - case T_NullTest: - { - NullTest *ntest = (NullTest *) node; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren((Node *) ntest->arg, context, true, node); - - /* - * For scalar inputs, we prefer to print as IS [NOT] NULL, - * which is shorter and traditional. If it's a rowtype input - * but we're applying a scalar test, must print IS [NOT] - * DISTINCT FROM NULL to be semantically correct. 
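- *
- * The difference is observable with a hypothetical composite value r:
- * "r IS NULL" is true only when every field of r is null, whereas
- * "r IS NOT DISTINCT FROM NULL" tests whether r itself is the null
- * value, which is what a scalar-level NullTest actually computes.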
- */ - if (ntest->argisrow || - !type_is_rowtype(exprType((Node *) ntest->arg))) - { - switch (ntest->nulltesttype) - { - case IS_NULL: - appendStringInfoString(buf, " IS NULL"); - break; - case IS_NOT_NULL: - appendStringInfoString(buf, " IS NOT NULL"); - break; - default: - elog(ERROR, "unrecognized nulltesttype: %d", - (int) ntest->nulltesttype); - } - } - else - { - switch (ntest->nulltesttype) - { - case IS_NULL: - appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL"); - break; - case IS_NOT_NULL: - appendStringInfoString(buf, " IS DISTINCT FROM NULL"); - break; - default: - elog(ERROR, "unrecognized nulltesttype: %d", - (int) ntest->nulltesttype); - } - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_BooleanTest: - { - BooleanTest *btest = (BooleanTest *) node; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren((Node *) btest->arg, context, false, node); - switch (btest->booltesttype) - { - case IS_TRUE: - appendStringInfoString(buf, " IS TRUE"); - break; - case IS_NOT_TRUE: - appendStringInfoString(buf, " IS NOT TRUE"); - break; - case IS_FALSE: - appendStringInfoString(buf, " IS FALSE"); - break; - case IS_NOT_FALSE: - appendStringInfoString(buf, " IS NOT FALSE"); - break; - case IS_UNKNOWN: - appendStringInfoString(buf, " IS UNKNOWN"); - break; - case IS_NOT_UNKNOWN: - appendStringInfoString(buf, " IS NOT UNKNOWN"); - break; - default: - elog(ERROR, "unrecognized booltesttype: %d", - (int) btest->booltesttype); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - break; - - case T_CoerceToDomain: - { - CoerceToDomain *ctest = (CoerceToDomain *) node; - Node *arg = (Node *) ctest->arg; - - if (ctest->coercionformat == COERCE_IMPLICIT_CAST && - !showimplicit) - { - /* don't show the implicit cast */ - get_rule_expr(arg, context, false); - } - else - { - get_coercion_expr(arg, context, - ctest->resulttype, - ctest->resulttypmod, - node); - } - } - break; - - case T_CoerceToDomainValue: - appendStringInfoString(buf, "VALUE"); - break; - - case T_SetToDefault: - appendStringInfoString(buf, "DEFAULT"); - break; - - case T_CurrentOfExpr: - { - CurrentOfExpr *cexpr = (CurrentOfExpr *) node; - - if (cexpr->cursor_name) - appendStringInfo(buf, "CURRENT OF %s", - quote_identifier(cexpr->cursor_name)); - else - appendStringInfo(buf, "CURRENT OF $%d", - cexpr->cursor_param); - } - break; - - case T_NextValueExpr: - { - NextValueExpr *nvexpr = (NextValueExpr *) node; - - /* - * This isn't exactly nextval(), but that seems close enough - * for EXPLAIN's purposes. - */ - appendStringInfoString(buf, "nextval("); - simple_quote_literal(buf, - generate_relation_name(nvexpr->seqid, - NIL)); - appendStringInfoChar(buf, ')'); - } - break; - - case T_InferenceElem: - { - InferenceElem *iexpr = (InferenceElem *) node; - bool save_varprefix; - bool need_parens; - - /* - * InferenceElem can only refer to target relation, so a - * prefix is not useful, and indeed would cause parse errors. - */ - save_varprefix = context->varprefix; - context->varprefix = false; - - /* - * Parenthesize the element unless it's a simple Var or a bare - * function call. Follows pg_get_indexdef_worker(). 
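- *
- * This mirrors the arbiter-index syntax: a hypothetical inference
- * element on lower(email) deparses as "ON CONFLICT (lower(email))"
- * with no extra parentheses, while an expression that is neither a
- * Var nor a bare function call gets wrapped, just as
- * pg_get_indexdef_worker() wraps index expressions.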
- */ - need_parens = !IsA(iexpr->expr, Var); - if (IsA(iexpr->expr, FuncExpr) && - ((FuncExpr *) iexpr->expr)->funcformat == - COERCE_EXPLICIT_CALL) - need_parens = false; - - if (need_parens) - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) iexpr->expr, - context, false); - if (need_parens) - appendStringInfoChar(buf, ')'); - - context->varprefix = save_varprefix; - - if (iexpr->infercollid) - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(iexpr->infercollid)); - - /* Add the operator class name, if not default */ - if (iexpr->inferopclass) - { - Oid inferopclass = iexpr->inferopclass; - Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass); - - get_opclass_name(inferopclass, inferopcinputtype, buf); - } - } - break; - - case T_PartitionBoundSpec: - { - PartitionBoundSpec *spec = (PartitionBoundSpec *) node; - ListCell *cell; - char *sep; - - if (spec->is_default) - { - appendStringInfoString(buf, "DEFAULT"); - break; - } - - switch (spec->strategy) - { - case PARTITION_STRATEGY_HASH: - Assert(spec->modulus > 0 && spec->remainder >= 0); - Assert(spec->modulus > spec->remainder); - - appendStringInfoString(buf, "FOR VALUES"); - appendStringInfo(buf, " WITH (modulus %d, remainder %d)", - spec->modulus, spec->remainder); - break; - - case PARTITION_STRATEGY_LIST: - Assert(spec->listdatums != NIL); - - appendStringInfoString(buf, "FOR VALUES IN ("); - sep = ""; - foreach(cell, spec->listdatums) - { - Const *val = castNode(Const, lfirst(cell)); - - appendStringInfoString(buf, sep); - get_const_expr(val, context, -1); - sep = ", "; - } - - appendStringInfoChar(buf, ')'); - break; - - case PARTITION_STRATEGY_RANGE: - Assert(spec->lowerdatums != NIL && - spec->upperdatums != NIL && - list_length(spec->lowerdatums) == - list_length(spec->upperdatums)); - - appendStringInfo(buf, "FOR VALUES FROM %s TO %s", - get_range_partbound_string(spec->lowerdatums), - get_range_partbound_string(spec->upperdatums)); - break; - - default: - elog(ERROR, "unrecognized partition strategy: %d", - (int) spec->strategy); - break; - } - } - break; - - case T_List: - { - char *sep; - ListCell *l; - - sep = ""; - foreach(l, (List *) node) - { - appendStringInfoString(buf, sep); - get_rule_expr((Node *) lfirst(l), context, showimplicit); - sep = ", "; - } - } - break; - - case T_TableFunc: - get_tablefunc((TableFunc *) node, context, showimplicit); - break; - - case T_CallStmt: - get_func_expr(((CallStmt *) node)->funcexpr, context, showimplicit); - break; - - default: - elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); - break; - } -} - -/* - * get_rule_expr_toplevel - Parse back a toplevel expression - * - * Same as get_rule_expr(), except that if the expr is just a Var, we pass - * istoplevel = true not false to get_variable(). This causes whole-row Vars - * to get printed with decoration that will prevent expansion of "*". - * We need to use this in contexts such as ROW() and VALUES(), where the - * parser would expand "foo.*" appearing at top level. (In principle we'd - * use this in get_target_list() too, but that has additional worries about - * whether to print AS, so it needs to invoke get_variable() directly anyway.) 
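- *
- * Example of the hazard (hypothetical table t): inside ROW(), a bare
- * "t.*" would be re-expanded by the parser into individual columns,
- * so the whole-row Var must be printed with a cast to its composite
- * type to keep it a single value.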
- */ -static void -get_rule_expr_toplevel(Node *node, deparse_context *context, - bool showimplicit) -{ - if (node && IsA(node, Var)) - (void) get_variable((Var *) node, 0, true, context); - else - get_rule_expr(node, context, showimplicit); -} - -/* - * get_rule_expr_funccall - Parse back a function-call expression - * - * Same as get_rule_expr(), except that we guarantee that the output will - * look like a function call, or like one of the things the grammar treats as - * equivalent to a function call (see the func_expr_windowless production). - * This is needed in places where the grammar uses func_expr_windowless and - * you can't substitute a parenthesized a_expr. If what we have isn't going - * to look like a function call, wrap it in a dummy CAST() expression, which - * will satisfy the grammar --- and, indeed, is likely what the user wrote to - * produce such a thing. - */ -static void -get_rule_expr_funccall(Node *node, deparse_context *context, - bool showimplicit) -{ - if (looks_like_function(node)) - get_rule_expr(node, context, showimplicit); - else - { - StringInfo buf = context->buf; - - appendStringInfoString(buf, "CAST("); - /* no point in showing any top-level implicit cast */ - get_rule_expr(node, context, false); - appendStringInfo(buf, " AS %s)", - format_type_with_typemod(exprType(node), - exprTypmod(node))); - } -} - -/* - * Helper function to identify node types that satisfy func_expr_windowless. - * If in doubt, "false" is always a safe answer. - */ -static bool -looks_like_function(Node *node) -{ - if (node == NULL) - return false; /* probably shouldn't happen */ - switch (nodeTag(node)) - { - case T_FuncExpr: - /* OK, unless it's going to deparse as a cast */ - return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL); - case T_NullIfExpr: - case T_CoalesceExpr: - case T_MinMaxExpr: - case T_SQLValueFunction: - case T_XmlExpr: - /* these are all accepted by func_expr_common_subexpr */ - return true; - default: - break; - } - return false; -} - - -/* - * get_oper_expr - Parse back an OpExpr node - */ -static void -get_oper_expr(OpExpr *expr, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid opno = expr->opno; - List *args = expr->args; - - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - if (list_length(args) == 2) - { - /* binary operator */ - Node *arg1 = (Node *) linitial(args); - Node *arg2 = (Node *) lsecond(args); - - get_rule_expr_paren(arg1, context, true, (Node *) expr); - appendStringInfo(buf, " %s ", - generate_operator_name(opno, - exprType(arg1), - exprType(arg2))); - get_rule_expr_paren(arg2, context, true, (Node *) expr); - } - else - { - /* unary operator --- but which side? 
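- *
- * oprkind settles it: 'l' is a prefix operator, deparsed as "op arg"
- * (e.g. "-x"), while 'r' is a postfix operator, deparsed as "arg op";
- * either way the operand keeps its own parentheses via
- * get_rule_expr_paren().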
*/ - Node *arg = (Node *) linitial(args); - HeapTuple tp; - Form_pg_operator optup; - - tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for operator %u", opno); - optup = (Form_pg_operator) GETSTRUCT(tp); - switch (optup->oprkind) - { - case 'l': - appendStringInfo(buf, "%s ", - generate_operator_name(opno, - InvalidOid, - exprType(arg))); - get_rule_expr_paren(arg, context, true, (Node *) expr); - break; - case 'r': - get_rule_expr_paren(arg, context, true, (Node *) expr); - appendStringInfo(buf, " %s", - generate_operator_name(opno, - exprType(arg), - InvalidOid)); - break; - default: - elog(ERROR, "bogus oprkind: %d", optup->oprkind); - } - ReleaseSysCache(tp); - } - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); -} - -/* - * get_func_expr - Parse back a FuncExpr node - */ -static void -get_func_expr(FuncExpr *expr, deparse_context *context, - bool showimplicit) -{ - StringInfo buf = context->buf; - Oid funcoid = expr->funcid; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - List *argnames; - bool use_variadic; - ListCell *l; - - /* - * If the function call came from an implicit coercion, then just show the - * first argument --- unless caller wants to see implicit coercions. - */ - if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit) - { - get_rule_expr_paren((Node *) linitial(expr->args), context, - false, (Node *) expr); - return; - } - - /* - * If the function call came from a cast, then show the first argument - * plus an explicit cast operation. - */ - if (expr->funcformat == COERCE_EXPLICIT_CAST || - expr->funcformat == COERCE_IMPLICIT_CAST) - { - Node *arg = linitial(expr->args); - Oid rettype = expr->funcresulttype; - int32 coercedTypmod; - - /* Get the typmod if this is a length-coercion function */ - (void) exprIsLengthCoercion((Node *) expr, &coercedTypmod); - - get_coercion_expr(arg, context, - rettype, coercedTypmod, - (Node *) expr); - - return; - } - - /* - * Normal function: display as proname(args). First we need to extract - * the argument datatypes. - */ - if (list_length(expr->args) > FUNC_MAX_ARGS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many arguments"))); - nargs = 0; - argnames = NIL; - foreach(l, expr->args) - { - Node *arg = (Node *) lfirst(l); - - if (IsA(arg, NamedArgExpr)) - argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); - argtypes[nargs] = exprType(arg); - nargs++; - } - - appendStringInfo(buf, "%s(", - generate_function_name(funcoid, nargs, - argnames, argtypes, - expr->funcvariadic, - &use_variadic, - context->special_exprkind)); - nargs = 0; - foreach(l, expr->args) - { - if (nargs++ > 0) - appendStringInfoString(buf, ", "); - if (use_variadic && lnext(l) == NULL) - appendStringInfoString(buf, "VARIADIC "); - get_rule_expr((Node *) lfirst(l), context, true); - } - appendStringInfoChar(buf, ')'); -} - -/* - * get_agg_expr - Parse back an Aggref node - */ -static void -get_agg_expr(Aggref *aggref, deparse_context *context, - Aggref *original_aggref) -{ - StringInfo buf = context->buf; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - bool use_variadic; - - /* - * For a combining aggregate, we look up and deparse the corresponding - * partial aggregate instead. This is necessary because our input - * argument list has been replaced; the new argument list always has just - * one element, which will point to a partial Aggref that supplies us with - * transition states to combine. 
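- *
- * For example, the Finalize Aggregate step of a parallel count(*) carries
- * such a combining Aggref; deparsing the partial Aggref it points at is
- * what lets EXPLAIN VERBOSE show the aggregate as "count(*)" again rather
- * than as its internal transition-state argument.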
- */ - if (DO_AGGSPLIT_COMBINE(aggref->aggsplit)) - { - TargetEntry *tle = linitial_node(TargetEntry, aggref->args); - - Assert(list_length(aggref->args) == 1); - resolve_special_varno((Node *) tle->expr, context, original_aggref, - get_agg_combine_expr); - return; - } - - /* - * Mark as PARTIAL, if appropriate. We look to the original aggref so as - * to avoid printing this when recursing from the code just above. - */ - if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit)) - appendStringInfoString(buf, "PARTIAL "); - - /* Extract the argument types as seen by the parser */ - nargs = get_aggregate_argtypes(aggref, argtypes); - - /* Print the aggregate name, schema-qualified if needed */ - appendStringInfo(buf, "%s(%s", - generate_function_name(aggref->aggfnoid, nargs, - NIL, argtypes, - aggref->aggvariadic, - &use_variadic, - context->special_exprkind), - (aggref->aggdistinct != NIL) ? "DISTINCT " : ""); - - if (AGGKIND_IS_ORDERED_SET(aggref->aggkind)) - { - /* - * Ordered-set aggregates do not use "*" syntax. Also, we needn't - * worry about inserting VARIADIC. So we can just dump the direct - * args as-is. - */ - Assert(!aggref->aggvariadic); - get_rule_expr((Node *) aggref->aggdirectargs, context, true); - Assert(aggref->aggorder != NIL); - appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY "); - get_rule_orderby(aggref->aggorder, aggref->args, false, context); - } - else - { - /* aggstar can be set only in zero-argument aggregates */ - if (aggref->aggstar) - appendStringInfoChar(buf, '*'); - else - { - ListCell *l; - int i; - - i = 0; - foreach(l, aggref->args) - { - TargetEntry *tle = (TargetEntry *) lfirst(l); - Node *arg = (Node *) tle->expr; - - Assert(!IsA(arg, NamedArgExpr)); - if (tle->resjunk) - continue; - if (i++ > 0) - appendStringInfoString(buf, ", "); - if (use_variadic && i == nargs) - appendStringInfoString(buf, "VARIADIC "); - get_rule_expr(arg, context, true); - } - } - - if (aggref->aggorder != NIL) - { - appendStringInfoString(buf, " ORDER BY "); - get_rule_orderby(aggref->aggorder, aggref->args, false, context); - } - } - - if (aggref->aggfilter != NULL) - { - appendStringInfoString(buf, ") FILTER (WHERE "); - get_rule_expr((Node *) aggref->aggfilter, context, false); - } - - appendStringInfoChar(buf, ')'); -} - -/* - * This is a helper function for get_agg_expr(). It's used when we deparse - * a combining Aggref; resolve_special_varno locates the corresponding partial - * Aggref and then calls this. 
- */ -static void -get_agg_combine_expr(Node *node, deparse_context *context, void *private) -{ - Aggref *aggref; - Aggref *original_aggref = private; - - if (!IsA(node, Aggref)) - elog(ERROR, "combining Aggref does not point to an Aggref"); - - aggref = (Aggref *) node; - get_agg_expr(aggref, context, original_aggref); -} - -/* - * get_windowfunc_expr - Parse back a WindowFunc node - */ -static void -get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid argtypes[FUNC_MAX_ARGS]; - int nargs; - List *argnames; - ListCell *l; - - if (list_length(wfunc->args) > FUNC_MAX_ARGS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg("too many arguments"))); - nargs = 0; - argnames = NIL; - foreach(l, wfunc->args) - { - Node *arg = (Node *) lfirst(l); - - if (IsA(arg, NamedArgExpr)) - argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); - argtypes[nargs] = exprType(arg); - nargs++; - } - - appendStringInfo(buf, "%s(", - generate_function_name(wfunc->winfnoid, nargs, - argnames, argtypes, - false, NULL, - context->special_exprkind)); - /* winstar can be set only in zero-argument aggregates */ - if (wfunc->winstar) - appendStringInfoChar(buf, '*'); - else - get_rule_expr((Node *) wfunc->args, context, true); - - if (wfunc->aggfilter != NULL) - { - appendStringInfoString(buf, ") FILTER (WHERE "); - get_rule_expr((Node *) wfunc->aggfilter, context, false); - } - - appendStringInfoString(buf, ") OVER "); - - foreach(l, context->windowClause) - { - WindowClause *wc = (WindowClause *) lfirst(l); - - if (wc->winref == wfunc->winref) - { - if (wc->name) - appendStringInfoString(buf, quote_identifier(wc->name)); - else - get_rule_windowspec(wc, context->windowTList, context); - break; - } - } - if (l == NULL) - { - if (context->windowClause) - elog(ERROR, "could not find window clause for winref %u", - wfunc->winref); - - /* - * In EXPLAIN, we don't have window context information available, so - * we have to settle for this: - */ - appendStringInfoString(buf, "(?)"); - } -} - -/* ---------- - * get_coercion_expr - * - * Make a string representation of a value coerced to a specific type - * ---------- - */ -static void -get_coercion_expr(Node *arg, deparse_context *context, - Oid resulttype, int32 resulttypmod, - Node *parentNode) -{ - StringInfo buf = context->buf; - - /* - * Since parse_coerce.c doesn't immediately collapse application of - * length-coercion functions to constants, what we'll typically see in - * such cases is a Const with typmod -1 and a length-coercion function - * right above it. Avoid generating redundant output. However, beware of - * suppressing casts when the user actually wrote something like - * 'foo'::text::char(3). - * - * Note: it might seem that we are missing the possibility of needing to - * print a COLLATE clause for such a Const. However, a Const could only - * have nondefault collation in a post-constant-folding tree, in which the - * length coercion would have been folded too. See also the special - * handling of CollateExpr in coerce_to_target_type(): any collation - * marking will be above the coercion node, not below it. 
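- *
- * For example, the literal 'foo'::character(3) is typically a Const of
- * typmod -1 under a length-coercion function call; the check below prints
- * it as a single cast instead of stacking a redundant inner cast.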
- */ - if (arg && IsA(arg, Const) && - ((Const *) arg)->consttype == resulttype && - ((Const *) arg)->consttypmod == -1) - { - /* Show the constant without normal ::typename decoration */ - get_const_expr((Const *) arg, context, -1); - } - else - { - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, '('); - get_rule_expr_paren(arg, context, false, parentNode); - if (!PRETTY_PAREN(context)) - appendStringInfoChar(buf, ')'); - } - appendStringInfo(buf, "::%s", - format_type_with_typemod(resulttype, resulttypmod)); -} - -/* ---------- - * get_const_expr - * - * Make a string representation of a Const - * - * showtype can be -1 to never show "::typename" decoration, or +1 to always - * show it, or 0 to show it only if the constant wouldn't be assumed to be - * the right type by default. - * - * If the Const's collation isn't default for its type, show that too. - * We mustn't do this when showtype is -1 (since that means the caller will - * print "::typename", and we can't put a COLLATE clause in between). It's - * caller's responsibility that collation isn't missed in such cases. - * ---------- - */ -static void -get_const_expr(Const *constval, deparse_context *context, int showtype) -{ - StringInfo buf = context->buf; - Oid typoutput; - bool typIsVarlena; - char *extval; - bool needlabel = false; - - if (constval->constisnull) - { - /* - * Always label the type of a NULL constant to prevent misdecisions - * about type when reparsing. - */ - appendStringInfoString(buf, "NULL"); - if (showtype >= 0) - { - appendStringInfo(buf, "::%s", - format_type_with_typemod(constval->consttype, - constval->consttypmod)); - get_const_collation(constval, context); - } - return; - } - - getTypeOutputInfo(constval->consttype, - &typoutput, &typIsVarlena); - - extval = OidOutputFunctionCall(typoutput, constval->constvalue); - - switch (constval->consttype) - { - case INT4OID: - - /* - * INT4 can be printed without any decoration, unless it is - * negative; in that case print it as '-nnn'::integer to ensure - * that the output will re-parse as a constant, not as a constant - * plus operator. In most cases we could get away with printing - * (-nnn) instead, because of the way that gram.y handles negative - * literals; but that doesn't work for INT_MIN, and it doesn't - * seem that much prettier anyway. - */ - if (extval[0] != '-') - appendStringInfoString(buf, extval); - else - { - appendStringInfo(buf, "'%s'", extval); - needlabel = true; /* we must attach a cast */ - } - break; - - case NUMERICOID: - - /* - * NUMERIC can be printed without quotes if it looks like a float - * constant (not an integer, and not Infinity or NaN) and doesn't - * have a leading sign (for the same reason as for INT4). - */ - if (isdigit((unsigned char) extval[0]) && - strcspn(extval, "eE.") != strlen(extval)) - { - appendStringInfoString(buf, extval); - } - else - { - appendStringInfo(buf, "'%s'", extval); - needlabel = true; /* we must attach a cast */ - } - break; - - case BITOID: - case VARBITOID: - appendStringInfo(buf, "B'%s'", extval); - break; - - case BOOLOID: - if (strcmp(extval, "t") == 0) - appendStringInfoString(buf, "true"); - else - appendStringInfoString(buf, "false"); - break; - - default: - simple_quote_literal(buf, extval); - break; - } - - pfree(extval); - - if (showtype < 0) - return; - - /* - * For showtype == 0, append ::typename unless the constant will be - * implicitly typed as the right type when it is read in. 
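- *
- * For example, 42 needs no label at all, while -42 must print as
- * '-42'::integer so that it re-parses as a single constant rather than a
- * constant under a unary minus.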
- * - * XXX this code has to be kept in sync with the behavior of the parser, - * especially make_const. - */ - switch (constval->consttype) - { - case BOOLOID: - case UNKNOWNOID: - /* These types can be left unlabeled */ - needlabel = false; - break; - case INT4OID: - /* We determined above whether a label is needed */ - break; - case NUMERICOID: - - /* - * Float-looking constants will be typed as numeric, which we - * checked above; but if there's a nondefault typmod we need to - * show it. - */ - needlabel |= (constval->consttypmod >= 0); - break; - default: - needlabel = true; - break; - } - if (needlabel || showtype > 0) - appendStringInfo(buf, "::%s", - format_type_with_typemod(constval->consttype, - constval->consttypmod)); - - get_const_collation(constval, context); -} - -/* - * helper for get_const_expr: append COLLATE if needed - */ -static void -get_const_collation(Const *constval, deparse_context *context) -{ - StringInfo buf = context->buf; - - if (OidIsValid(constval->constcollid)) - { - Oid typcollation = get_typcollation(constval->consttype); - - if (constval->constcollid != typcollation) - { - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(constval->constcollid)); - } - } -} - -/* - * simple_quote_literal - Format a string as a SQL literal, append to buf - */ -static void -simple_quote_literal(StringInfo buf, const char *val) -{ - const char *valptr; - - /* - * We form the string literal according to the prevailing setting of - * standard_conforming_strings; we never use E''. User is responsible for - * making sure result is used correctly. - */ - appendStringInfoChar(buf, '\''); - for (valptr = val; *valptr; valptr++) - { - char ch = *valptr; - - if (SQL_STR_DOUBLE(ch, !standard_conforming_strings)) - appendStringInfoChar(buf, ch); - appendStringInfoChar(buf, ch); - } - appendStringInfoChar(buf, '\''); -} - - -/* ---------- - * get_sublink_expr - Parse back a sublink - * ---------- - */ -static void -get_sublink_expr(SubLink *sublink, deparse_context *context) -{ - StringInfo buf = context->buf; - Query *query = (Query *) (sublink->subselect); - char *opname = NULL; - bool need_paren; - - if (sublink->subLinkType == ARRAY_SUBLINK) - appendStringInfoString(buf, "ARRAY("); - else - appendStringInfoChar(buf, '('); - - /* - * Note that we print the name of only the first operator, when there are - * multiple combining operators. This is an approximation that could go - * wrong in various scenarios (operators in different schemas, renamed - * operators, etc) but there is not a whole lot we can do about it, since - * the syntax allows only one operator to be shown. 
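- *
- * For example, "(a, b) = ANY (SELECT ...)" is printed using only the "="
- * of the first column pair, which the ANY_SUBLINK case below then renders
- * as "IN (SELECT ...)".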
- */ - if (sublink->testexpr) - { - if (IsA(sublink->testexpr, OpExpr)) - { - /* single combining operator */ - OpExpr *opexpr = (OpExpr *) sublink->testexpr; - - get_rule_expr(linitial(opexpr->args), context, true); - opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); - } - else if (IsA(sublink->testexpr, BoolExpr)) - { - /* multiple combining operators, = or <> cases */ - char *sep; - ListCell *l; - - appendStringInfoChar(buf, '('); - sep = ""; - foreach(l, ((BoolExpr *) sublink->testexpr)->args) - { - OpExpr *opexpr = lfirst_node(OpExpr, l); - - appendStringInfoString(buf, sep); - get_rule_expr(linitial(opexpr->args), context, true); - if (!opname) - opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); - sep = ", "; - } - appendStringInfoChar(buf, ')'); - } - else if (IsA(sublink->testexpr, RowCompareExpr)) - { - /* multiple combining operators, < <= > >= cases */ - RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr; - - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) rcexpr->largs, context, true); - opname = generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs))); - appendStringInfoChar(buf, ')'); - } - else - elog(ERROR, "unrecognized testexpr type: %d", - (int) nodeTag(sublink->testexpr)); - } - - need_paren = true; - - switch (sublink->subLinkType) - { - case EXISTS_SUBLINK: - appendStringInfoString(buf, "EXISTS "); - break; - - case ANY_SUBLINK: - if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */ - appendStringInfoString(buf, " IN "); - else - appendStringInfo(buf, " %s ANY ", opname); - break; - - case ALL_SUBLINK: - appendStringInfo(buf, " %s ALL ", opname); - break; - - case ROWCOMPARE_SUBLINK: - appendStringInfo(buf, " %s ", opname); - break; - - case EXPR_SUBLINK: - case MULTIEXPR_SUBLINK: - case ARRAY_SUBLINK: - need_paren = false; - break; - - case CTE_SUBLINK: /* shouldn't occur in a SubLink */ - default: - elog(ERROR, "unrecognized sublink type: %d", - (int) sublink->subLinkType); - break; - } - - if (need_paren) - appendStringInfoChar(buf, '('); - - get_query_def(query, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - - if (need_paren) - appendStringInfoString(buf, "))"); - else - appendStringInfoChar(buf, ')'); -} - - -/* ---------- - * get_tablefunc - Parse back a table function - * ---------- - */ -static void -get_tablefunc(TableFunc *tf, deparse_context *context, bool showimplicit) -{ - StringInfo buf = context->buf; - - /* XMLTABLE is the only existing implementation. 
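- * A deparsed call looks roughly like (row and column names illustrative):
- * XMLTABLE(('/rows/row') PASSING (doc) COLUMNS id integer PATH ('id'))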
*/ - - appendStringInfoString(buf, "XMLTABLE("); - - if (tf->ns_uris != NIL) - { - ListCell *lc1, - *lc2; - bool first = true; - - appendStringInfoString(buf, "XMLNAMESPACES ("); - forboth(lc1, tf->ns_uris, lc2, tf->ns_names) - { - Node *expr = (Node *) lfirst(lc1); - char *name = strVal(lfirst(lc2)); - - if (!first) - appendStringInfoString(buf, ", "); - else - first = false; - - if (name != NULL) - { - get_rule_expr(expr, context, showimplicit); - appendStringInfo(buf, " AS %s", name); - } - else - { - appendStringInfoString(buf, "DEFAULT "); - get_rule_expr(expr, context, showimplicit); - } - } - appendStringInfoString(buf, "), "); - } - - appendStringInfoChar(buf, '('); - get_rule_expr((Node *) tf->rowexpr, context, showimplicit); - appendStringInfoString(buf, ") PASSING ("); - get_rule_expr((Node *) tf->docexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - - if (tf->colexprs != NIL) - { - ListCell *l1; - ListCell *l2; - ListCell *l3; - ListCell *l4; - ListCell *l5; - int colnum = 0; - - appendStringInfoString(buf, " COLUMNS "); - forfive(l1, tf->colnames, l2, tf->coltypes, l3, tf->coltypmods, - l4, tf->colexprs, l5, tf->coldefexprs) - { - char *colname = strVal(lfirst(l1)); - Oid typid = lfirst_oid(l2); - int32 typmod = lfirst_int(l3); - Node *colexpr = (Node *) lfirst(l4); - Node *coldefexpr = (Node *) lfirst(l5); - bool ordinality = (tf->ordinalitycol == colnum); - bool notnull = bms_is_member(colnum, tf->notnulls); - - if (colnum > 0) - appendStringInfoString(buf, ", "); - colnum++; - - appendStringInfo(buf, "%s %s", quote_identifier(colname), - ordinality ? "FOR ORDINALITY" : - format_type_with_typemod(typid, typmod)); - if (ordinality) - continue; - - if (coldefexpr != NULL) - { - appendStringInfoString(buf, " DEFAULT ("); - get_rule_expr((Node *) coldefexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - } - if (colexpr != NULL) - { - appendStringInfoString(buf, " PATH ("); - get_rule_expr((Node *) colexpr, context, showimplicit); - appendStringInfoChar(buf, ')'); - } - if (notnull) - appendStringInfoString(buf, " NOT NULL"); - } - } - - appendStringInfoChar(buf, ')'); -} - -/* ---------- - * get_from_clause - Parse back a FROM clause - * - * "prefix" is the keyword that denotes the start of the list of FROM - * elements. It is FROM when used to parse back SELECT and UPDATE, but - * is USING when parsing back DELETE. - * ---------- - */ -static void -get_from_clause(Query *query, const char *prefix, deparse_context *context) -{ - StringInfo buf = context->buf; - bool first = true; - ListCell *l; - - /* - * We use the query's jointree as a guide to what to print. However, we - * must ignore auto-added RTEs that are marked not inFromCl. (These can - * only appear at the top level of the jointree, so it's sufficient to - * check here.) This check also ensures we ignore the rule pseudo-RTEs - * for NEW and OLD. - */ - foreach(l, query->jointree->fromlist) - { - Node *jtnode = (Node *) lfirst(l); - - if (IsA(jtnode, RangeTblRef)) - { - int varno = ((RangeTblRef *) jtnode)->rtindex; - RangeTblEntry *rte = rt_fetch(varno, query->rtable); - - if (!rte->inFromCl) - continue; - } - - if (first) - { - appendContextKeyword(context, prefix, - -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); - first = false; - - get_from_clause_item(jtnode, query, context); - } - else - { - StringInfoData itembuf; - - appendStringInfoString(buf, ", "); - - /* - * Put the new FROM item's text into itembuf so we can decide - * after we've got it whether or not it needs to go on a new line. 
- */ - initStringInfo(&itembuf); - context->buf = &itembuf; - - get_from_clause_item(jtnode, query, context); - - /* Restore context's output buffer */ - context->buf = buf; - - /* Consider line-wrapping if enabled */ - if (PRETTY_INDENT(context) && context->wrapColumn >= 0) - { - /* Does the new item start with a new line? */ - if (itembuf.len > 0 && itembuf.data[0] == '\n') - { - /* If so, we shouldn't add anything */ - /* instead, remove any trailing spaces currently in buf */ - removeStringInfoSpaces(buf); - } - else - { - char *trailing_nl; - - /* Locate the start of the current line in the buffer */ - trailing_nl = strrchr(buf->data, '\n'); - if (trailing_nl == NULL) - trailing_nl = buf->data; - else - trailing_nl++; - - /* - * Add a newline, plus some indentation, if the new item - * would cause an overflow. - */ - if (strlen(trailing_nl) + itembuf.len > context->wrapColumn) - appendContextKeyword(context, "", -PRETTYINDENT_STD, - PRETTYINDENT_STD, - PRETTYINDENT_VAR); - } - } - - /* Add the new item */ - appendStringInfoString(buf, itembuf.data); - - /* clean up */ - pfree(itembuf.data); - } - } -} - -static void -get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) -{ - StringInfo buf = context->buf; - deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); - - if (IsA(jtnode, RangeTblRef)) - { - int varno = ((RangeTblRef *) jtnode)->rtindex; - RangeTblEntry *rte = rt_fetch(varno, query->rtable); - char *refname = get_rtable_name(varno, context); - deparse_columns *colinfo = deparse_columns_fetch(varno, dpns); - RangeTblFunction *rtfunc1 = NULL; - bool printalias; - CitusRTEKind rteKind = GetRangeTblKind(rte); - - if (rte->lateral) - appendStringInfoString(buf, "LATERAL "); - - /* Print the FROM item proper */ - switch (rte->rtekind) - { - case RTE_RELATION: - /* Normal relation RTE */ - appendStringInfo(buf, "%s%s", - only_marker(rte), - generate_relation_or_shard_name(rte->relid, - context->distrelid, - context->shardid, - context->namespaces)); - break; - case RTE_SUBQUERY: - /* Subquery RTE */ - appendStringInfoChar(buf, '('); - get_query_def(rte->subquery, buf, context->namespaces, NULL, - context->prettyFlags, context->wrapColumn, - context->indentLevel); - appendStringInfoChar(buf, ')'); - break; - case RTE_FUNCTION: - /* if it's a shard, do differently */ - if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) - { - char *fragmentSchemaName = NULL; - char *fragmentTableName = NULL; - - ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); - - /* use schema and table name from the remote alias */ - appendStringInfo(buf, "%s%s", - only_marker(rte), - generate_fragment_name(fragmentSchemaName, - fragmentTableName)); - break; - } - - /* Function RTE */ - rtfunc1 = (RangeTblFunction *) linitial(rte->functions); - - /* - * Omit ROWS FROM() syntax for just one function, unless it - * has both a coldeflist and WITH ORDINALITY. If it has both, - * we must use ROWS FROM() syntax to avoid ambiguity about - * whether the coldeflist includes the ordinality column. - */ - if (list_length(rte->functions) == 1 && - (rtfunc1->funccolnames == NIL || !rte->funcordinality)) - { - get_rule_expr_funccall(rtfunc1->funcexpr, context, true); - /* we'll print the coldeflist below, if it has one */ - } - else - { - bool all_unnest; - ListCell *lc; - - /* - * If all the function calls in the list are to unnest, - * and none need a coldeflist, then collapse the list back - * down to UNNEST(args). 
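- * For example, ROWS FROM(unnest($1), unnest($2)) with no coldeflists
- * collapses back to UNNEST($1, $2).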
(If we had more than one - * built-in unnest function, this would get more - * difficult.) - * - * XXX This is pretty ugly, since it makes not-terribly- - * future-proof assumptions about what the parser would do - * with the output; but the alternative is to emit our - * nonstandard ROWS FROM() notation for what might have - * been a perfectly spec-compliant multi-argument - * UNNEST(). - */ - all_unnest = true; - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - if (!IsA(rtfunc->funcexpr, FuncExpr) || - ((FuncExpr *) rtfunc->funcexpr)->funcid != F_ARRAY_UNNEST || - rtfunc->funccolnames != NIL) - { - all_unnest = false; - break; - } - } - - if (all_unnest) - { - List *allargs = NIL; - - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - List *args = ((FuncExpr *) rtfunc->funcexpr)->args; - - allargs = list_concat(allargs, list_copy(args)); - } - - appendStringInfoString(buf, "UNNEST("); - get_rule_expr((Node *) allargs, context, true); - appendStringInfoChar(buf, ')'); - } - else - { - int funcno = 0; - - appendStringInfoString(buf, "ROWS FROM("); - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - if (funcno > 0) - appendStringInfoString(buf, ", "); - get_rule_expr_funccall(rtfunc->funcexpr, context, true); - if (rtfunc->funccolnames != NIL) - { - /* Reconstruct the column definition list */ - appendStringInfoString(buf, " AS "); - get_from_clause_coldeflist(rtfunc, - NULL, - context); - } - funcno++; - } - appendStringInfoChar(buf, ')'); - } - /* prevent printing duplicate coldeflist below */ - rtfunc1 = NULL; - } - if (rte->funcordinality) - appendStringInfoString(buf, " WITH ORDINALITY"); - break; - case RTE_TABLEFUNC: - get_tablefunc(rte->tablefunc, context, true); - break; - case RTE_VALUES: - /* Values list RTE */ - appendStringInfoChar(buf, '('); - get_values_def(rte->values_lists, context); - appendStringInfoChar(buf, ')'); - break; - case RTE_CTE: - appendStringInfoString(buf, quote_identifier(rte->ctename)); - break; - default: - elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind); - break; - } - - /* Print the relation alias, if needed */ - printalias = false; - if (rte->alias != NULL) - { - /* Always print alias if user provided one */ - printalias = true; - } - else if (colinfo->printaliases) - { - /* Always print alias if we need to print column aliases */ - printalias = true; - } - else if (rte->rtekind == RTE_RELATION) - { - /* - * No need to print alias if it's same as relation name (this - * would normally be the case, but not if set_rtable_names had to - * resolve a conflict). - */ - if (strcmp(refname, get_relation_name(rte->relid)) != 0) - printalias = true; - } - else if (rte->rtekind == RTE_FUNCTION) - { - /* - * For a function RTE, always print alias. This covers possible - * renaming of the function and/or instability of the - * FigureColname rules for things that aren't simple functions. - * Note we'd need to force it anyway for the columndef list case. - */ - printalias = true; - } - else if (rte->rtekind == RTE_VALUES) - { - /* Alias is syntactically required for VALUES */ - printalias = true; - } - else if (rte->rtekind == RTE_CTE) - { - /* - * No need to print alias if it's same as CTE name (this would - * normally be the case, but not if set_rtable_names had to - * resolve a conflict). 
- */
- if (strcmp(refname, rte->ctename) != 0)
- printalias = true;
- }
- else if (rte->rtekind == RTE_SUBQUERY)
- {
- /* subquery requires alias too */
- printalias = true;
- }
- if (printalias)
- appendStringInfo(buf, " %s", quote_identifier(refname));
-
- /* Print the column definitions or aliases, if needed */
- if (rtfunc1 && rtfunc1->funccolnames != NIL)
- {
- /* Reconstruct the columndef list, which is also the aliases */
- get_from_clause_coldeflist(rtfunc1, colinfo, context);
- }
- else if (GetRangeTblKind(rte) != CITUS_RTE_SHARD)
- {
- /* Else print column aliases as needed */
- get_column_alias_list(colinfo, context);
- }
- /* check if columns are given aliases in distributed tables */
- else if (colinfo->parentUsing != NIL)
- {
- Assert(colinfo->printaliases);
- get_column_alias_list(colinfo, context);
- }
-
- /* Tablesample clause must go after any alias */
- if ((rteKind == CITUS_RTE_RELATION || rteKind == CITUS_RTE_SHARD) &&
- rte->tablesample)
- {
- get_tablesample_def(rte->tablesample, context);
- }
- }
- else if (IsA(jtnode, JoinExpr))
- {
- JoinExpr *j = (JoinExpr *) jtnode;
- deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns);
- bool need_paren_on_right;
-
- need_paren_on_right = PRETTY_PAREN(context) &&
- !IsA(j->rarg, RangeTblRef) &&
- !(IsA(j->rarg, JoinExpr) &&((JoinExpr *) j->rarg)->alias != NULL);
-
- if (!PRETTY_PAREN(context) || j->alias != NULL)
- appendStringInfoChar(buf, '(');
-
- get_from_clause_item(j->larg, query, context);
-
- switch (j->jointype)
- {
- case JOIN_INNER:
- if (j->quals)
- appendContextKeyword(context, " JOIN ",
- -PRETTYINDENT_STD,
- PRETTYINDENT_STD,
- PRETTYINDENT_JOIN);
- else
- appendContextKeyword(context, " CROSS JOIN ",
- -PRETTYINDENT_STD,
- PRETTYINDENT_STD,
- PRETTYINDENT_JOIN);
- break;
- case JOIN_LEFT:
- appendContextKeyword(context, " LEFT JOIN ",
- -PRETTYINDENT_STD,
- PRETTYINDENT_STD,
- PRETTYINDENT_JOIN);
- break;
- case JOIN_FULL:
- appendContextKeyword(context, " FULL JOIN ",
- -PRETTYINDENT_STD,
- PRETTYINDENT_STD,
- PRETTYINDENT_JOIN);
- break;
- case JOIN_RIGHT:
- appendContextKeyword(context, " RIGHT JOIN ",
- -PRETTYINDENT_STD,
- PRETTYINDENT_STD,
- PRETTYINDENT_JOIN);
- break;
- default:
- elog(ERROR, "unrecognized join type: %d",
- (int) j->jointype);
- }
-
- if (need_paren_on_right)
- appendStringInfoChar(buf, '(');
- get_from_clause_item(j->rarg, query, context);
- if (need_paren_on_right)
- appendStringInfoChar(buf, ')');
-
- if (j->usingClause)
- {
- ListCell *lc;
- bool first = true;
-
- appendStringInfoString(buf, " USING (");
- /* Use the assigned names, not what's in usingClause */
- foreach(lc, colinfo->usingNames)
- {
- char *colname = (char *) lfirst(lc);
-
- if (first)
- first = false;
- else
- appendStringInfoString(buf, ", ");
- appendStringInfoString(buf, quote_identifier(colname));
- }
- appendStringInfoChar(buf, ')');
- }
- else if (j->quals)
- {
- appendStringInfoString(buf, " ON ");
- if (!PRETTY_PAREN(context))
- appendStringInfoChar(buf, '(');
- get_rule_expr(j->quals, context, false);
- if (!PRETTY_PAREN(context))
- appendStringInfoChar(buf, ')');
- }
- else if (j->jointype != JOIN_INNER)
- {
- /* If we didn't say CROSS JOIN above, we must provide an ON */
- appendStringInfoString(buf, " ON TRUE");
- }
-
- if (!PRETTY_PAREN(context) || j->alias != NULL)
- appendStringInfoChar(buf, ')');
-
- /* Yes, it's correct to put alias after the right paren ... 
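- * e.g. "FROM (t1 JOIN t2 ON true) j" (names illustrative), with the alias
- * attached outside the parentheses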
*/ - if (j->alias != NULL) - { - /* - * Note that it's correct to emit an alias clause if and only if - * there was one originally. Otherwise we'd be converting a named - * join to unnamed or vice versa, which creates semantic - * subtleties we don't want. However, we might print a different - * alias name than was there originally. - */ - appendStringInfo(buf, " %s", - quote_identifier(get_rtable_name(j->rtindex, - context))); - get_column_alias_list(colinfo, context); - } - } - else - elog(ERROR, "unrecognized node type: %d", - (int) nodeTag(jtnode)); -} - -/* - * get_column_alias_list - print column alias list for an RTE - * - * Caller must already have printed the relation's alias name. - */ -static void -get_column_alias_list(deparse_columns *colinfo, deparse_context *context) -{ - StringInfo buf = context->buf; - int i; - bool first = true; - - /* Don't print aliases if not needed */ - if (!colinfo->printaliases) - return; - - for (i = 0; i < colinfo->num_new_cols; i++) - { - char *colname = colinfo->new_colnames[i]; - - if (first) - { - appendStringInfoChar(buf, '('); - first = false; - } - else - appendStringInfoString(buf, ", "); - appendStringInfoString(buf, quote_identifier(colname)); - } - if (!first) - appendStringInfoChar(buf, ')'); -} - -/* - * get_from_clause_coldeflist - reproduce FROM clause coldeflist - * - * When printing a top-level coldeflist (which is syntactically also the - * relation's column alias list), use column names from colinfo. But when - * printing a coldeflist embedded inside ROWS FROM(), we prefer to use the - * original coldeflist's names, which are available in rtfunc->funccolnames. - * Pass NULL for colinfo to select the latter behavior. - * - * The coldeflist is appended immediately (no space) to buf. Caller is - * responsible for ensuring that an alias or AS is present before it. - */ -static void -get_from_clause_coldeflist(RangeTblFunction *rtfunc, - deparse_columns *colinfo, - deparse_context *context) -{ - StringInfo buf = context->buf; - ListCell *l1; - ListCell *l2; - ListCell *l3; - ListCell *l4; - int i; - - appendStringInfoChar(buf, '('); - - i = 0; - forfour(l1, rtfunc->funccoltypes, - l2, rtfunc->funccoltypmods, - l3, rtfunc->funccolcollations, - l4, rtfunc->funccolnames) - { - Oid atttypid = lfirst_oid(l1); - int32 atttypmod = lfirst_int(l2); - Oid attcollation = lfirst_oid(l3); - char *attname; - - if (colinfo) - attname = colinfo->colnames[i]; - else - attname = strVal(lfirst(l4)); - - Assert(attname); /* shouldn't be any dropped columns here */ - - if (i > 0) - appendStringInfoString(buf, ", "); - appendStringInfo(buf, "%s %s", - quote_identifier(attname), - format_type_with_typemod(atttypid, atttypmod)); - if (OidIsValid(attcollation) && - attcollation != get_typcollation(atttypid)) - appendStringInfo(buf, " COLLATE %s", - generate_collation_name(attcollation)); - - i++; - } - - appendStringInfoChar(buf, ')'); -} - -/* - * get_tablesample_def - print a TableSampleClause - */ -static void -get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) -{ - StringInfo buf = context->buf; - Oid argtypes[1]; - int nargs; - ListCell *l; - - /* - * We should qualify the handler's function name if it wouldn't be - * resolved by lookup in the current search path. 
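- *
- * The result reads e.g. " TABLESAMPLE bernoulli (10) REPEATABLE (1)", with
- * the handler name schema-qualified only when necessary.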
- */ - argtypes[0] = INTERNALOID; - appendStringInfo(buf, " TABLESAMPLE %s (", - generate_function_name(tablesample->tsmhandler, 1, - NIL, argtypes, - false, NULL, EXPR_KIND_NONE)); - - nargs = 0; - foreach(l, tablesample->args) - { - if (nargs++ > 0) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) lfirst(l), context, false); - } - appendStringInfoChar(buf, ')'); - - if (tablesample->repeatable != NULL) - { - appendStringInfoString(buf, " REPEATABLE ("); - get_rule_expr((Node *) tablesample->repeatable, context, false); - appendStringInfoChar(buf, ')'); - } -} - - -/* - * get_opclass_name - fetch name of an index operator class - * - * The opclass name is appended (after a space) to buf. - * - * Output is suppressed if the opclass is the default for the given - * actual_datatype. (If you don't want this behavior, just pass - * InvalidOid for actual_datatype.) - */ -static void -get_opclass_name(Oid opclass, Oid actual_datatype, - StringInfo buf) -{ - HeapTuple ht_opc; - Form_pg_opclass opcrec; - char *opcname; - char *nspname; - - ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); - if (!HeapTupleIsValid(ht_opc)) - elog(ERROR, "cache lookup failed for opclass %u", opclass); - opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc); - - if (!OidIsValid(actual_datatype) || - GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) - { - /* Okay, we need the opclass name. Do we need to qualify it? */ - opcname = NameStr(opcrec->opcname); - if (OpclassIsVisible(opclass)) - appendStringInfo(buf, " %s", quote_identifier(opcname)); - else - { - nspname = get_namespace_name(opcrec->opcnamespace); - appendStringInfo(buf, " %s.%s", - quote_identifier(nspname), - quote_identifier(opcname)); - } - } - ReleaseSysCache(ht_opc); -} - -/* - * processIndirection - take care of array and subfield assignment - * - * We strip any top-level FieldStore or assignment SubscriptingRef nodes that - * appear in the input, printing them as decoration for the base column - * name (which we assume the caller just printed). We might also need to - * strip CoerceToDomain nodes, but only ones that appear above assignment - * nodes. - * - * Returns the subexpression that's to be assigned. - */ -static Node * -processIndirection(Node *node, deparse_context *context) -{ - StringInfo buf = context->buf; - CoerceToDomain *cdomain = NULL; - - for (;;) - { - if (node == NULL) - break; - if (IsA(node, FieldStore)) - { - FieldStore *fstore = (FieldStore *) node; - Oid typrelid; - char *fieldname; - - /* lookup tuple type */ - typrelid = get_typ_typrelid(fstore->resulttype); - if (!OidIsValid(typrelid)) - elog(ERROR, "argument type %s of FieldStore is not a tuple type", - format_type_be(fstore->resulttype)); - - /* - * Print the field name. There should only be one target field in - * stored rules. There could be more than that in executable - * target lists, but this function cannot be used for that case. - */ - Assert(list_length(fstore->fieldnums) == 1); - fieldname = get_attname(typrelid, - linitial_int(fstore->fieldnums), false); - appendStringInfo(buf, ".%s", quote_identifier(fieldname)); - - /* - * We ignore arg since it should be an uninteresting reference to - * the target column or subcolumn. 
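- * For "UPDATE t SET col.subfield = 1" (names illustrative), this prints
- * the ".subfield" decoration and returns the assigned expression.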
- */ - node = (Node *) linitial(fstore->newvals); - } - else if (IsA(node, SubscriptingRef)) - { - SubscriptingRef *sbsref = (SubscriptingRef *) node; - - if (sbsref->refassgnexpr == NULL) - break; - printSubscripts(sbsref, context); - - /* - * We ignore refexpr since it should be an uninteresting reference - * to the target column or subcolumn. - */ - node = (Node *) sbsref->refassgnexpr; - } - else if (IsA(node, CoerceToDomain)) - { - cdomain = (CoerceToDomain *) node; - /* If it's an explicit domain coercion, we're done */ - if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) - break; - /* Tentatively descend past the CoerceToDomain */ - node = (Node *) cdomain->arg; - } - else - break; - } - - /* - * If we descended past a CoerceToDomain whose argument turned out not to - * be a FieldStore or array assignment, back up to the CoerceToDomain. - * (This is not enough to be fully correct if there are nested implicit - * CoerceToDomains, but such cases shouldn't ever occur.) - */ - if (cdomain && node == (Node *) cdomain->arg) - node = (Node *) cdomain; - - return node; -} - -static void -printSubscripts(SubscriptingRef *sbsref, deparse_context *context) -{ - StringInfo buf = context->buf; - ListCell *lowlist_item; - ListCell *uplist_item; - - lowlist_item = list_head(sbsref->reflowerindexpr); /* could be NULL */ - foreach(uplist_item, sbsref->refupperindexpr) - { - appendStringInfoChar(buf, '['); - if (lowlist_item) - { - /* If subexpression is NULL, get_rule_expr prints nothing */ - get_rule_expr((Node *) lfirst(lowlist_item), context, false); - appendStringInfoChar(buf, ':'); - lowlist_item = lnext(lowlist_item); - } - /* If subexpression is NULL, get_rule_expr prints nothing */ - get_rule_expr((Node *) lfirst(uplist_item), context, false); - appendStringInfoChar(buf, ']'); - } -} - -/* - * get_relation_name - * Get the unqualified name of a relation specified by OID - * - * This differs from the underlying get_rel_name() function in that it will - * throw error instead of silently returning NULL if the OID is bad. - */ -static char * -get_relation_name(Oid relid) -{ - char *relname = get_rel_name(relid); - - if (!relname) - elog(ERROR, "cache lookup failed for relation %u", relid); - return relname; -} - -/* - * generate_relation_or_shard_name - * Compute the name to display for a relation or shard - * - * If the provided relid is equal to the provided distrelid, this function - * returns a shard-extended relation name; otherwise, it falls through to a - * simple generate_relation_name call. - */ -static char * -generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, - List *namespaces) -{ - char *relname = NULL; - - if (relid == distrelid) - { - relname = get_relation_name(relid); - - if (shardid > 0) - { - Oid schemaOid = get_rel_namespace(relid); - char *schemaName = get_namespace_name(schemaOid); - - AppendShardIdToName(&relname, shardid); - - relname = quote_qualified_identifier(schemaName, relname); - } - } - else - { - relname = generate_relation_name(relid, namespaces); - } - - return relname; -} - -/* - * generate_relation_name - * Compute the name to display for a relation specified by OID - * - * The result includes all necessary quoting and schema-prefixing. - * - * If namespaces isn't NIL, it must be a list of deparse_namespace nodes. - * We will forcibly qualify the relation name if it equals any CTE name - * visible in the namespace list. 
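- *
- * For example (names illustrative), with a CTE named "users" in scope, the
- * plain table public.users is printed as "public.users" so that it cannot
- * be captured by the CTE.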
- */ -char * -generate_relation_name(Oid relid, List *namespaces) -{ - HeapTuple tp; - Form_pg_class reltup; - bool need_qual; - ListCell *nslist; - char *relname; - char *nspname; - char *result; - - tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for relation %u", relid); - reltup = (Form_pg_class) GETSTRUCT(tp); - relname = NameStr(reltup->relname); - - /* Check for conflicting CTE name */ - need_qual = false; - foreach(nslist, namespaces) - { - deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist); - ListCell *ctlist; - - foreach(ctlist, dpns->ctes) - { - CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist); - - if (strcmp(cte->ctename, relname) == 0) - { - need_qual = true; - break; - } - } - if (need_qual) - break; - } - - /* Otherwise, qualify the name if not visible in search path */ - if (!need_qual) - need_qual = !RelationIsVisible(relid); - - if (need_qual) - nspname = get_namespace_name(reltup->relnamespace); - else - nspname = NULL; - - result = quote_qualified_identifier(nspname, relname); - - ReleaseSysCache(tp); - - return result; -} - - -/* - * generate_rte_shard_name returns the qualified name of the shard given a - * CITUS_RTE_SHARD range table entry. - */ -static char * -generate_rte_shard_name(RangeTblEntry *rangeTableEntry) -{ - char *shardSchemaName = NULL; - char *shardTableName = NULL; - - Assert(GetRangeTblKind(rangeTableEntry) == CITUS_RTE_SHARD); - - ExtractRangeTblExtraData(rangeTableEntry, NULL, &shardSchemaName, &shardTableName, - NULL); - - return generate_fragment_name(shardSchemaName, shardTableName); -} - - -/* - * generate_fragment_name - * Compute the name to display for a shard or merged table - * - * The result includes all necessary quoting and schema-prefixing. The schema - * name can be NULL for regular shards. For merged tables, they are always - * declared within a job-specific schema, and therefore can't have null schema - * names. - */ -static char * -generate_fragment_name(char *schemaName, char *tableName) -{ - StringInfo fragmentNameString = makeStringInfo(); - - if (schemaName != NULL) - { - appendStringInfo(fragmentNameString, "%s.%s", quote_identifier(schemaName), - quote_identifier(tableName)); - } - else - { - appendStringInfoString(fragmentNameString, quote_identifier(tableName)); - } - - return fragmentNameString->data; -} - -/* - * generate_function_name - * Compute the name to display for a function specified by OID, - * given that it is being called with the specified actual arg names and - * types. (Those matter because of ambiguous-function resolution rules.) - * - * If we're dealing with a potentially variadic function (in practice, this - * means a FuncExpr or Aggref, not some other way of calling a function), then - * has_variadic must specify whether variadic arguments have been merged, - * and *use_variadic_p will be set to indicate whether to print VARIADIC in - * the output. For non-FuncExpr cases, has_variadic should be false and - * use_variadic_p can be NULL. - * - * The result includes all necessary quoting and schema-prefixing. 
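- *
- * For example (names illustrative), if an unqualified "foo" would resolve
- * to some other function given these argument types, the output becomes
- * "myschema.foo"; a merged variadic argument is printed with a VARIADIC
- * marker, e.g. "foo(VARIADIC $1)".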
- */ -static char * -generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, - bool has_variadic, bool *use_variadic_p, - ParseExprKind special_exprkind) -{ - char *result; - HeapTuple proctup; - Form_pg_proc procform; - char *proname; - bool use_variadic; - char *nspname; - FuncDetailCode p_result; - Oid p_funcid; - Oid p_rettype; - bool p_retset; - int p_nvargs; - Oid p_vatype; - Oid *p_true_typeids; - bool force_qualify = false; - - proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); - if (!HeapTupleIsValid(proctup)) - elog(ERROR, "cache lookup failed for function %u", funcid); - procform = (Form_pg_proc) GETSTRUCT(proctup); - proname = NameStr(procform->proname); - - /* - * Due to parser hacks to avoid needing to reserve CUBE, we need to force - * qualification in some special cases. - */ - if (special_exprkind == EXPR_KIND_GROUP_BY) - { - if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) - force_qualify = true; - } - - /* - * Determine whether VARIADIC should be printed. We must do this first - * since it affects the lookup rules in func_get_detail(). - * - * Currently, we always print VARIADIC if the function has a merged - * variadic-array argument. Note that this is always the case for - * functions taking a VARIADIC argument type other than VARIADIC ANY. - * - * In principle, if VARIADIC wasn't originally specified and the array - * actual argument is deconstructable, we could print the array elements - * separately and not print VARIADIC, thus more nearly reproducing the - * original input. For the moment that seems like too much complication - * for the benefit, and anyway we do not know whether VARIADIC was - * originally specified if it's a non-ANY type. - */ - if (use_variadic_p) - { - /* Parser should not have set funcvariadic unless fn is variadic */ - Assert(!has_variadic || OidIsValid(procform->provariadic)); - use_variadic = has_variadic; - *use_variadic_p = use_variadic; - } - else - { - Assert(!has_variadic); - use_variadic = false; - } - - /* - * The idea here is to schema-qualify only if the parser would fail to - * resolve the correct function given the unqualified func name with the - * specified argtypes and VARIADIC flag. But if we already decided to - * force qualification, then we can skip the lookup and pretend we didn't - * find it. - */ - if (!force_qualify) - p_result = func_get_detail(list_make1(makeString(proname)), - NIL, argnames, nargs, argtypes, - !use_variadic, true, - &p_funcid, &p_rettype, - &p_retset, &p_nvargs, &p_vatype, - &p_true_typeids, NULL); - else - { - p_result = FUNCDETAIL_NOTFOUND; - p_funcid = InvalidOid; - } - - if ((p_result == FUNCDETAIL_NORMAL || - p_result == FUNCDETAIL_AGGREGATE || - p_result == FUNCDETAIL_WINDOWFUNC) && - p_funcid == funcid) - nspname = NULL; - else - nspname = get_namespace_name(procform->pronamespace); - - result = quote_qualified_identifier(nspname, proname); - - ReleaseSysCache(proctup); - - return result; -} - -/* - * generate_operator_name - * Compute the name to display for an operator specified by OID, - * given that it is being called with the specified actual arg types. - * (Arg types matter because of ambiguous-operator resolution rules. - * Pass InvalidOid for unused arg of a unary operator.) - * - * The result includes all necessary quoting and schema-prefixing, - * plus the OPERATOR() decoration needed to use a qualified operator name - * in an expression. 
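- *
- * e.g. "a OPERATOR(pg_catalog.+) b" (operands illustrative).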
- */ -char * -generate_operator_name(Oid operid, Oid arg1, Oid arg2) -{ - StringInfoData buf; - HeapTuple opertup; - Form_pg_operator operform; - char *oprname; - char *nspname; - - initStringInfo(&buf); - - opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid)); - if (!HeapTupleIsValid(opertup)) - elog(ERROR, "cache lookup failed for operator %u", operid); - operform = (Form_pg_operator) GETSTRUCT(opertup); - oprname = NameStr(operform->oprname); - - /* - * Unlike generate_operator_name() in postgres/src/backend/utils/adt/ruleutils.c, - * we don't check if the operator is in current namespace or not. This is - * because this check is costly when the operator is not in current namespace. - */ - nspname = get_namespace_name(operform->oprnamespace); - Assert(nspname != NULL); - appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname)); - appendStringInfoString(&buf, oprname); - appendStringInfoChar(&buf, ')'); - - ReleaseSysCache(opertup); - - return buf.data; -} - -/* - * get_one_range_partition_bound_string - * A C string representation of one range partition bound - */ -char * -get_range_partbound_string(List *bound_datums) -{ - deparse_context context; - StringInfo buf = makeStringInfo(); - ListCell *cell; - char *sep; - - memset(&context, 0, sizeof(deparse_context)); - context.buf = buf; - - appendStringInfoString(buf, "("); - sep = ""; - foreach(cell, bound_datums) - { - PartitionRangeDatum *datum = - castNode(PartitionRangeDatum, lfirst(cell)); - - appendStringInfoString(buf, sep); - if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE) - appendStringInfoString(buf, "MINVALUE"); - else if (datum->kind == PARTITION_RANGE_DATUM_MAXVALUE) - appendStringInfoString(buf, "MAXVALUE"); - else - { - Const *val = castNode(Const, datum->value); - - get_const_expr(val, &context, -1); - } - sep = ", "; - } - appendStringInfoChar(buf, ')'); - - return buf->data; -} - -#endif /* (PG_VERSION_NUM >= PG_VERSION_12) && (PG_VERSION_NUM < PG_VERSION_13) */ diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index 6ab27d078..a0e16876a 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -1790,17 +1790,9 @@ AcquireExecutorShardLocksForExecution(DistributedExecution *execution) /* Acquire additional locks for SELECT .. FOR UPDATE on reference tables */ AcquireExecutorShardLocksForRelationRowLockList(task->relationRowLockList); - /* - * Due to PG commit 5ee190f8ec37c1bbfb3061e18304e155d600bc8e we copy the - * second parameter in pre-13. - */ relationRowLockList = list_concat(relationRowLockList, -#if (PG_VERSION_NUM >= PG_VERSION_12) && (PG_VERSION_NUM < PG_VERSION_13) - list_copy(task->relationRowLockList)); -#else task->relationRowLockList); -#endif /* * If the task has a subselect, then we may need to lock the shards from which @@ -1814,19 +1806,9 @@ AcquireExecutorShardLocksForExecution(DistributedExecution *execution) * and therefore prevents other modifications from running * concurrently. */ - - /* - * Due to PG commit 5ee190f8ec37c1bbfb3061e18304e155d600bc8e we copy the - * second parameter in pre-13. 
- */ requiresConsistentSnapshotRelationShardList = list_concat(requiresConsistentSnapshotRelationShardList, -#if (PG_VERSION_NUM >= PG_VERSION_12) && (PG_VERSION_NUM < PG_VERSION_13) - - list_copy(task->relationShardList)); -#else task->relationShardList); -#endif } } diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index d787b17aa..67747dee6 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -34,9 +34,7 @@ #include "catalog/pg_rewrite_d.h" #include "catalog/pg_shdepend.h" #include "catalog/pg_type.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/listutils.h" diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index fda5136e9..fb6efdae6 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -75,9 +75,7 @@ #include "utils/elog.h" #include "utils/hsearch.h" #include "utils/jsonb.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif #include "utils/inval.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 38a2308ff..469cdcccb 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -3804,9 +3804,8 @@ RemoteTypeIdExpression(Oid typeId) /* * RemoteCollationIdExpression returns an expression in text form that can - * be used to obtain the OID of a type on a different node when included - * in a query string. Currently this is a sublink because regcollation type - * is not available in PG12. + * be used to obtain the OID of a collation on a different node when included + * in a query string. 
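+ *
+ * For example (collation name illustrative), for collation "en_US" in
+ * schema pg_catalog the generated expression looks roughly like
+ * 'pg_catalog."en_US"'::regcollation.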
*/ static char * RemoteCollationIdExpression(Oid colocationId) @@ -3825,16 +3824,15 @@ RemoteCollationIdExpression(Oid colocationId) (Form_pg_collation) GETSTRUCT(collationTuple); char *collationName = NameStr(collationform->collname); char *collationSchemaName = get_namespace_name(collationform->collnamespace); + char *qualifiedCollationName = quote_qualified_identifier(collationSchemaName, + collationName); - StringInfo colocationIdQuery = makeStringInfo(); - appendStringInfo(colocationIdQuery, - "(select oid from pg_collation" - " where collname = %s" - " and collnamespace = %s::regnamespace)", - quote_literal_cstr(collationName), - quote_literal_cstr(collationSchemaName)); + StringInfo regcollationExpression = makeStringInfo(); + appendStringInfo(regcollationExpression, + "%s::regcollation", + quote_literal_cstr(qualifiedCollationName)); - expression = colocationIdQuery->data; + expression = regcollationExpression->data; } ReleaseSysCache(collationTuple); diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index 2fd84d239..0b64f5726 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -66,9 +66,6 @@ #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" -#if PG_VERSION_NUM < 120000 -#include "utils/tqual.h" -#endif #define DISK_SPACE_FIELDS 2 diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index 60cb39e3f..14d29c135 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -60,10 +60,7 @@ #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/syscache.h" - -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif /* RebalanceOptions are the options used to control the rebalance algorithm */ diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 39f6c0b63..7cc87dc9a 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -131,9 +131,7 @@ static void WarnIfListHasForeignDistributedTable(List *rangeTableList); /* Distributed planner hook */ PlannedStmt * distributed_planner(Query *parse, - #if PG_VERSION_NUM >= PG_VERSION_13 const char *query_string, - #endif int cursorOptions, ParamListInfo boundParams) { @@ -1839,7 +1837,7 @@ TranslatedVars(PlannerInfo *root, int relationIndex) FindTargetAppendRelInfo(root, relationIndex); if (targetAppendRelInfo != NULL) { - /* postgres deletes translated_vars after pg13, hence we deep copy them here */ + /* postgres deletes translated_vars, hence we deep copy them here */ Node *targetNode = NULL; foreach_ptr(targetNode, targetAppendRelInfo->translated_vars) { diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index ffba4d988..fe9d2ccef 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -170,7 +170,7 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext) } /* - * In pg12's planning phase empty FROMs are represented with an RTE_RESULT. + * In the planning phase empty FROMs are represented with an RTE_RESULT. 
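+ * (For example, "SELECT my_func(1)", my_func illustrative, plans with an
+ * empty FROM list.)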
* When we arrive here, standard_planner has already been called which calls * replace_empty_jointree() which replaces empty fromlist with a list of * single RTE_RESULT RangleTableRef node. diff --git a/src/backend/distributed/planner/intermediate_result_pruning.c b/src/backend/distributed/planner/intermediate_result_pruning.c index 2a3b4e423..94372f4e8 100644 --- a/src/backend/distributed/planner/intermediate_result_pruning.c +++ b/src/backend/distributed/planner/intermediate_result_pruning.c @@ -21,9 +21,7 @@ #include "distributed/query_utils.h" #include "distributed/worker_manager.h" #include "utils/builtins.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif /* controlled via GUC, used mostly for testing */ bool LogIntermediateResults = false; @@ -373,9 +371,6 @@ RemoveLocalNodeFromWorkerList(List *workerNodeList) int32 localGroupId = GetLocalGroupId(); ListCell *workerNodeCell = NULL; - #if PG_VERSION_NUM < PG_VERSION_13 - ListCell *prev = NULL; - #endif foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); @@ -383,9 +378,6 @@ RemoveLocalNodeFromWorkerList(List *workerNodeList) { return list_delete_cell_compat(workerNodeList, workerNodeCell, prev); } - #if PG_VERSION_NUM < PG_VERSION_13 - prev = workerNodeCell; - #endif } return workerNodeList; diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index c63d359ec..b627ecbfa 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -297,8 +297,6 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es) */ char *queryString = pstrdup(""); instr_time planduration; - #if PG_VERSION_NUM >= PG_VERSION_13 - BufferUsage bufusage_start, bufusage; @@ -306,7 +304,7 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es) { bufusage_start = pgBufferUsage; } - #endif + if (es->format == EXPLAIN_FORMAT_TEXT) { char *resultId = GenerateResultId(planId, subPlan->subPlanId); @@ -350,15 +348,12 @@ ExplainSubPlans(DistributedPlan *distributedPlan, ExplainState *es) INSTR_TIME_SET_ZERO(planduration); - #if PG_VERSION_NUM >= PG_VERSION_13 - /* calc differences of buffer counters. */ if (es->buffers) { memset(&bufusage, 0, sizeof(BufferUsage)); BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); } - #endif ExplainOpenGroup("PlannedStmt", "PlannedStmt", false, es); @@ -923,18 +918,13 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es) appendStringInfo(explainQuery, "EXPLAIN (ANALYZE %s, VERBOSE %s, " - "COSTS %s, BUFFERS %s, " -#if PG_VERSION_NUM >= PG_VERSION_13 - "WAL %s, " -#endif + "COSTS %s, BUFFERS %s, WAL %s, " "TIMING %s, SUMMARY %s, FORMAT %s) %s", es->analyze ? "TRUE" : "FALSE", es->verbose ? "TRUE" : "FALSE", es->costs ? "TRUE" : "FALSE", es->buffers ? "TRUE" : "FALSE", -#if PG_VERSION_NUM >= PG_VERSION_13 es->wal ? "TRUE" : "FALSE", -#endif es->timing ? "TRUE" : "FALSE", es->summary ? 
"TRUE" : "FALSE", formatStr, @@ -1028,9 +1018,7 @@ worker_save_query_explain_analyze(PG_FUNCTION_ARGS) /* use the same defaults as NewExplainState() for following options */ es->buffers = ExtractFieldBoolean(explainOptions, "buffers", es->buffers); -#if PG_VERSION_NUM >= PG_VERSION_13 es->wal = ExtractFieldBoolean(explainOptions, "wal", es->wal); -#endif es->costs = ExtractFieldBoolean(explainOptions, "costs", es->costs); es->summary = ExtractFieldBoolean(explainOptions, "summary", es->summary); es->verbose = ExtractFieldBoolean(explainOptions, "verbose", es->verbose); @@ -1178,9 +1166,7 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, /* save the flags of current EXPLAIN command */ CurrentDistributedQueryExplainOptions.costs = es->costs; CurrentDistributedQueryExplainOptions.buffers = es->buffers; -#if PG_VERSION_NUM >= PG_VERSION_13 CurrentDistributedQueryExplainOptions.wal = es->wal; -#endif CurrentDistributedQueryExplainOptions.verbose = es->verbose; CurrentDistributedQueryExplainOptions.summary = es->summary; CurrentDistributedQueryExplainOptions.timing = es->timing; @@ -1189,7 +1175,6 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, /* rest is copied from ExplainOneQuery() */ instr_time planstart, planduration; - #if PG_VERSION_NUM >= PG_VERSION_13 BufferUsage bufusage_start, bufusage; @@ -1197,7 +1182,6 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, { bufusage_start = pgBufferUsage; } - #endif INSTR_TIME_SET_CURRENT(planstart); @@ -1205,7 +1189,6 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, PlannedStmt *plan = pg_plan_query_compat(query, NULL, cursorOptions, params); INSTR_TIME_SET_CURRENT(planduration); INSTR_TIME_SUBTRACT(planduration, planstart); - #if PG_VERSION_NUM >= PG_VERSION_13 /* calc differences of buffer counters. */ if (es->buffers) @@ -1213,7 +1196,6 @@ CitusExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, memset(&bufusage, 0, sizeof(BufferUsage)); BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); } - #endif /* run it (if needed) and produce output */ ExplainOnePlanCompat(plan, into, es, queryString, params, queryEnv, @@ -1467,17 +1449,12 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc, StringInfo explainOptions = makeStringInfo(); appendStringInfo(explainOptions, - "{\"verbose\": %s, \"costs\": %s, \"buffers\": %s, " -#if PG_VERSION_NUM >= PG_VERSION_13 - "\"wal\": %s, " -#endif + "{\"verbose\": %s, \"costs\": %s, \"buffers\": %s, \"wal\": %s, " "\"timing\": %s, \"summary\": %s, \"format\": \"%s\"}", CurrentDistributedQueryExplainOptions.verbose ? "true" : "false", CurrentDistributedQueryExplainOptions.costs ? "true" : "false", CurrentDistributedQueryExplainOptions.buffers ? "true" : "false", -#if PG_VERSION_NUM >= PG_VERSION_13 CurrentDistributedQueryExplainOptions.wal ? "true" : "false", -#endif CurrentDistributedQueryExplainOptions.timing ? "true" : "false", CurrentDistributedQueryExplainOptions.summary ? 
"true" : "false", ExplainFormatStr(CurrentDistributedQueryExplainOptions.format)); @@ -1632,13 +1609,11 @@ ExplainOneQuery(Query *query, int cursorOptions, { instr_time planstart, planduration; - #if PG_VERSION_NUM >= PG_VERSION_13 BufferUsage bufusage_start, bufusage; if (es->buffers) bufusage_start = pgBufferUsage; - #endif INSTR_TIME_SET_CURRENT(planstart); /* plan the query */ @@ -1647,15 +1622,13 @@ ExplainOneQuery(Query *query, int cursorOptions, INSTR_TIME_SET_CURRENT(planduration); INSTR_TIME_SUBTRACT(planduration, planstart); - #if PG_VERSION_NUM >= PG_VERSION_13 - /* calc differences of buffer counters. */ if (es->buffers) { memset(&bufusage, 0, sizeof(BufferUsage)); BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); } - #endif + /* run it (if needed) and produce output */ ExplainOnePlanCompat(plan, into, es, queryString, params, queryEnv, &planduration, (es->buffers ? &bufusage : NULL)); @@ -1696,10 +1669,10 @@ ExplainWorkerPlan(PlannedStmt *plannedstmt, DestReceiver *dest, ExplainState *es if (es->buffers) instrument_option |= INSTRUMENT_BUFFERS; -#if PG_VERSION_NUM >= PG_VERSION_13 + if (es->wal) instrument_option |= INSTRUMENT_WAL; -#endif + /* * We always collect timing for the entire statement, even when node-level * timing is off, so we don't look at es->timing here. (We could skip diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 57582ec97..dec3fee72 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -1490,9 +1490,7 @@ MasterExtendedOpNode(MultiExtendedOp *originalOpNode, masterExtendedOpNode->hasDistinctOn = originalOpNode->hasDistinctOn; masterExtendedOpNode->limitCount = originalOpNode->limitCount; masterExtendedOpNode->limitOffset = originalOpNode->limitOffset; -#if PG_VERSION_NUM >= PG_VERSION_13 masterExtendedOpNode->limitOption = originalOpNode->limitOption; -#endif masterExtendedOpNode->havingQual = newHavingQual; if (!extendedOpNodeProperties->onlyPushableWindowFunctions) @@ -2489,14 +2487,12 @@ WorkerExtendedOpNode(MultiExtendedOp *originalOpNode, workerExtendedOpNode->windowClause = queryWindowClause.workerWindowClauseList; workerExtendedOpNode->sortClauseList = queryOrderByLimit.workerSortClauseList; workerExtendedOpNode->limitCount = queryOrderByLimit.workerLimitCount; -#if PG_VERSION_NUM >= PG_VERSION_13 /* * If the limitCount cannot be pushed down it will be NULL, so the deparser will * ignore the limitOption. 
*/ workerExtendedOpNode->limitOption = originalOpNode->limitOption; -#endif return workerExtendedOpNode; } diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c index 1535beb1e..857327742 100644 --- a/src/backend/distributed/planner/multi_logical_planner.c +++ b/src/backend/distributed/planner/multi_logical_planner.c @@ -1734,9 +1734,7 @@ MultiExtendedOpNode(Query *queryTree, Query *originalQuery) extendedOpNode->sortClauseList = queryTree->sortClause; extendedOpNode->limitCount = queryTree->limitCount; extendedOpNode->limitOffset = queryTree->limitOffset; -#if PG_VERSION_NUM >= PG_VERSION_13 extendedOpNode->limitOption = queryTree->limitOption; -#endif extendedOpNode->havingQual = queryTree->havingQual; extendedOpNode->distinctClause = queryTree->distinctClause; extendedOpNode->hasDistinctOn = queryTree->hasDistinctOn; diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index b8d87c4b7..df1f1bfcb 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -230,9 +230,7 @@ static List * FetchEqualityAttrNumsForRTEOpExpr(OpExpr *opExpr); static List * FetchEqualityAttrNumsForRTEBoolExpr(BoolExpr *boolExpr); static List * FetchEqualityAttrNumsForList(List *nodeList); static int PartitionColumnIndex(Var *targetVar, List *targetList); -#if PG_VERSION_NUM >= PG_VERSION_13 static List * GetColumnOriginalIndexes(Oid relationId); -#endif /* @@ -541,9 +539,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependentJobList) List *sortClauseList = NIL; Node *limitCount = NULL; Node *limitOffset = NULL; -#if PG_VERSION_NUM >= PG_VERSION_13 LimitOption limitOption = LIMIT_OPTION_DEFAULT; -#endif Node *havingQual = NULL; bool hasDistinctOn = false; List *distinctClause = NIL; @@ -625,9 +621,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependentJobList) limitCount = extendedOp->limitCount; limitOffset = extendedOp->limitOffset; -#if PG_VERSION_NUM >= PG_VERSION_13 limitOption = extendedOp->limitOption; -#endif sortClauseList = extendedOp->sortClauseList; havingQual = extendedOp->havingQual; } @@ -683,9 +677,7 @@ BuildJobQuery(MultiNode *multiNode, List *dependentJobList) jobQuery->groupClause = groupClauseList; jobQuery->limitOffset = limitOffset; jobQuery->limitCount = limitCount; -#if PG_VERSION_NUM >= PG_VERSION_13 jobQuery->limitOption = limitOption; -#endif jobQuery->havingQual = havingQual; jobQuery->hasAggs = contain_aggs_of_level((Node *) targetList, 0) || contain_aggs_of_level((Node *) havingQual, 0); @@ -1338,8 +1330,6 @@ static void SetJoinRelatedColumnsCompat(RangeTblEntry *rangeTableEntry, Oid leftRelId, Oid rightRelId, List *leftColumnVars, List *rightColumnVars) { - #if PG_VERSION_NUM >= PG_VERSION_13 - /* We don't have any merged columns so set it to 0 */ rangeTableEntry->joinmergedcols = 0; @@ -1362,13 +1352,9 @@ SetJoinRelatedColumnsCompat(RangeTblEntry *rangeTableEntry, Oid leftRelId, Oid r int rightColsSize = list_length(rightColumnVars); rangeTableEntry->joinrightcols = GeneratePositiveIntSequenceList(rightColsSize); } - - #endif } -#if PG_VERSION_NUM >= PG_VERSION_13 - /* * GetColumnOriginalIndexes gets the original indexes of columns by taking column drops into account. */ @@ -1392,8 +1378,6 @@ GetColumnOriginalIndexes(Oid relationId) } -#endif - /* * ExtractRangeTableId gets the range table id from a node that could * either be a JoinExpr or RangeTblRef. 
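All of the planner hunks above collapse the same way: a "#if PG_VERSION_NUM >= PG_VERSION_13" guard is deleted and its body is kept unconditionally, since PG13 is now the oldest server Citus supports. A minimal before/after sketch of that pattern, reusing the limitOption assignment these hunks touch (illustrative only, not a literal excerpt from the patch):

/* Before: limitOption (LIMIT ... WITH TIES arrived in PG13) was guarded. */
#if PG_VERSION_NUM >= PG_VERSION_13
	jobQuery->limitOption = limitOption;
#endif

/* After: PG13 is the version floor, so the assignment is unconditional. */
	jobQuery->limitOption = limitOption;

The same collapse applies where the pre-PG13 branch was a different spelling rather than a no-op, e.g. getOwnedSequences(relationId) replacing getOwnedSequences(relationId, InvalidAttrNumber) in the worker_drop_protocol.c hunks below.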
diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index d750d09db..ed0dd8195 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -167,10 +167,7 @@ static bool FindQueryContainingRTEIdentityInternal(Node *node, FindQueryContainingRteIdentityContext * context); - -#if PG_VERSION_NUM >= PG_VERSION_13 static int ParentCountPriorToAppendRel(List *appendRelList, AppendRelInfo *appendRelInfo); -#endif /* @@ -398,12 +395,10 @@ SafeToPushdownUnionSubquery(Query *originalQuery, /* * RangeTableOffsetCompat returns the range table offset(in glob->finalrtable) for the appendRelInfo. - * For PG < 13 this is a no op. */ static int RangeTableOffsetCompat(PlannerInfo *root, AppendRelInfo *appendRelInfo) { - #if PG_VERSION_NUM >= PG_VERSION_13 int parentCount = ParentCountPriorToAppendRel(root->append_rel_list, appendRelInfo); int skipParentCount = parentCount - 1; @@ -434,9 +429,6 @@ RangeTableOffsetCompat(PlannerInfo *root, AppendRelInfo *appendRelInfo) */ int parentRelIndex = appendRelInfo->parent_relid - 1; return parentRelIndex - indexInRtable; - #else - return 0; - #endif } @@ -1482,8 +1474,6 @@ AddUnionAllSetOperationsToAttributeEquivalenceClass(AttributeEquivalenceClass * } -#if PG_VERSION_NUM >= PG_VERSION_13 - /* * ParentCountPriorToAppendRel returns the number of parents that come before * the given append rel info. @@ -1506,8 +1496,6 @@ ParentCountPriorToAppendRel(List *appendRelList, AppendRelInfo *targetAppendRelI } -#endif - /* * AddUnionSetOperationsToAttributeEquivalenceClass recursively iterates on all the * setOperations and adds each corresponding target entry to the given equivalence diff --git a/src/backend/distributed/relay/relay_event_utility.c b/src/backend/distributed/relay/relay_event_utility.c index 8f4821bc1..0949f3fbd 100644 --- a/src/backend/distributed/relay/relay_event_utility.c +++ b/src/backend/distributed/relay/relay_event_utility.c @@ -112,7 +112,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) break; } -#if PG_VERSION_NUM >= PG_VERSION_13 case T_AlterStatsStmt: { AlterStatsStmt *alterStatsStmt = (AlterStatsStmt *) parseTree; @@ -124,7 +123,6 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) break; } -#endif case T_AlterTableStmt: { diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index 9b645ead4..d24a1c30c 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -17,13 +17,9 @@ #include "distributed/pg_version_constants.h" -#if PG_VERSION_NUM >= PG_VERSION_12 #include "access/genam.h" -#endif -#if PG_VERSION_NUM >= PG_VERSION_13 #include "postmaster/interrupt.h" -#endif #include "access/htup_details.h" #include "access/sysattr.h" @@ -2039,13 +2035,11 @@ WaitForMiliseconds(long timeout) CHECK_FOR_INTERRUPTS(); } - #if PG_VERSION_NUM >= PG_VERSION_13 if (ConfigReloadPending) { ConfigReloadPending = false; ProcessConfigFile(PGC_SIGHUP); } - #endif } diff --git a/src/backend/distributed/transaction/relation_access_tracking.c b/src/backend/distributed/transaction/relation_access_tracking.c index 53a598e23..9fdb226e1 100644 --- a/src/backend/distributed/transaction/relation_access_tracking.c +++ 
b/src/backend/distributed/transaction/relation_access_tracking.c @@ -29,9 +29,7 @@ #include "distributed/metadata_cache.h" #include "distributed/relation_access_tracking.h" #include "utils/hsearch.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif #include "utils/lsyscache.h" diff --git a/src/backend/distributed/utils/citus_outfuncs.c b/src/backend/distributed/utils/citus_outfuncs.c index 1aa7d8261..c0748ede7 100644 --- a/src/backend/distributed/utils/citus_outfuncs.c +++ b/src/backend/distributed/utils/citus_outfuncs.c @@ -323,9 +323,7 @@ OutMultiExtendedOp(OUTFUNC_ARGS) WRITE_NODE_FIELD(sortClauseList); WRITE_NODE_FIELD(limitCount); WRITE_NODE_FIELD(limitOffset); -#if PG_VERSION_NUM >= PG_VERSION_13 WRITE_ENUM_FIELD(limitOption, LimitOption); -#endif WRITE_NODE_FIELD(havingQual); WRITE_BOOL_FIELD(hasDistinctOn); WRITE_NODE_FIELD(distinctClause); diff --git a/src/backend/distributed/utils/foreign_key_relationship.c b/src/backend/distributed/utils/foreign_key_relationship.c index 84ff21f8c..f10a0dc7a 100644 --- a/src/backend/distributed/utils/foreign_key_relationship.c +++ b/src/backend/distributed/utils/foreign_key_relationship.c @@ -29,9 +29,7 @@ #include "storage/lockdefs.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif #include "utils/inval.h" #include "utils/memutils.h" diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index de6abef9e..42a313b26 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -53,9 +53,7 @@ #include "storage/lmgr.h" #include "storage/lwlock.h" #include "tcop/tcopprot.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif #include "utils/memutils.h" #include "utils/lsyscache.h" diff --git a/src/backend/distributed/utils/task_execution_utils.c b/src/backend/distributed/utils/task_execution_utils.c index 902b483fd..df31fd5c1 100644 --- a/src/backend/distributed/utils/task_execution_utils.c +++ b/src/backend/distributed/utils/task_execution_utils.c @@ -8,9 +8,7 @@ #include "distributed/pg_version_constants.h" -#if PG_VERSION_NUM >= PG_VERSION_13 #include "common/hashfn.h" -#endif #include "commands/dbcommands.h" #include "distributed/citus_custom_scan.h" diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c index 14166e30b..53edac74a 100644 --- a/src/backend/distributed/worker/worker_drop_protocol.c +++ b/src/backend/distributed/worker/worker_drop_protocol.c @@ -18,9 +18,6 @@ #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/pg_depend.h" -#if PG_VERSION_NUM < PG_VERSION_13 -#include "catalog/pg_depend_d.h" -#endif #include "catalog/pg_foreign_server.h" #include "distributed/citus_ruleutils.h" #include "distributed/distribution_column.h" @@ -43,10 +40,6 @@ PG_FUNCTION_INFO_V1(worker_drop_shell_table); PG_FUNCTION_INFO_V1(worker_drop_sequence_dependency); static void WorkerDropDistributedTable(Oid relationId); -#if PG_VERSION_NUM < PG_VERSION_13 -static long deleteDependencyRecordsForSpecific(Oid classId, Oid objectId, char deptype, - Oid refclassId, Oid refobjectId); -#endif /* @@ -131,11 +124,7 @@ WorkerDropDistributedTable(Oid relationId) ObjectAddressSet(*distributedTableObject, RelationRelationId, relationId); /* Drop dependent sequences from pg_dist_object */ - #if PG_VERSION_NUM >= PG_VERSION_13 List *ownedSequences = 
getOwnedSequences(relationId); - #else - List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber); - #endif Oid ownedSequenceOid = InvalidOid; foreach_oid(ownedSequenceOid, ownedSequences) @@ -247,11 +236,7 @@ worker_drop_shell_table(PG_FUNCTION_ARGS) } /* Drop dependent sequences from pg_dist_object */ - #if PG_VERSION_NUM >= PG_VERSION_13 List *ownedSequences = getOwnedSequences(relationId); - #else - List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber); - #endif Oid ownedSequenceOid = InvalidOid; foreach_oid(ownedSequenceOid, ownedSequences) @@ -299,11 +284,7 @@ worker_drop_sequence_dependency(PG_FUNCTION_ARGS) EnsureTableOwner(relationId); /* break the dependent sequences from the table */ - #if PG_VERSION_NUM >= PG_VERSION_13 List *ownedSequences = getOwnedSequences(relationId); - #else - List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber); - #endif Oid ownedSequenceOid = InvalidOid; foreach_oid(ownedSequenceOid, ownedSequences) @@ -322,59 +303,3 @@ worker_drop_sequence_dependency(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } - - -/* *INDENT-OFF* */ -#if PG_VERSION_NUM < PG_VERSION_13 - -/* - * This function is already available on PG 13+. - * deleteDependencyRecordsForSpecific -- delete all records with given depender - * classId/objectId, dependee classId/objectId, of the given deptype. - * Returns the number of records deleted. - */ -static long -deleteDependencyRecordsForSpecific(Oid classId, Oid objectId, char deptype, - Oid refclassId, Oid refobjectId) -{ - long count = 0; - Relation depRel; - ScanKeyData key[2]; - HeapTuple tup; - - depRel = table_open(DependRelationId, RowExclusiveLock); - - ScanKeyInit(&key[0], - Anum_pg_depend_classid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(classId)); - ScanKeyInit(&key[1], - Anum_pg_depend_objid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(objectId)); - - SysScanDesc scan = - systable_beginscan(depRel, DependDependerIndexId, true, - NULL, 2, key); - - while (HeapTupleIsValid(tup = systable_getnext(scan))) - { - Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup); - - if (depform->refclassid == refclassId && - depform->refobjid == refobjectId && - depform->deptype == deptype) - { - CatalogTupleDelete(depRel, &tup->t_self); - count++; - } - } - - systable_endscan(scan); - - table_close(depRel, RowExclusiveLock); - - return count; -} -#endif -/* *INDENT-ON* */ diff --git a/src/include/columnar/columnar_version_compat.h b/src/include/columnar/columnar_version_compat.h index 611b40d15..c40aa6236 100644 --- a/src/include/columnar/columnar_version_compat.h +++ b/src/include/columnar/columnar_version_compat.h @@ -50,8 +50,4 @@ #define ExplainPropertyLong(qlabel, value, es) \ ExplainPropertyInteger(qlabel, NULL, value, es) -#if PG_VERSION_NUM < 130000 -#define detoast_attr(X) heap_tuple_untoast_attr(X) -#endif - #endif /* COLUMNAR_COMPAT_H */ diff --git a/src/include/distributed/distributed_planner.h b/src/include/distributed/distributed_planner.h index c74679e7f..ab1787fbf 100644 --- a/src/include/distributed/distributed_planner.h +++ b/src/include/distributed/distributed_planner.h @@ -199,16 +199,10 @@ typedef struct CitusCustomScanPath } CitusCustomScanPath; -#if PG_VERSION_NUM >= PG_VERSION_13 extern PlannedStmt * distributed_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); -#else -extern PlannedStmt * distributed_planner(Query *parse, - int cursorOptions, - ParamListInfo boundParams); -#endif /* diff --git 
a/src/include/distributed/listutils.h b/src/include/distributed/listutils.h index e4a185b4d..c3facf76f 100644 --- a/src/include/distributed/listutils.h +++ b/src/include/distributed/listutils.h @@ -145,10 +145,7 @@ typedef struct ListCellAndListWrapper * * For more information, see postgres commit with sha * 1cff1b95ab6ddae32faa3efe0d95a820dbfdc164 - */ -#if PG_VERSION_NUM >= PG_VERSION_13 - -/* + * * How it works: * - An index is declared with the name {var}PositionDoNotUse and used * throughout the for loop using ## to concat. @@ -162,9 +159,6 @@ typedef struct ListCellAndListWrapper (var ## PositionDoNotUse) < list_length(l) && \ (((var) = list_nth(l, var ## PositionDoNotUse)) || true); \ var ## PositionDoNotUse ++) -#else -#define foreach_ptr_append(var, l) foreach_ptr(var, l) -#endif /* utility functions declaration shared within this module */ extern List * SortList(List *pointerList, diff --git a/src/include/distributed/multi_logical_planner.h b/src/include/distributed/multi_logical_planner.h index 69da17aca..189170358 100644 --- a/src/include/distributed/multi_logical_planner.h +++ b/src/include/distributed/multi_logical_planner.h @@ -178,9 +178,7 @@ typedef struct MultiExtendedOp List *sortClauseList; Node *limitCount; Node *limitOffset; -#if PG_VERSION_NUM >= PG_VERSION_13 LimitOption limitOption; -#endif Node *havingQual; List *distinctClause; List *windowClause; diff --git a/src/include/distributed/pg_version_constants.h b/src/include/distributed/pg_version_constants.h index 6595c0c28..b3c1a0ed8 100644 --- a/src/include/distributed/pg_version_constants.h +++ b/src/include/distributed/pg_version_constants.h @@ -11,7 +11,6 @@ #ifndef PG_VERSION_CONSTANTS #define PG_VERSION_CONSTANTS -#define PG_VERSION_12 120000 #define PG_VERSION_13 130000 #define PG_VERSION_14 140000 #define PG_VERSION_15 150000 diff --git a/src/include/distributed/version_compat.h b/src/include/distributed/version_compat.h index 81ab508fd..b990b82ef 100644 --- a/src/include/distributed/version_compat.h +++ b/src/include/distributed/version_compat.h @@ -24,14 +24,10 @@ #include "parser/parse_func.h" #include "optimizer/optimizer.h" -#if (PG_VERSION_NUM >= PG_VERSION_13) #include "tcop/tcopprot.h" -#endif #include "pg_version_compat.h" -#if PG_VERSION_NUM >= PG_VERSION_12 - typedef struct { File fd; @@ -76,6 +72,4 @@ FileCompatFromFileStart(File fileDesc) } -#endif /* PG12 */ - #endif /* VERSION_COMPAT_H */ diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index 9c9f4aa34..8fa1f1814 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -123,7 +123,6 @@ RelationGetSmgr(Relation rel) #define ROLE_PG_READ_ALL_STATS DEFAULT_ROLE_READ_ALL_STATS #endif -#if PG_VERSION_NUM >= PG_VERSION_13 #define lnext_compat(l, r) lnext(l, r) #define list_delete_cell_compat(l, c, p) list_delete_cell(l, c) #define pg_plan_query_compat(p, q, c, b) pg_plan_query(p, q, c, b) @@ -137,23 +136,6 @@ RelationGetSmgr(Relation rel) #define SetListCellPtr(a, b) ((a)->ptr_value = (b)) #define RangeTableEntryFromNSItem(a) ((a)->p_rte) #define QueryCompletionCompat QueryCompletion -#else /* pre PG13 */ -#define lnext_compat(l, r) lnext(r) -#define list_delete_cell_compat(l, c, p) list_delete_cell(l, c, p) -#define pg_plan_query_compat(p, q, c, b) pg_plan_query(p, c, b) -#define planner_compat(p, c, b) planner(p, c, b) -#define standard_planner_compat(a, c, d) standard_planner(a, c, d) -#define CMDTAG_SELECT_COMPAT "SELECT" -#define GetSequencesOwnedByRelation(a) getOwnedSequences(a, 
InvalidAttrNumber) -#define GetSequencesOwnedByColumn(a, b) getOwnedSequences(a, b) -#define ExplainOnePlanCompat(a, b, c, d, e, f, g, h) ExplainOnePlan(a, b, c, d, e, f, g) -#define SetListCellPtr(a, b) ((a)->data.ptr_value = (b)) -#define RangeTableEntryFromNSItem(a) (a) -#define QueryCompletionCompat char -#define varattnosyn varoattno -#define varnosyn varnoold -#endif -#if PG_VERSION_NUM >= PG_VERSION_12 #define CreateTableSlotForRel(rel) table_slot_create(rel, NULL) #define MakeSingleTupleTableSlotCompat MakeSingleTupleTableSlot @@ -172,8 +154,6 @@ RelationGetSmgr(Relation rel) #define fcSetArgExt(fc, n, val, is_null) \ (((fc)->args[n].isnull = (is_null)), ((fc)->args[n].value = (val))) -#endif /* PG12 */ - #define fcSetArg(fc, n, value) fcSetArgExt(fc, n, value, false) #define fcSetArgNull(fc, n) fcSetArgExt(fc, n, (Datum) 0, true) diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 2fbb026ed..61d66ac37 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -75,9 +75,6 @@ s/(job_[0-9]+\/task_[0-9]+\/p_[0-9]+\.)[0-9]+/\1xxxx/g # isolation_ref2ref_foreign_keys s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g -# pg11/pg12 varies in isolation debug output -s/s1: DEBUG:/DEBUG:/g - # commands cascading to shard relations s/(NOTICE: .*_)[0-9]{5,}( CASCADE)/\1xxxxx\2/g s/(NOTICE: [a-z]+ cascades to table ".*)_[0-9]{5,}"/\1_xxxxx"/g @@ -93,30 +90,15 @@ s/connectionId: [0-9]+/connectionId: xxxxxxx/g s/ *$//g # pg12 changes -s/Partitioned table "/Table "/g -s/\) TABLESPACE pg_default$/\)/g -s/invalid input syntax for type bigint:/invalid input syntax for integer:/g -s/invalid input syntax for type /invalid input syntax for /g -s/_id_ref_id_fkey/_id_fkey/g -s/_ref_id_id_fkey_/_ref_id_fkey_/g -s/fk_test_2_col1_col2_fkey/fk_test_2_col1_fkey/g -s/_id_other_column_ref_fkey/_id_fkey/g s/"(collections_list_|collection_users_|collection_users_fkey_)[0-9]+"/"\1xxxxxxx"/g # pg13 changes s/of relation ".*" violates not-null constraint/violates not-null constraint/g -s/varnosyn/varnoold/g -s/varattnosyn/varoattno/g /DEBUG: index ".*" can safely use deduplication.*$/d /DEBUG: index ".*" cannot use deduplication.*$/d /DEBUG: building index ".*" on table ".*" serially.*$/d s/partition ".*" would be violated by some row/partition would be violated by some row/g -/.*Peak Memory Usage:.*$/d s/of relation ".*" contains null values/contains null values/g -s/of relation "t1" is violated by some row/is violated by some row/g - -# pg13.1 changes -s/^ERROR: insufficient columns in PRIMARY KEY constraint definition$/ERROR: unique constraint on partitioned table must include all partitioning columns/g # intermediate_results s/(ERROR.*)pgsql_job_cache\/([0-9]+_[0-9]+_[0-9]+)\/(.*).data/\1pgsql_job_cache\/xx_x_xxx\/\3.data/g @@ -157,21 +139,6 @@ s/Citus.*currently supports/Citus currently supports/g s/prepared transaction with identifier .* does not exist/prepared transaction with identifier "citus_x_yyyyyy_zzz_w" does not exist/g s/failed to roll back prepared transaction '.*'/failed to roll back prepared transaction 'citus_x_yyyyyy_zzz_w'/g -# Table aliases for partitioned tables in explain outputs might change -# regardless of postgres appended an _int suffix to alias, we always append _xxx suffix -# Can be removed when we remove support for pg11 and pg12. 
-# "-> Scan on __ _" and -# "-> Scan on __ " becomes -# "-> Scan on __ _xxx" -s/(->.*Scan on\ +)(.*)(_[0-9]+)(_[0-9]+) \2(_[0-9]+|_xxx)?/\1\2\3\4 \2_xxx/g - -# Table aliases for partitioned tables in "Hash Cond:" lines of explain outputs might change -# This is only for multi_partitioning.sql test file -# regardless of postgres appended an _int suffix to alias, we always append _xxx suffix -# Can be removed when we remove support for pg11 and pg12. -s/(partitioning_hash_join_test)(_[0-9]|_xxx)?(\.[a-zA-Z]+)/\1_xxx\3/g -s/(partitioning_hash_test)(_[0-9]|_xxx)?(\.[a-zA-Z]+)/\1_xxx\3/g - # Errors with binary decoding where OIDs should be normalized s/wrong data type: [0-9]+, expected [0-9]+/wrong data type: XXXX, expected XXXX/g diff --git a/src/test/regress/expected/alter_distributed_table.out b/src/test/regress/expected/alter_distributed_table.out index 3ca4d934a..0fb80a3fc 100644 --- a/src/test/regress/expected/alter_distributed_table.out +++ b/src/test/regress/expected/alter_distributed_table.out @@ -1,11 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven; - server_version_above_eleven ---------------------------------------------------------------------- - t -(1 row) - -\gset CREATE SCHEMA alter_distributed_table; SET search_path TO alter_distributed_table; SET citus.shard_count TO 4; @@ -469,7 +461,6 @@ SELECT alter_distributed_table('col_with_ref_to_dist', shard_count:=6, cascade_t (1 row) -\if :server_version_above_eleven -- test altering columnar table CREATE TABLE columnar_table (a INT) USING columnar; SELECT create_distributed_table('columnar_table', 'a', colocate_with:='none'); @@ -496,7 +487,6 @@ SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHE columnar_table | 6 | columnar (1 row) -\endif -- test complex cascade operations CREATE TABLE cas_1 (a INT UNIQUE); CREATE TABLE cas_2 (a INT UNIQUE); diff --git a/src/test/regress/expected/alter_table_set_access_method.out b/src/test/regress/expected/alter_table_set_access_method.out index 7165877d5..81185fe9b 100644 --- a/src/test/regress/expected/alter_table_set_access_method.out +++ b/src/test/regress/expected/alter_table_set_access_method.out @@ -12,13 +12,6 @@ NOTICE: renaming the new table to public.alter_am_pg_version_table (1 row) DROP TABLE alter_am_pg_version_table; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q -\endif CREATE SCHEMA alter_table_set_access_method; SET search_path TO alter_table_set_access_method; SET citus.shard_count TO 4; diff --git a/src/test/regress/expected/citus_local_tables.out b/src/test/regress/expected/citus_local_tables.out index 1c95f578b..da9ba8e1d 100644 --- a/src/test/regress/expected/citus_local_tables.out +++ b/src/test/regress/expected/citus_local_tables.out @@ -641,7 +641,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'citus_local_table_4'::regclass; SELECT column_name_to_column('citus_local_table_4', 'a'); column_name_to_column --------------------------------------------------------------------- - {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} (1 row) SELECT master_update_shard_statistics(shardid) diff --git a/src/test/regress/expected/citus_local_tables_mx.out 
b/src/test/regress/expected/citus_local_tables_mx.out index 8d7a3fd39..27424c7d8 100644 --- a/src/test/regress/expected/citus_local_tables_mx.out +++ b/src/test/regress/expected/citus_local_tables_mx.out @@ -769,8 +769,8 @@ SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition ORDER BY logicalrelid; logicalrelid | partmethod | partkey --------------------------------------------------------------------- - parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} - parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location -1} + parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} + parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 5 :location -1} (2 rows) -- some tests for view propagation on citus local tables diff --git a/src/test/regress/expected/columnar_truncate.out b/src/test/regress/expected/columnar_truncate.out index 4dd93f5fa..79c236229 100644 --- a/src/test/regress/expected/columnar_truncate.out +++ b/src/test/regress/expected/columnar_truncate.out @@ -1,14 +1,6 @@ -- -- Test the TRUNCATE TABLE command for columnar tables. -- --- print whether we're using version > 10 to make version-specific tests clear -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; - version_above_ten ---------------------------------------------------------------------- - t -(1 row) - -- CREATE a columnar table, fill with some data -- CREATE TABLE columnar_truncate_test (a int, b int) USING columnar; CREATE TABLE columnar_truncate_test_second (a int, b int) USING columnar; diff --git a/src/test/regress/expected/columnar_types_without_comparison.out b/src/test/regress/expected/columnar_types_without_comparison.out index df1e141d9..076ec9bb5 100644 --- a/src/test/regress/expected/columnar_types_without_comparison.out +++ b/src/test/regress/expected/columnar_types_without_comparison.out @@ -74,7 +74,7 @@ INSERT INTO test_lseg VALUES ('( 1 , 2 ) , ( 3 , 4 )'); SELECT minimum_value, maximum_value FROM columnar.chunk; minimum_value | maximum_value --------------------------------------------------------------------- - | + | (1 row) SELECT * FROM test_lseg WHERE a = '( 1 , 2 ) , ( 3 , 4 )'; @@ -151,13 +151,6 @@ SELECT * FROM test_user_defined_color WHERE a = 'red'; DROP TABLE test_user_defined_color; DROP TYPE user_defined_color; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q -\endif -- pg_snapshot CREATE TABLE test_pg_snapshot (a pg_snapshot) USING columnar; INSERT INTO test_pg_snapshot VALUES ('10:20:10,14,15'); diff --git a/src/test/regress/expected/columnar_types_without_comparison_0.out b/src/test/regress/expected/columnar_types_without_comparison_0.out deleted file mode 100644 index b4f49e30c..000000000 --- a/src/test/regress/expected/columnar_types_without_comparison_0.out +++ /dev/null @@ -1,159 +0,0 @@ --- --- Testing data types without comparison operators --- If a data type doesn't have comparison operators, we should store NULL for min/max values --- Verify that (1) min/max entries in columnar.chunk is NULL as expected --- (2) we can run queries which has equality conditions in WHERE 
clause for that column with correct results --- --- varchar -CREATE TABLE test_varchar (a varchar) USING columnar; -INSERT INTO test_varchar VALUES ('Hello'); -SELECT minimum_value, maximum_value FROM columnar.chunk; - minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_varchar WHERE a = 'Hello'; - a ---------------------------------------------------------------------- - Hello -(1 row) - -DROP TABLE test_varchar; --- cidr -CREATE TABLE test_cidr (a cidr) USING columnar; -INSERT INTO test_cidr VALUES ('192.168.100.128/25'); -SELECT minimum_value, maximum_value FROM columnar.chunk; - minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_cidr WHERE a = '192.168.100.128/25'; - a ---------------------------------------------------------------------- - 192.168.100.128/25 -(1 row) - -DROP TABLE test_cidr; --- json -CREATE TABLE test_json (a json) USING columnar; -INSERT INTO test_json VALUES ('5'::json); -SELECT minimum_value, maximum_value FROM columnar.chunk; - minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_json WHERE a::text = '5'::json::text; - a ---------------------------------------------------------------------- - 5 -(1 row) - -DROP TABLE test_json; --- line -CREATE TABLE test_line (a line) USING columnar; -INSERT INTO test_line VALUES ('{1, 2, 3}'); -SELECT minimum_value, maximum_value FROM columnar.chunk; - minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_line WHERE a = '{1, 2, 3}'; - a ---------------------------------------------------------------------- - {1,2,3} -(1 row) - -DROP TABLE test_line; --- lseg -CREATE TABLE test_lseg (a lseg) USING columnar; -INSERT INTO test_lseg VALUES ('( 1 , 2 ) , ( 3 , 4 )'); -SELECT minimum_value, maximum_value FROM columnar.chunk; - minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_lseg WHERE a = '( 1 , 2 ) , ( 3 , 4 )'; - a ---------------------------------------------------------------------- - [(1,2),(3,4)] -(1 row) - -DROP TABLE test_lseg; --- path -CREATE TABLE test_path (a path) USING columnar; -INSERT INTO test_path VALUES ('( 1 , 2 ) , ( 3 , 4 ) , ( 5 , 6 )'); -SELECT minimum_value, maximum_value FROM columnar.chunk; - minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_path WHERE a = '( 1 , 2 ) , ( 3 , 4 ) , ( 5 , 6 )'; - a ---------------------------------------------------------------------- - ((1,2),(3,4),(5,6)) -(1 row) - -DROP TABLE test_path; --- txid_snapshot -CREATE TABLE test_txid_snapshot (a txid_snapshot) USING columnar; -INSERT INTO test_txid_snapshot VALUES ('10:20:10,14,15'); -SELECT minimum_value, maximum_value FROM columnar.chunk; - minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_txid_snapshot WHERE a::text = '10:20:10,14,15'::txid_snapshot::text; - a ---------------------------------------------------------------------- - 10:20:10,14,15 -(1 row) - -DROP TABLE test_txid_snapshot; --- xml -CREATE TABLE test_xml (a xml) USING columnar; -INSERT INTO test_xml VALUES ('bar'::xml); -SELECT minimum_value, maximum_value FROM columnar.chunk; - 
minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_xml WHERE a::text = 'bar'::xml::text; - a ---------------------------------------------------------------------- - bar -(1 row) - -DROP TABLE test_xml; --- user defined -CREATE TYPE user_defined_color AS ENUM ('red', 'orange', 'yellow', - 'green', 'blue', 'purple'); -CREATE TABLE test_user_defined_color (a user_defined_color) USING columnar; -INSERT INTO test_user_defined_color VALUES ('red'); -SELECT minimum_value, maximum_value FROM columnar.chunk; - minimum_value | maximum_value ---------------------------------------------------------------------- - | -(1 row) - -SELECT * FROM test_user_defined_color WHERE a = 'red'; - a ---------------------------------------------------------------------- - red -(1 row) - -DROP TABLE test_user_defined_color; -DROP TYPE user_defined_color; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q \ No newline at end of file diff --git a/src/test/regress/expected/cte_inline.out b/src/test/regress/expected/cte_inline.out index 34bdc8fd2..072f076d6 100644 --- a/src/test/regress/expected/cte_inline.out +++ b/src/test/regress/expected/cte_inline.out @@ -9,15 +9,6 @@ SELECT create_distributed_table ('test_table', 'key'); (1 row) INSERT INTO test_table SELECT i % 10, 'test' || i, row_to_json(row(i, i*18, 'test' || i)) FROM generate_series (0, 100) i; --- server version because CTE inlining might produce --- different debug messages in PG 11 vs PG 12 -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 12; - ?column? ---------------------------------------------------------------------- - t -(1 row) - SET client_min_messages TO DEBUG; -- Citus should not inline this CTE because otherwise it cannot -- plan the query diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index bf92d680a..516ff97e6 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -561,7 +561,7 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', '$3' ERROR: cannot distribute the function "eq_with_param_names" since the distribution argument is not valid HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', '$1a'); -ERROR: invalid input syntax for integer: "1a" +ERROR: invalid input syntax for type integer: "1a" -- non existing column name SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', 'aaa'); ERROR: cannot distribute the function "eq_with_param_names" since the distribution argument is not valid diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value.out b/src/test/regress/expected/distributed_types_xact_add_enum_value.out index abc2c88d0..d960abafa 100644 --- a/src/test/regress/expected/distributed_types_xact_add_enum_value.out +++ b/src/test/regress/expected/distributed_types_xact_add_enum_value.out @@ -1,10 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; - version_above_eleven ---------------------------------------------------------------------- - t -(1 row) - SET citus.next_shard_id TO 20040000; CREATE SCHEMA 
xact_enum_type; SET search_path TO xact_enum_type; diff --git a/src/test/regress/expected/follower_single_node.out b/src/test/regress/expected/follower_single_node.out index 4c7a03954..5c26e55e1 100644 --- a/src/test/regress/expected/follower_single_node.out +++ b/src/test/regress/expected/follower_single_node.out @@ -1,6 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 12 AS have_table_am -\gset \c - - - :master_port CREATE SCHEMA single_node; SET search_path TO single_node; @@ -390,11 +387,7 @@ SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y); RESET citus.task_assignment_policy; -- Simple columnar follower test \c -reuse-previous=off regression - - :master_port -\if :have_table_am CREATE TABLE columnar_test (a int, b int) USING columnar; -\else -CREATE TABLE columnar_test (a int, b int); -\endif INSERT INTO columnar_test(a, b) VALUES (1, 1); INSERT INTO columnar_test(a, b) VALUES (1, 2); TRUNCATE columnar_test; diff --git a/src/test/regress/expected/grant_on_foreign_server_propagation.out b/src/test/regress/expected/grant_on_foreign_server_propagation.out index 90a1e2468..b98130404 100644 --- a/src/test/regress/expected/grant_on_foreign_server_propagation.out +++ b/src/test/regress/expected/grant_on_foreign_server_propagation.out @@ -1,14 +1,6 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q -\endif -- -- GRANT_ON_FOREIGN_SERVER_PROPAGATION --- We can't execute this file for PG12, as 'password_required' option for USER MAPPING --- is introduced in PG13. +-- 'password_required' option for USER MAPPING is introduced in PG13. -- CREATE SCHEMA "grant on server"; SET search_path TO "grant on server"; diff --git a/src/test/regress/expected/grant_on_foreign_server_propagation_0.out b/src/test/regress/expected/grant_on_foreign_server_propagation_0.out deleted file mode 100644 index e25fbb82d..000000000 --- a/src/test/regress/expected/grant_on_foreign_server_propagation_0.out +++ /dev/null @@ -1,6 +0,0 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q diff --git a/src/test/regress/expected/intermediate_results.out b/src/test/regress/expected/intermediate_results.out index b8b028ef1..986dddc34 100644 --- a/src/test/regress/expected/intermediate_results.out +++ b/src/test/regress/expected/intermediate_results.out @@ -124,7 +124,7 @@ SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series (1 row) SELECT * FROM read_intermediate_result('squares', 'csv') AS res (x int, x2 int); -ERROR: invalid input syntax for integer: "PGCOPY" +ERROR: invalid input syntax for type integer: "PGCOPY" END; -- try a composite type CREATE TYPE intermediate_results.square_type AS (x text, x2 int); diff --git a/src/test/regress/expected/isolation_select_for_update.out b/src/test/regress/expected/isolation_select_for_update.out index 9965497f2..d43e9323f 100644 --- a/src/test/regress/expected/isolation_select_for_update.out +++ b/src/test/regress/expected/isolation_select_for_update.out @@ -295,13 +295,13 @@ starting permutation: s1-begin s1-select-from-t1-with-subquery s2-begin s2-updat step s1-begin: BEGIN; -DEBUG: Creating router plan +s1: DEBUG: Creating router plan step s1-select-from-t1-with-subquery: SET client_min_messages TO DEBUG2; SELECT * FROM (SELECT * FROM test_table_1_rf1 FOR UPDATE) foo WHERE id = 
1; RESET client_min_messages; -DEBUG: query has a single distribution column value: 1 +s1: DEBUG: query has a single distribution column value: 1 id|val_1 --------------------------------------------------------------------- 1| 2 diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index a620758c0..c8f518807 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -543,7 +543,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -ERROR: invalid input syntax for integer: "non-int" +ERROR: invalid input syntax for type integer: "non-int" ROLLBACK; -- shardMinValue should be smaller than shardMaxValue BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; diff --git a/src/test/regress/expected/multi_create_shards.out b/src/test/regress/expected/multi_create_shards.out index b7e0778e6..6b3a333af 100644 --- a/src/test/regress/expected/multi_create_shards.out +++ b/src/test/regress/expected/multi_create_shards.out @@ -70,7 +70,7 @@ SELECT partmethod, partkey FROM pg_dist_partition WHERE logicalrelid = 'table_to_distribute'::regclass; partmethod | partkey --------------------------------------------------------------------- - h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} + h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} (1 row) -- use a bad shard count diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index bb85ed3d4..a7c53e293 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -2947,7 +2947,7 @@ Custom Scan (Citus Adaptive) -> Insert on users_table_2_570028 citus_table_alias Conflict Resolution: UPDATE Conflict Arbiter Indexes: users_table_2_pkey_570028 - -> Seq Scan on users_table_2_570028 users_table_xxx + -> Seq Scan on users_table_2_570028 users_table_2 EXPLAIN :default_analyze_flags execute p4(20,20); Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Task Count: 1 @@ -2959,7 +2959,7 @@ Custom Scan (Citus Adaptive) (actual rows=0 loops=1) Conflict Arbiter Indexes: users_table_2_pkey_570028 Tuples Inserted: 0 Conflicting Tuples: 0 - -> Seq Scan on users_table_2_570028 users_table_xxx (actual rows=0 loops=1) + -> Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1) -- simple test to confirm we can fetch long (>4KB) plans EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM users_table_2 WHERE value_1::text = 
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X';
 Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
@@ -2969,7 +2969,7 @@ Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
   -> Task
     Tuple data received from node: 0 bytes
     Node: host=localhost port=xxxxx dbname=regression
-    -> Seq Scan on users_table_2_570028 users_table_xxx (actual rows=0 loops=1)
+    -> Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
       Filter: ((value_1)::text = '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X'::text)
 -- sorted explain analyze output
 CREATE TABLE explain_analyze_execution_time (a int);
@@ -3030,7 +3030,7 @@ WindowAgg (actual rows=1 loops=1)
   Tasks Shown: One of 2
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
-    -> Seq Scan on distributed_table_1_570032 distributed_table_xxx (actual rows=1 loops=1)
+    -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
 CREATE TABLE distributed_table_2(a int, b int);
 SELECT create_distributed_table('distributed_table_2','a');
@@ -3051,7 +3051,7 @@ Limit (actual rows=1 loops=1)
   Tasks Shown: One of 2
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
-    -> Seq Scan on distributed_table_1_570032 distributed_table_xxx (actual rows=1 loops=1)
+    -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
   Task Count: 2
   Tuple data received from nodes: 16 bytes
   Tasks Shown: One of 2
@@ -3062,7 +3062,7 @@ Limit (actual rows=1 loops=1)
     -> Nested Loop (actual rows=1 loops=1)
       Join Filter: (distributed_table_2.b = intermediate_result.r)
       -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
-      -> Seq Scan on distributed_table_2_570034 distributed_table_xxx (actual rows=1 loops=1)
+      -> Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=1 loops=1)
 EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery;
 Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   Task Count: 1
@@ -3077,7 +3077,7 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
   Tasks Shown: One of 2
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
-    -> Seq Scan on distributed_table_1_570032 distributed_table_xxx (actual rows=1 loops=1)
+    -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
 CREATE TYPE multi_explain.int_wrapper_type AS (int_field int);
 CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type);
 SELECT create_distributed_table('tbl', 'a');
diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out
index 34aa1c2cc..5d9d3f45a 100644
--- a/src/test/regress/expected/multi_extension.out
+++ b/src/test/regress/expected/multi_extension.out
@@ -5,16 +5,8 @@
 --
 -- It'd be nice to script generation of this file, but alas, that's
 -- not done yet.
--- differentiate the output file for pg11 and versions above, with regards to objects
--- created per citus version depending on the postgres version. Upgrade tests verify the
--- objects are added in citus_finish_pg_upgrade()
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
- version_above_eleven
----------------------------------------------------------------------
- t
-(1 row)
-
+--
+-- Upgrade tests verify the objects are added in citus_finish_pg_upgrade()
 SET citus.next_shard_id TO 580000;
 CREATE SCHEMA multi_extension;
 SELECT $definition$
@@ -952,7 +944,7 @@ DELETE FROM pg_dist_shard WHERE shardid = 1;
 CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id);
 CREATE TABLE orders_2020_07_01 PARTITION OF e_transactions FOR VALUES IN (1,2,3);
-INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}', 7, 's');
+INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
 SELECT
 (metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
 (metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null
diff --git a/src/test/regress/expected/multi_foreign_key.out b/src/test/regress/expected/multi_foreign_key.out
index e0da97847..70863fb87 100644
--- a/src/test/regress/expected/multi_foreign_key.out
+++ b/src/test/regress/expected/multi_foreign_key.out
@@ -303,7 +303,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash');
 INSERT INTO referenced_table VALUES(1, 1);
 INSERT INTO referencing_table VALUES(1, 1);
 UPDATE referenced_table SET test_column = 10 WHERE id = 1;
-ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350385" on table "referencing_table_xxxxxxx"
+ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_id_fkey_1350385" on table "referencing_table_xxxxxxx"
 DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx".
 CONTEXT: while executing command on localhost:xxxxx
 BEGIN;
@@ -343,7 +343,7 @@ INSERT INTO referenced_table VALUES(1, 1);
 INSERT INTO referencing_table VALUES(1, 1);
 BEGIN;
 UPDATE referenced_table SET test_column = 20 WHERE id = 1;
-ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350449" on table "referencing_table_xxxxxxx"
+ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_id_fkey_1350449" on table "referencing_table_xxxxxxx"
 DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_xxxxxxx".
 CONTEXT: while executing command on localhost:xxxxx
 UPDATE referencing_table SET id = 20 WHERE ref_id = 1;
@@ -404,7 +404,7 @@ SELECT create_distributed_table('referencing_table', 'ref_id', 'hash');
 (1 row)
 
 INSERT INTO referencing_table VALUES(null, 2);
-ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_fkey_1350600"
+ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_ref_id_id_fkey_1350600"
 DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
 CONTEXT: while executing command on localhost:xxxxx
 SELECT * FROM referencing_table;
@@ -787,7 +787,7 @@ SELECT create_distributed_table('self_referencing_table1', 'id', 'hash');
 INSERT INTO self_referencing_table1 VALUES(1, 1, 1);
 -- we expect this query to fail
 INSERT INTO self_referencing_table1 VALUES(1, 2, 3);
-ERROR: insert or update on table "self_referencing_table1_1350640" violates foreign key constraint "self_referencing_table1_id_fkey_1350640"
+ERROR: insert or update on table "self_referencing_table1_1350640" violates foreign key constraint "self_referencing_table1_id_other_column_ref_fkey_1350640"
 DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table1_1350640".
 CONTEXT: while executing command on localhost:xxxxx
 -- verify that rows are actually inserted
diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out
index 207daef16..280a174da 100644
--- a/src/test/regress/expected/multi_metadata_sync.out
+++ b/src/test/regress/expected/multi_metadata_sync.out
@@ -463,7 +463,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 2 | s | f
+ mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
 (1 row)
 
 SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
@@ -562,7 +562,7 @@ SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
 SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass;
 Constraint | Definition
---------------------------------------------------------------------
- fk_test_2_col1_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
+ fk_test_2_col1_col2_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
 (1 row)
 
 \c - - - :master_port
@@ -602,7 +602,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 2 | s | f
+ mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
 (1 row)
 
 SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out
index 618c563de..6c8365f85 100644
--- a/src/test/regress/expected/multi_partitioning.out
+++ b/src/test/regress/expected/multi_partitioning.out
@@ -451,7 +451,7 @@ SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1;
 -- create default partition
 CREATE TABLE partitioning_test_default PARTITION OF partitioning_test DEFAULT;
 \d+ partitioning_test
- Table "public.partitioning_test"
+ Partitioned table "public.partitioning_test"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 id | integer | | | | plain | |
diff --git a/src/test/regress/expected/multi_partitioning_utils.out b/src/test/regress/expected/multi_partitioning_utils.out
index 7a502aeff..2368e920f 100644
--- a/src/test/regress/expected/multi_partitioning_utils.out
+++ b/src/test/regress/expected/multi_partitioning_utils.out
@@ -119,7 +119,7 @@ SELECT generate_alter_table_attach_partition_command('date_partition_2007');
 -- detach and attach the partition by the command generated by us
 \d+ date_partitioned_table
- Table "public.date_partitioned_table"
+ Partitioned table "public.date_partitioned_table"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 id | integer | | | | plain | |
@@ -136,7 +136,7 @@ SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_tabl
 -- check that both partitions are visiable
 \d+ date_partitioned_table
- Table "public.date_partitioned_table"
+ Partitioned table "public.date_partitioned_table"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 id | integer | | | | plain | |
@@ -160,7 +160,7 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_
 -- the hierarcy is successfully created
 \d+ date_partitioned_table_100
- Table "public.date_partitioned_table_100"
+ Partitioned table "public.date_partitioned_table_100"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 id | integer | | | | plain | |
@@ -187,7 +187,7 @@ SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_
 -- the hierarcy is successfully broken
 \d+ date_partitioned_table_100
- Table "public.date_partitioned_table_100"
+ Partitioned table "public.date_partitioned_table_100"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 id | integer | | | | plain | |
@@ -243,7 +243,7 @@ SELECT public.generate_alter_table_attach_partition_command('child_2');
 SET search_path = 'partition_parent_schema';
 -- detach and attach the partition by the command generated by us
 \d+ parent_table
- Table "partition_parent_schema.parent_table"
+ Partitioned table "partition_parent_schema.parent_table"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 id | integer | | not null | | plain | |
@@ -260,7 +260,7 @@ SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'p
 -- check that both partitions are visiable
 \d+ parent_table
- Table "partition_parent_schema.parent_table"
+ Partitioned table "partition_parent_schema.parent_table"
 Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
---------------------------------------------------------------------
 id | integer | | not null | | plain | |
diff --git a/src/test/regress/expected/multi_prune_shard_list.out b/src/test/regress/expected/multi_prune_shard_list.out
index 26adfa0c4..7bbfaeb88 100644
--- a/src/test/regress/expected/multi_prune_shard_list.out
+++ b/src/test/regress/expected/multi_prune_shard_list.out
@@ -86,7 +86,7 @@ SELECT prune_using_both_values('pruning', 'tomato', 'rose');
 SELECT debug_equality_expression('pruning');
 debug_equality_expression
---------------------------------------------------------------------
- {OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1}
+ {OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1}
 (1 row)
 
 -- print the initial ordering of shard intervals
diff --git a/src/test/regress/expected/multi_tenant_isolation.out b/src/test/regress/expected/multi_tenant_isolation.out
index baf2869bc..2a3b658b5 100644
--- a/src/test/regress/expected/multi_tenant_isolation.out
+++ b/src/test/regress/expected/multi_tenant_isolation.out
@@ -182,7 +182,7 @@ ERROR: cannot isolate tenant because "lineitem_streaming" has colocated tables
 HINT: Use CASCADE option to isolate tenants for the colocated tables too.
 Example usage: isolate_tenant_to_new_shard('lineitem_streaming', '100', 'CASCADE')
 -- check with an input not castable to bigint
 SELECT isolate_tenant_to_new_shard('lineitem_streaming', 'abc', 'CASCADE');
-ERROR: invalid input syntax for integer: "abc"
+ERROR: invalid input syntax for type bigint: "abc"
 SELECT isolate_tenant_to_new_shard('lineitem_streaming', 100, 'CASCADE');
 isolate_tenant_to_new_shard
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/multi_truncate.out b/src/test/regress/expected/multi_truncate.out
index b010a2c5d..5553a0ecd 100644
--- a/src/test/regress/expected/multi_truncate.out
+++ b/src/test/regress/expected/multi_truncate.out
@@ -499,9 +499,10 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
 (1 row)
 
 ALTER TABLE t1 ADD CONSTRAINT t1_a_check CHECK(a > 2) NOT VALID;
--- will error out with "ERROR: CHECK CONSTRAINT "t1_a_check" is violated by some row"
+-- will error out with
+-- "ERROR: CHECK CONSTRAINT "t1_a_check" of relation "t1" is violated by some row"
 ALTER TABLE t1 VALIDATE CONSTRAINT t1_a_check;
-ERROR: check constraint "t1_a_check" is violated by some row
+ERROR: check constraint "t1_a_check" of relation "t1" is violated by some row
 -- remove violating row
 DELETE FROM t1 where a = 1;
 -- verify no rows in t1
@@ -512,7 +513,7 @@ SELECT * FROM t1;
 
 -- this will still error out
 ALTER TABLE t1 VALIDATE CONSTRAINT t1_a_check;
-ERROR: check constraint "t1_a_check" is violated by some row
+ERROR: check constraint "t1_a_check" of relation "t1" is violated by some row
 -- The check will pass when the local copies are truncated
 SELECT truncate_local_data_after_distributing_table('t1');
 truncate_local_data_after_distributing_table
diff --git a/src/test/regress/expected/partition_wise_join.out b/src/test/regress/expected/partition_wise_join.out
index 63ae67af3..7314ce277 100644
--- a/src/test/regress/expected/partition_wise_join.out
+++ b/src/test/regress/expected/partition_wise_join.out
@@ -86,15 +86,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
     -> Hash Join
-      Hash Cond: ((partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id) AND (partitioning_hash_join_test_xxx.subid = partitioning_hash_test_xxx.subid))
+      Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
       -> Append
-        -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
-        -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
-        -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+        -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+        -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+        -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
       -> Hash
         -> Append
-          -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
-          -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+          -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+          -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 (15 rows)
 
 -- set partition-wise join on and parallel to off
@@ -124,15 +124,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
     -> Hash Join
-      Hash Cond: ((partitioning_hash_test_xxx.id = partitioning_hash_join_test_xxx.id) AND (partitioning_hash_test_xxx.subid = partitioning_hash_join_test_xxx.subid))
+      Hash Cond: ((partitioning_hash_test.id = partitioning_hash_join_test.id) AND (partitioning_hash_test.subid = partitioning_hash_join_test.subid))
      -> Append
-        -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
-        -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+        -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+        -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
       -> Hash
         -> Append
-          -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
-          -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
-          -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+          -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+          -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+          -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
 (15 rows)
 
 -- note that partition-wise joins only work when partition key is in the join
@@ -148,15 +148,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id)
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
     -> Hash Join
-      Hash Cond: (partitioning_hash_test_xxx.id = partitioning_hash_join_test_xxx.id)
+      Hash Cond: (partitioning_hash_test.id = partitioning_hash_join_test.id)
       -> Append
-        -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
-        -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+        -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+        -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
       -> Hash
         -> Append
-          -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
-          -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
-          -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+          -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+          -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+          -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
 (15 rows)
 
 -- reset partition-wise join
diff --git a/src/test/regress/expected/partition_wise_join_0.out b/src/test/regress/expected/partition_wise_join_0.out
index 559862094..9dd39a119 100644
--- a/src/test/regress/expected/partition_wise_join_0.out
+++ b/src/test/regress/expected/partition_wise_join_0.out
@@ -86,15 +86,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
     -> Hash Join
-      Hash Cond: ((partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id) AND (partitioning_hash_join_test_xxx.subid = partitioning_hash_test_xxx.subid))
+      Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
       -> Append
-        -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
-        -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
-        -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+        -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+        -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+        -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
       -> Hash
         -> Append
-          -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
-          -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+          -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+          -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 (15 rows)
 
 -- set partition-wise join on and parallel to off
@@ -124,15 +124,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id,
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
     -> Hash Join
-      Hash Cond: ((partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id) AND (partitioning_hash_join_test_xxx.subid = partitioning_hash_test_xxx.subid))
+      Hash Cond: ((partitioning_hash_join_test.id = partitioning_hash_test.id) AND (partitioning_hash_join_test.subid = partitioning_hash_test.subid))
       -> Append
-        -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
-        -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
-        -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+        -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+        -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+        -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
       -> Hash
         -> Append
-          -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
-          -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+          -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+          -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 (15 rows)
 
 -- note that partition-wise joins only work when partition key is in the join
@@ -148,15 +148,15 @@ SELECT * FROM partitioning_hash_test JOIN partitioning_hash_join_test USING (id)
   -> Task
     Node: host=localhost port=xxxxx dbname=regression
     -> Hash Join
-      Hash Cond: (partitioning_hash_join_test_xxx.id = partitioning_hash_test_xxx.id)
+      Hash Cond: (partitioning_hash_join_test.id = partitioning_hash_test.id)
      -> Append
-        -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_xxx
-        -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_xxx
-        -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_xxx
+        -> Seq Scan on partitioning_hash_join_test_0_360163 partitioning_hash_join_test_1
+        -> Seq Scan on partitioning_hash_join_test_1_360167 partitioning_hash_join_test_2
+        -> Seq Scan on partitioning_hash_join_test_2_360171 partitioning_hash_join_test_3
       -> Hash
         -> Append
-          -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_xxx
-          -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_xxx
+          -> Seq Scan on partitioning_hash_test_0_360151 partitioning_hash_test_1
+          -> Seq Scan on partitioning_hash_test_1_360155 partitioning_hash_test_2
 (15 rows)
 
 -- reset partition-wise join
diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out
index dcfd0a8c4..c5e4a11c7 100644
--- a/src/test/regress/expected/pg12.out
+++ b/src/test/regress/expected/pg12.out
@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
-\gset
-\if :server_version_above_eleven
-\else
-\q
-\endif
 SET citus.shard_replication_factor to 1;
 SET citus.next_shard_id TO 60000;
 SET citus.next_placement_id TO 60000;
@@ -667,4 +660,4 @@ DROP USER read_access;
 drop schema test_pg12 cascade;
 NOTICE: drop cascades to 16 other objects
 \set VERBOSITY default
-SET citus.shard_replication_factor to 2;
+SET citus.shard_replication_factor to 2;
\ No newline at end of file
diff --git a/src/test/regress/expected/pg13.out b/src/test/regress/expected/pg13.out
index 7dc52ee52..cba4f954e 100644
--- a/src/test/regress/expected/pg13.out
+++ b/src/test/regress/expected/pg13.out
@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
-\endif
 create schema test_pg13;
 set search_path to test_pg13;
 SET citus.shard_replication_factor to 1;
diff --git a/src/test/regress/expected/pg13_0.out b/src/test/regress/expected/pg13_0.out
deleted file mode 100644
index e25fbb82d..000000000
--- a/src/test/regress/expected/pg13_0.out
+++ /dev/null
@@ -1,6 +0,0 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
diff --git a/src/test/regress/expected/pg13_propagate_statistics.out b/src/test/regress/expected/pg13_propagate_statistics.out
index 583a17d86..9f64aad21 100644
--- a/src/test/regress/expected/pg13_propagate_statistics.out
+++ b/src/test/regress/expected/pg13_propagate_statistics.out
@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
-\endif
 CREATE SCHEMA "statistics'TestTarget";
 SET search_path TO "statistics'TestTarget";
 SET citus.next_shard_id TO 980000;
diff --git a/src/test/regress/expected/pg13_propagate_statistics_0.out b/src/test/regress/expected/pg13_propagate_statistics_0.out
deleted file mode 100644
index e25fbb82d..000000000
--- a/src/test/regress/expected/pg13_propagate_statistics_0.out
+++ /dev/null
@@ -1,6 +0,0 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
diff --git a/src/test/regress/expected/pg13_with_ties.out b/src/test/regress/expected/pg13_with_ties.out
index efa8db940..1587d6bbe 100644
--- a/src/test/regress/expected/pg13_with_ties.out
+++ b/src/test/regress/expected/pg13_with_ties.out
@@ -1,10 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
-\endif
 CREATE TABLE with_ties_table (a INT, b INT);
 SELECT create_distributed_table('with_ties_table', 'a');
 create_distributed_table
diff --git a/src/test/regress/expected/pg13_with_ties_0.out b/src/test/regress/expected/pg13_with_ties_0.out
deleted file mode 100644
index e25fbb82d..000000000
--- a/src/test/regress/expected/pg13_with_ties_0.out
+++ /dev/null
@@ -1,6 +0,0 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve
-\gset
-\if :server_version_above_twelve
-\else
-\q
diff --git a/src/test/regress/expected/pg_dump.out b/src/test/regress/expected/pg_dump.out
index b3c5dab41..9a297f2c5 100644
--- a/src/test/regress/expected/pg_dump.out
+++ b/src/test/regress/expected/pg_dump.out
@@ -1,6 +1,3 @@
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int >= 12 AS have_table_am
-\gset
 CREATE TEMPORARY TABLE output (line text);
 CREATE SCHEMA dumper;
 SET search_path TO 'dumper';
@@ -29,17 +26,9 @@ COPY data TO STDOUT;
 4 {}
 2 {$":9}
 2 {$":9}
-\if :have_table_am
 CREATE TABLE simple_columnar(i INT, t TEXT) USING columnar;
-\else
-CREATE TABLE simple_columnar(i INT, t TEXT);
-\endif
 INSERT INTO simple_columnar VALUES (1, 'one'), (2, 'two');
-\if :have_table_am
 CREATE TABLE dist_columnar(i INT, t TEXT) USING columnar;
-\else
-CREATE TABLE dist_columnar(i INT, t TEXT);
-\endif
 SELECT create_distributed_table('dist_columnar', 'i');
 create_distributed_table
---------------------------------------------------------------------
@@ -123,13 +112,8 @@ COPY dumper."weird.table" ("data.jsonb", "?empty(") TO STDOUT WITH (format csv,
 data.jsonb,?empty(
 "{""weird"": {""table"": ""{:""}}",""
 "{""?\"""": []}",""
--- If server supports table access methods, check to be sure that the
--- recreated table is still columnar. Otherwise, just return true.
-\if :have_table_am
+-- Check to be sure that the recreated table is still columnar.
 \set is_columnar '(SELECT amname=''columnar'' from pg_am where relam=oid)'
-\else
-\set is_columnar TRUE
-\endif
 SELECT :is_columnar AS check_columnar FROM pg_class WHERE oid='simple_columnar'::regclass;
 check_columnar
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out
index ec900db1e..8d43769f1 100644
--- a/src/test/regress/expected/propagate_extension_commands.out
+++ b/src/test/regress/expected/propagate_extension_commands.out
@@ -1,11 +1,3 @@
--- print whether we're using version > 12 to make version-specific tests clear
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS version_above_twelve;
- version_above_twelve
----------------------------------------------------------------------
- t
-(1 row)
-
 CREATE SCHEMA "extension'test";
 -- use a schema name with escape character
 SET search_path TO "extension'test";
diff --git a/src/test/regress/expected/propagate_extension_commands_1.out b/src/test/regress/expected/propagate_extension_commands_1.out
deleted file mode 100644
index fcbde2156..000000000
--- a/src/test/regress/expected/propagate_extension_commands_1.out
+++ /dev/null
@@ -1,643 +0,0 @@
--- print whether we're using version > 12 to make version-specific tests clear
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 12 AS version_above_twelve;
- version_above_twelve
----------------------------------------------------------------------
- f
-(1 row)
-
-CREATE SCHEMA "extension'test";
--- use a schema name with escape character
-SET search_path TO "extension'test";
-SET client_min_messages TO WARNING;
--- create an extension on the given search_path
--- the extension is on contrib, so should be avaliable for the regression tests
-CREATE EXTENSION seg;
--- make sure that both the schema and the extension is distributed
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test');
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-CREATE TABLE test_table (key int, value seg);
-SELECT create_distributed_table('test_table', 'key');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- make sure that the table is also distributed now
-SELECT count(*) from pg_dist_partition where logicalrelid='extension''test.test_table'::regclass;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-CREATE TYPE two_segs AS (seg_1 seg, seg_2 seg);
--- verify that the type that depends on the extension is also marked as distributed
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_type WHERE typname = 'two_segs' AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'extension''test'));
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- now try to run CREATE EXTENSION within a transction block, all should work fine
-BEGIN;
-	CREATE EXTENSION isn WITH SCHEMA public;
-	-- now, try create a reference table relying on the data types
-	-- this should not succeed as we do not distribute extension commands within transaction blocks
-	CREATE TABLE dist_table (key int, value public.issn);
-	SELECT create_distributed_table('dist_table', 'key');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-	-- we can even run queries (sequentially) over the distributed table
-	SELECT * FROM dist_table;
- key | value
----------------------------------------------------------------------
-(0 rows)
-
-	INSERT INTO dist_table VALUES (1, public.issn('1436-4522'));
-	INSERT INTO dist_table SELECT * FROM dist_table RETURNING *;
- key | value
----------------------------------------------------------------------
-   1 | 1436-4522
-(1 row)
-
-COMMIT;
--- make sure that the extension is distributed even if we run create extension in a transaction block
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
-(2 rows)
-
-CREATE TABLE ref_table (a public.issn);
--- now, create a reference table relying on the data types
-SELECT create_reference_table('ref_table');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
--- now, drop the extension, recreate it with an older version and update it to latest version
-DROP EXTENSION isn CASCADE;
-CREATE EXTENSION isn WITH VERSION "1.1";
--- before updating the version, ensure the current version
-SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1.1)
- (localhost,57638,t,1.1)
-(2 rows)
-
--- now, update to a newer version
-ALTER EXTENSION isn UPDATE TO '1.2';
--- show that ALTER EXTENSION is propagated
-SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'isn'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1.2)
- (localhost,57638,t,1.2)
-(2 rows)
-
--- before changing the schema, ensure the current schmea
-SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,extension'test)
- (localhost,57638,t,extension'test)
-(2 rows)
-
--- now change the schema
-ALTER EXTENSION isn SET SCHEMA public;
--- switch back to public schema as we set extension's schema to public
-SET search_path TO public;
--- make sure that the extension is distributed
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- show that the ALTER EXTENSION command is propagated
-SELECT run_command_on_workers($$SELECT nspname from pg_namespace where oid=(SELECT extnamespace FROM pg_extension WHERE extname = 'isn')$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,public)
- (localhost,57638,t,public)
-(2 rows)
-
--- drop the extension finally
-DROP EXTENSION isn CASCADE;
--- now make sure that the reference tables depending on an extension can be succesfully created.
--- we should also ensure that we replicate this reference table (and hence the extension)
--- to new nodes after calling master_activate_node.
--- now, first drop seg and existing objects before next test
-DROP EXTENSION seg CASCADE;
--- but as we have only 2 ports in postgresql tests, let's remove one of the nodes first
--- before remove, first remove the existing relations (due to the other tests)
-DROP SCHEMA "extension'test" CASCADE;
-SELECT 1 from master_remove_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
-        1
-(1 row)
-
--- then create the extension
-CREATE EXTENSION seg;
--- show that the extension is created on existing worker
-SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1)
-(1 row)
-
-SELECT workers.result = pg_extension.extversion AS same_version
-	FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
- same_version
----------------------------------------------------------------------
- t
-(1 row)
-
--- now create the reference table
-CREATE TABLE ref_table_2 (x seg);
-SELECT create_reference_table('ref_table_2');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
--- we also add an old style extension from before extensions which we upgrade to an extension
--- by exercising it before the add node we verify it will create the extension (without upgrading)
--- it on the new worker as well. For this we use the dict_int extension which is in contrib,
--- supports FROM unpackaged, and is relatively small
--- create objects for dict_int manually so we can upgrade from unpacked
-CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
-CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
-CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init );
-SELECT run_command_on_workers($$
-CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init );
-$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,"CREATE TEXT SEARCH TEMPLATE")
-(1 row)
-
-CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template);
-COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers';
-CREATE EXTENSION dict_int FROM unpackaged;
-SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1)
-(1 row)
-
-SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1.0)
-(1 row)
-
--- adding the second node will fail as the text search template needs to be created manually
-SELECT 1 from master_add_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
-        1
-(1 row)
-
--- create the text search template manually on the worker
-\c - - - :worker_2_port
-SET citus.enable_metadata_sync TO false;
-CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
-ERROR: function "dintdict_init" already exists with same argument types
-CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT;
-ERROR: function "dintdict_lexize" already exists with same argument types
-CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init );
-ERROR: duplicate key value violates unique constraint "pg_ts_template_tmplname_index"
-DETAIL: Key (tmplname, tmplnamespace)=(intdict_template, 2200) already exists.
-RESET citus.enable_metadata_sync;
-\c - - - :master_port
-SET client_min_messages TO WARNING;
--- add the second node now
-SELECT 1 from master_add_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
-        1
-(1 row)
-
--- show that the extension is created on both existing and new node
-SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
-(2 rows)
-
-SELECT workers.result = pg_extension.extversion AS same_version
-	FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers, pg_extension WHERE extname = 'seg';
- same_version
----------------------------------------------------------------------
- t
- t
-(2 rows)
-
--- check for the unpackaged extension to be created correctly
-SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
-(2 rows)
-
-SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1.0)
- (localhost,57638,t,1.0)
-(2 rows)
-
--- and similarly check for the reference table
-select count(*) from pg_dist_partition where partmethod='n' and logicalrelid='ref_table_2'::regclass;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='ref_table_2'::regclass;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-DROP TABLE ref_table_2;
--- now test create extension in another transaction block but rollback this time
-BEGIN;
-	CREATE EXTENSION isn WITH VERSION '1.1' SCHEMA public;
-ROLLBACK;
--- at the end of the transaction block, we did not create isn extension in coordinator or worker nodes as we rollback'ed
--- make sure that the extension is not distributed
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'isn');
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
--- and the extension does not exist on workers
-SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'isn'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
-(2 rows)
-
--- give a notice for the following commands saying that it is not
--- propagated to the workers. the user should run it manually on the workers
-CREATE TABLE t1 (A int);
-CREATE VIEW v1 AS select * from t1;
-ALTER EXTENSION seg ADD VIEW v1;
-ALTER EXTENSION seg DROP VIEW v1;
-DROP VIEW v1;
-DROP TABLE t1;
--- drop multiple extensions at the same time
-CREATE EXTENSION isn WITH VERSION '1.1' SCHEMA public;
--- let's create another extension locally
-set citus.enable_ddl_propagation to 'off';
-CREATE EXTENSION pg_buffercache;
-set citus.enable_ddl_propagation to 'on';
-DROP EXTENSION pg_buffercache, isn CASCADE;
-SELECT count(*) FROM pg_extension WHERE extname IN ('pg_buffercache', 'isn');
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
--- drop extension should just work
-DROP EXTENSION seg CASCADE;
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'seg'$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
-(2 rows)
-
--- make sure that the extension is not avaliable anymore as a distributed object
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid = (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-CREATE SCHEMA "extension'test";
-SET search_path TO "extension'test";
--- check restriction for sequential execution
--- enable it and see that create command errors but continues its execution by changing citus.multi_shard_modify_mode TO 'off
-BEGIN;
-	SET LOCAL citus.create_object_propagation TO deferred;
-	CREATE TABLE some_random_table (a int);
-	SELECT create_distributed_table('some_random_table', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-	CREATE EXTENSION seg;
-	CREATE TABLE some_random_table_2 (a int, b seg);
-	SELECT create_distributed_table('some_random_table_2', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-ROLLBACK;
--- show that the CREATE EXTENSION command propagated even if the transaction
--- block is rollbacked, that's a shortcoming of dependency creation logic
-SELECT COUNT(DISTINCT workers.result)
	FROM run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'seg'$$) workers;
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
--- drop the schema and all the objects
-DROP SCHEMA "extension'test" CASCADE;
--- recreate for the next tests
-CREATE SCHEMA "extension'test";
--- use a schema name with escape character
-SET search_path TO "extension'test";
--- remove the node, we'll add back again
-SELECT 1 from master_remove_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
-        1
-(1 row)
-
--- Test extension function incorrect distribution argument
-CREATE TABLE test_extension_function(col varchar);
-CREATE EXTENSION seg;
--- Missing distribution argument
-SELECT create_distributed_function('seg_in(cstring)');
-ERROR: Extension functions(seg_in) without distribution argument are not supported.
--- Missing colocation argument
-SELECT create_distributed_function('seg_in(cstring)', '$1');
-ERROR: cannot distribute the function "seg_in" since there is no table to colocate with
-HINT: Provide a distributed table via "colocate_with" option to create_distributed_function()
--- Incorrect distribution argument
-SELECT create_distributed_function('seg_in(cstring)', '$2', colocate_with:='test_extension_function');
-ERROR: cannot distribute the function "seg_in" since the distribution argument is not valid
-HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function()
--- Colocated table is not distributed
-SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function');
-ERROR: relation test_extension_function is not distributed
-DROP EXTENSION seg;
-SET citus.shard_replication_factor TO 1;
-SELECT create_distributed_table('test_extension_function', 'col', colocate_with := 'none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
--- now, create a type that depends on another type, which
--- finally depends on an extension
-BEGIN;
-	CREATE EXTENSION seg;
-	CREATE EXTENSION isn;
-	CREATE TYPE test_type AS (a int, b seg);
-	CREATE TYPE test_type_2 AS (a int, b test_type);
-	CREATE TABLE t2 (a int, b test_type_2, c issn);
-	SELECT create_distributed_table('t2', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-	CREATE TYPE test_type_3 AS (a int, b test_type, c issn);
-	CREATE TABLE t3 (a int, b test_type_3);
-	SELECT create_reference_table('t3');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
-	-- Distribute an extension-function
-	SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function');
- create_distributed_function
----------------------------------------------------------------------
-
-(1 row)
-
-COMMIT;
--- Check the pg_dist_object
-SELECT pg_proc.proname as DistributedFunction
-FROM pg_catalog.pg_dist_object, pg_proc
-WHERE pg_proc.proname = 'seg_in' and
-pg_proc.oid = pg_catalog.pg_dist_object.objid and
-classid = 'pg_proc'::regclass;
- distributedfunction
----------------------------------------------------------------------
- seg_in
-(1 row)
-
-SELECT run_command_on_workers($$
-SELECT count(*)
-FROM pg_catalog.pg_dist_object, pg_proc
-WHERE pg_proc.proname = 'seg_in' and
-pg_proc.oid = pg_catalog.pg_dist_object.objid and
-classid = 'pg_proc'::regclass;
-$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1)
-(1 row)
-
--- add the node back
-SELECT 1 from master_add_node('localhost', :worker_2_port);
- ?column?
----------------------------------------------------------------------
-        1
-(1 row)
-
--- make sure that both extensions are created on both nodes
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
- count
----------------------------------------------------------------------
- 2
-(1 row)
-
-SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,2)
- (localhost,57638,t,2)
-(2 rows)
-
--- Check the pg_dist_object on the both nodes
-SELECT run_command_on_workers($$
-SELECT count(*)
-FROM pg_catalog.pg_dist_object, pg_proc
-WHERE pg_proc.proname = 'seg_in' and
-pg_proc.oid = pg_catalog.pg_dist_object.objid and
-classid = 'pg_proc'::regclass;
-$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
-(2 rows)
-
-DROP EXTENSION seg CASCADE;
--- Recheck the pg_dist_object
-SELECT pg_proc.proname as DistributedFunction
-FROM pg_catalog.pg_dist_object, pg_proc
-WHERE pg_proc.proname = 'seg_in' and
-pg_proc.oid = pg_catalog.pg_dist_object.objid and
-classid = 'pg_proc'::regclass;
- distributedfunction
----------------------------------------------------------------------
-(0 rows)
-
-SELECT run_command_on_workers($$
-SELECT count(*)
-FROM pg_catalog.pg_dist_object, pg_proc
-WHERE pg_proc.proname = 'seg_in' and
-pg_proc.oid = pg_catalog.pg_dist_object.objid and
-classid = 'pg_proc'::regclass;
-$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
-(2 rows)
-
--- Distribute an extension-function where extension is not in pg_dist_object
-SET citus.enable_ddl_propagation TO false;
-CREATE EXTENSION seg;
-SET citus.enable_ddl_propagation TO true;
--- Check the extension in pg_dist_object
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND
-objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
- count
----------------------------------------------------------------------
- 0
-(1 row)
-
-SELECT run_command_on_workers($$
-SELECT count(*)
-FROM pg_catalog.pg_dist_object, pg_proc
-WHERE pg_proc.proname = 'seg_in' and
-pg_proc.oid = pg_catalog.pg_dist_object.objid and
-classid = 'pg_proc'::regclass;
-$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,0)
- (localhost,57638,t,0)
-(2 rows)
-
-SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function');
- create_distributed_function
----------------------------------------------------------------------
-
-(1 row)
-
--- Recheck the extension in pg_dist_object
-SELECT count(*) FROM pg_catalog.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND
-objid = (SELECT oid FROM pg_extension WHERE extname = 'seg');
- count
----------------------------------------------------------------------
- 1
-(1 row)
-
-SELECT pg_proc.proname as DistributedFunction
-FROM pg_catalog.pg_dist_object, pg_proc
-WHERE pg_proc.proname = 'seg_in' and
-pg_proc.oid = pg_catalog.pg_dist_object.objid and
-classid = 'pg_proc'::regclass;
- distributedfunction
----------------------------------------------------------------------
- seg_in
-(1 row)
-
-SELECT run_command_on_workers($$
-SELECT count(*)
-FROM pg_catalog.pg_dist_object, pg_proc
-WHERE pg_proc.proname = 'seg_in' and
-pg_proc.oid = pg_catalog.pg_dist_object.objid and
-classid = 'pg_proc'::regclass;
-$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,1)
- (localhost,57638,t,1)
-(2 rows)
-
-DROP EXTENSION seg;
-DROP TABLE test_extension_function;
--- Test extension function altering distribution argument
-BEGIN;
-SET citus.shard_replication_factor = 1;
-SET citus.multi_shard_modify_mode TO sequential;
-CREATE TABLE test_extension_function(col1 float8[], col2 float8[]);
-SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE EXTENSION cube;
-SELECT create_distributed_function('cube(float8[], float8[])', '$1', 'test_extension_function');
- create_distributed_function
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT distribution_argument_index FROM pg_catalog.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND
-objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8');
- distribution_argument_index
----------------------------------------------------------------------
- 0
-(1 row)
-
-SELECT create_distributed_function('cube(float8[], float8[])', '$2', 'test_extension_function');
- create_distributed_function
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT distribution_argument_index FROM pg_catalog.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND
-objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8');
- distribution_argument_index
----------------------------------------------------------------------
- 1
-(1 row)
-
-ROLLBACK;
--- Postgres already doesn't allow creating extensions in temp schema but
--- let's have a test for that to track any furher changes in postgres.
-DROP EXTENSION isn CASCADE;
-CREATE EXTENSION isn WITH SCHEMA pg_temp;
-ERROR: schema "pg_temp" does not exist
--- drop the schema and all the objects
-DROP SCHEMA "extension'test" CASCADE;
diff --git a/src/test/regress/expected/sqlsmith_failures.out b/src/test/regress/expected/sqlsmith_failures.out
index 6440cf75f..f2c9854ef 100644
--- a/src/test/regress/expected/sqlsmith_failures.out
+++ b/src/test/regress/expected/sqlsmith_failures.out
@@ -4,9 +4,6 @@ CREATE SCHEMA sqlsmith_failures;
 SET search_path TO sqlsmith_failures, public;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 1280000;
-SHOW server_version \gset
-SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven
-\gset
 begin;
 SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
 create table countries(
@@ -36,7 +33,6 @@ select create_distributed_table('orgs', 'id');
 (1 row)
 
-\if :server_version_above_eleven
 -- pg12 and above support generated columns
 create table users (
   id bigserial
   , org_id bigint references orgs(id)
   , name text
   , created_at timestamptz default now()
   , country_id int -- references countries(id)
   , score bigint generated always as (id + country_id) stored
   , primary key (org_id, id)
 );
-\else
--- pg11 and below don't have generated columns, use a normal column
-create table users (
-    id bigserial
-    , org_id bigint references orgs(id)
-    , name text
-    , created_at timestamptz default now()
-    , country_id int -- references countries(id)
-    , score bigint
-    , primary key (org_id, id)
-);
-\endif
 select create_distributed_table('users', 'org_id');
 create_distributed_table
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/start_stop_metadata_sync.out b/src/test/regress/expected/start_stop_metadata_sync.out
index d3f961124..0126f6fed 100644
--- a/src/test/regress/expected/start_stop_metadata_sync.out
+++ b/src/test/regress/expected/start_stop_metadata_sync.out
@@ -157,12 +157,12 @@ SELECT * FROM test_matview;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'events%' ORDER BY logicalrelid::text;
 logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
- events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
- events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
- events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
- events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
- events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
+ events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f
+ events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f
+ events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1
:vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390012 | s | f + events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f + events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f + events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 1390013 | c | f (6 rows) SELECT count(*) > 0 FROM pg_dist_node; diff --git a/src/test/regress/expected/tableam.out b/src/test/regress/expected/tableam.out index 50e9fcf91..e211e2bf1 100644 --- a/src/test/regress/expected/tableam.out +++ b/src/test/regress/expected/tableam.out @@ -1,10 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q -\endif SET citus.shard_replication_factor to 1; SET citus.next_shard_id TO 60000; SET citus.next_placement_id TO 60000; diff --git a/src/test/regress/expected/upgrade_columnar_after.out b/src/test/regress/expected/upgrade_columnar_after.out index 518cc1590..0da9bb17f 100644 --- a/src/test/regress/expected/upgrade_columnar_after.out +++ b/src/test/regress/expected/upgrade_columnar_after.out @@ -1,10 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q -\endif SET search_path TO upgrade_columnar, public; -- test we retained data SELECT * FROM test_retains_data ORDER BY a; diff --git a/src/test/regress/expected/upgrade_columnar_after_0.out b/src/test/regress/expected/upgrade_columnar_after_0.out deleted file mode 100644 index 3f4eaf233..000000000 --- a/src/test/regress/expected/upgrade_columnar_after_0.out +++ /dev/null @@ -1,6 +0,0 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q diff --git a/src/test/regress/expected/upgrade_columnar_before.out b/src/test/regress/expected/upgrade_columnar_before.out index 8e8494d31..f4257ad17 100644 --- a/src/test/regress/expected/upgrade_columnar_before.out +++ b/src/test/regress/expected/upgrade_columnar_before.out @@ -1,10 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q -\endif -- Test if relying on topological sort of the objects, not their names, works -- fine when re-creating objects during pg_upgrade. ALTER SCHEMA public RENAME TO citus_schema; diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 98fe422a4..63c82f3d7 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -1,11 +1,3 @@ --- print version above 11 (eg. 
12 and above) -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; - version_above_eleven ---------------------------------------------------------------------- - t -(1 row) - -- list all postgres objects belonging to the citus extension SELECT pg_catalog.pg_describe_object(classid, objid, 0) AS description FROM pg_catalog.pg_depend, pg_catalog.pg_extension e diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index 07eebd2d5..1a050c0ea 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -24,7 +24,7 @@ SELECT create_distributed_table('customer_copy_hash', 'c_custkey', shard_count:= COPY customer_copy_hash FROM STDIN; -- Test syntax error COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; -ERROR: invalid input syntax for integer: "1,customer1" +ERROR: invalid input syntax for type integer: "1,customer1" CONTEXT: COPY customer_copy_hash, line 1, column c_custkey: "1,customer1" -- Test invalid option COPY customer_copy_hash (c_custkey,c_name) FROM STDIN (append_to_shard xxxxx); @@ -267,7 +267,7 @@ SET citus.shard_replication_factor TO 2; BEGIN; SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid); -ERROR: invalid input syntax for integer: "notinteger" +ERROR: invalid input syntax for type integer: "notinteger" CONTEXT: COPY customer_copy_append, line 3, column c_custkey: "notinteger" END; -- Test that no shard is created for failing copy @@ -972,7 +972,7 @@ SELECT * FROM copy_jsonb ORDER BY key; -- JSONB parsing error without validation: no line number \COPY copy_jsonb (key, value) FROM STDIN -ERROR: invalid input syntax for json +ERROR: invalid input syntax for type json DETAIL: The input string ended unexpectedly. TRUNCATE copy_jsonb; -- JSONB when there is a complex column should work. Complex columns force @@ -1019,7 +1019,7 @@ SELECT * FROM copy_jsonb ORDER BY key; -- JSONB parsing error with validation: should see line number \COPY copy_jsonb (key, value) FROM STDIN -ERROR: invalid input syntax for json +ERROR: invalid input syntax for type json DETAIL: The input string ended unexpectedly. 
CONTEXT: JSON data, line 1: {"r":255,"g":0,"b":0 COPY copy_jsonb, line 1, column value: "{"r":255,"g":0,"b":0" diff --git a/src/test/regress/sql/alter_distributed_table.sql b/src/test/regress/sql/alter_distributed_table.sql index 8867f660b..96da74504 100644 --- a/src/test/regress/sql/alter_distributed_table.sql +++ b/src/test/regress/sql/alter_distributed_table.sql @@ -1,7 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven; -\gset - CREATE SCHEMA alter_distributed_table; SET search_path TO alter_distributed_table; SET citus.shard_count TO 4; @@ -137,14 +133,12 @@ SELECT alter_distributed_table('col_with_ref_to_ref', shard_count:=10, cascade_t SELECT alter_distributed_table('col_with_ref_to_dist', shard_count:=6, cascade_to_colocated:=true); -\if :server_version_above_eleven -- test altering columnar table CREATE TABLE columnar_table (a INT) USING columnar; SELECT create_distributed_table('columnar_table', 'a', colocate_with:='none'); SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHERE table_name::text = 'columnar_table'; SELECT alter_distributed_table('columnar_table', shard_count:=6); SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHERE table_name::text = 'columnar_table'; -\endif -- test complex cascade operations diff --git a/src/test/regress/sql/alter_table_set_access_method.sql b/src/test/regress/sql/alter_table_set_access_method.sql index 0ffabf664..33d4de0d5 100644 --- a/src/test/regress/sql/alter_table_set_access_method.sql +++ b/src/test/regress/sql/alter_table_set_access_method.sql @@ -4,14 +4,6 @@ CREATE TABLE alter_am_pg_version_table (a INT); SELECT alter_table_set_access_method('alter_am_pg_version_table', 'columnar'); DROP TABLE alter_am_pg_version_table; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q -\endif - CREATE SCHEMA alter_table_set_access_method; SET search_path TO alter_table_set_access_method; diff --git a/src/test/regress/sql/columnar_truncate.sql b/src/test/regress/sql/columnar_truncate.sql index b72e9336c..0e6051967 100644 --- a/src/test/regress/sql/columnar_truncate.sql +++ b/src/test/regress/sql/columnar_truncate.sql @@ -2,10 +2,6 @@ -- Test the TRUNCATE TABLE command for columnar tables. 
-- --- print whether we're using version > 10 to make version-specific tests clear -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 10 AS version_above_ten; - -- CREATE a columnar table, fill with some data -- CREATE TABLE columnar_truncate_test (a int, b int) USING columnar; CREATE TABLE columnar_truncate_test_second (a int, b int) USING columnar; diff --git a/src/test/regress/sql/columnar_types_without_comparison.sql b/src/test/regress/sql/columnar_types_without_comparison.sql index 0bf1e89f3..7cb347e0e 100644 --- a/src/test/regress/sql/columnar_types_without_comparison.sql +++ b/src/test/regress/sql/columnar_types_without_comparison.sql @@ -71,14 +71,6 @@ SELECT * FROM test_user_defined_color WHERE a = 'red'; DROP TABLE test_user_defined_color; DROP TYPE user_defined_color; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q -\endif - -- pg_snapshot CREATE TABLE test_pg_snapshot (a pg_snapshot) USING columnar; INSERT INTO test_pg_snapshot VALUES ('10:20:10,14,15'); diff --git a/src/test/regress/sql/cte_inline.sql b/src/test/regress/sql/cte_inline.sql index a72d75039..28691e35a 100644 --- a/src/test/regress/sql/cte_inline.sql +++ b/src/test/regress/sql/cte_inline.sql @@ -6,11 +6,6 @@ SELECT create_distributed_table ('test_table', 'key'); INSERT INTO test_table SELECT i % 10, 'test' || i, row_to_json(row(i, i*18, 'test' || i)) FROM generate_series (0, 100) i; --- server version because CTE inlining might produce --- different debug messages in PG 11 vs PG 12 -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 12; - SET client_min_messages TO DEBUG; -- Citus should not inline this CTE because otherwise it cannot diff --git a/src/test/regress/sql/distributed_types_xact_add_enum_value.sql b/src/test/regress/sql/distributed_types_xact_add_enum_value.sql index c3eb19beb..7cc9d01a6 100644 --- a/src/test/regress/sql/distributed_types_xact_add_enum_value.sql +++ b/src/test/regress/sql/distributed_types_xact_add_enum_value.sql @@ -1,5 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; SET citus.next_shard_id TO 20040000; CREATE SCHEMA xact_enum_type; diff --git a/src/test/regress/sql/follower_single_node.sql b/src/test/regress/sql/follower_single_node.sql index 482c0b575..71e1dd3bc 100644 --- a/src/test/regress/sql/follower_single_node.sql +++ b/src/test/regress/sql/follower_single_node.sql @@ -1,7 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 12 AS have_table_am -\gset - \c - - - :master_port CREATE SCHEMA single_node; SET search_path TO single_node; @@ -144,11 +140,7 @@ RESET citus.task_assignment_policy; -- Simple columnar follower test \c -reuse-previous=off regression - - :master_port -\if :have_table_am CREATE TABLE columnar_test (a int, b int) USING columnar; -\else -CREATE TABLE columnar_test (a int, b int); -\endif INSERT INTO columnar_test(a, b) VALUES (1, 1); INSERT INTO columnar_test(a, b) VALUES (1, 2); diff --git a/src/test/regress/sql/grant_on_foreign_server_propagation.sql b/src/test/regress/sql/grant_on_foreign_server_propagation.sql index 75504ebb5..d2ecd482b 100644 --- a/src/test/regress/sql/grant_on_foreign_server_propagation.sql +++ b/src/test/regress/sql/grant_on_foreign_server_propagation.sql @@ -1,14 +1,6 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS 
server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q -\endif -- -- GRANT_ON_FOREIGN_SERVER_PROPAGATION --- We can't execute this file for PG12, as 'password_required' option for USER MAPPING --- is introduced in PG13. +-- 'password_required' option for USER MAPPING is introduced in PG13. -- CREATE SCHEMA "grant on server"; SET search_path TO "grant on server"; diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index b52c7cb62..b989051a7 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -5,12 +5,8 @@ -- -- It'd be nice to script generation of this file, but alas, that's -- not done yet. - --- differentiate the output file for pg11 and versions above, with regards to objects --- created per citus version depending on the postgres version. Upgrade tests verify the --- objects are added in citus_finish_pg_upgrade() -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; +-- +-- Upgrade tests verify the objects are added in citus_finish_pg_upgrade() SET citus.next_shard_id TO 580000; CREATE SCHEMA multi_extension; diff --git a/src/test/regress/sql/multi_truncate.sql b/src/test/regress/sql/multi_truncate.sql index fa532fd82..1844dc8d2 100644 --- a/src/test/regress/sql/multi_truncate.sql +++ b/src/test/regress/sql/multi_truncate.sql @@ -290,7 +290,8 @@ INSERT INTO t1 VALUES(1,1); SELECT create_distributed_table('t1', 'a'); ALTER TABLE t1 ADD CONSTRAINT t1_a_check CHECK(a > 2) NOT VALID; --- will error out with "ERROR: CHECK CONSTRAINT "t1_a_check" is violated by some row" +-- will error out with +-- "ERROR: CHECK CONSTRAINT "t1_a_check" of relation "t1" is violated by some row" ALTER TABLE t1 VALIDATE CONSTRAINT t1_a_check; -- remove violating row DELETE FROM t1 where a = 1; diff --git a/src/test/regress/sql/pg12.sql b/src/test/regress/sql/pg12.sql index 4e369106c..db0b3c3fc 100644 --- a/src/test/regress/sql/pg12.sql +++ b/src/test/regress/sql/pg12.sql @@ -1,11 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q -\endif - SET citus.shard_replication_factor to 1; SET citus.next_shard_id TO 60000; SET citus.next_placement_id TO 60000; diff --git a/src/test/regress/sql/pg13.sql b/src/test/regress/sql/pg13.sql index e33751b89..11c1145d7 100644 --- a/src/test/regress/sql/pg13.sql +++ b/src/test/regress/sql/pg13.sql @@ -1,12 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q -\endif - - create schema test_pg13; set search_path to test_pg13; diff --git a/src/test/regress/sql/pg13_propagate_statistics.sql b/src/test/regress/sql/pg13_propagate_statistics.sql index e47b111fb..5b19f793a 100644 --- a/src/test/regress/sql/pg13_propagate_statistics.sql +++ b/src/test/regress/sql/pg13_propagate_statistics.sql @@ -1,11 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q -\endif - CREATE SCHEMA "statistics'TestTarget"; SET search_path TO "statistics'TestTarget"; SET citus.next_shard_id TO 980000; diff --git a/src/test/regress/sql/pg13_with_ties.sql b/src/test/regress/sql/pg13_with_ties.sql index b8ba29bb7..b0a22b4b1 100644 --- a/src/test/regress/sql/pg13_with_ties.sql +++ 
b/src/test/regress/sql/pg13_with_ties.sql @@ -1,12 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS server_version_above_twelve -\gset -\if :server_version_above_twelve -\else -\q -\endif - - CREATE TABLE with_ties_table (a INT, b INT); SELECT create_distributed_table('with_ties_table', 'a'); INSERT INTO with_ties_table VALUES (10, 20), (11, 21), (12, 22), (12, 22), (12, 22), (12, 23), (14, 24); diff --git a/src/test/regress/sql/pg_dump.sql b/src/test/regress/sql/pg_dump.sql index c8c7f45fd..7604238f0 100644 --- a/src/test/regress/sql/pg_dump.sql +++ b/src/test/regress/sql/pg_dump.sql @@ -1,7 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 12 AS have_table_am -\gset - CREATE TEMPORARY TABLE output (line text); CREATE SCHEMA dumper; @@ -29,19 +25,11 @@ COPY data FROM STDIN WITH (format csv, delimiter '|', escape '\'); -- data should now appear twice COPY data TO STDOUT; -\if :have_table_am CREATE TABLE simple_columnar(i INT, t TEXT) USING columnar; -\else -CREATE TABLE simple_columnar(i INT, t TEXT); -\endif INSERT INTO simple_columnar VALUES (1, 'one'), (2, 'two'); -\if :have_table_am CREATE TABLE dist_columnar(i INT, t TEXT) USING columnar; -\else -CREATE TABLE dist_columnar(i INT, t TEXT); -\endif SELECT create_distributed_table('dist_columnar', 'i'); @@ -82,13 +70,8 @@ SELECT create_distributed_table('dist_columnar', 'i'); COPY data (value) TO STDOUT WITH (format csv, force_quote *); COPY dumper."weird.table" ("data.jsonb", "?empty(") TO STDOUT WITH (format csv, force_quote ("?empty("), null 'null', header true); --- If server supports table access methods, check to be sure that the --- recreated table is still columnar. Otherwise, just return true. -\if :have_table_am +-- Check to be sure that the recreated table is still columnar. 
\set is_columnar '(SELECT amname=''columnar'' from pg_am where relam=oid)' -\else -\set is_columnar TRUE -\endif SELECT :is_columnar AS check_columnar FROM pg_class WHERE oid='simple_columnar'::regclass; diff --git a/src/test/regress/sql/propagate_extension_commands.sql b/src/test/regress/sql/propagate_extension_commands.sql index bd0d01cf7..d2a7db039 100644 --- a/src/test/regress/sql/propagate_extension_commands.sql +++ b/src/test/regress/sql/propagate_extension_commands.sql @@ -1,7 +1,3 @@ --- print whether we're using version > 12 to make version-specific tests clear -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 12 AS version_above_twelve; - CREATE SCHEMA "extension'test"; -- use a schema name with escape character diff --git a/src/test/regress/sql/sqlsmith_failures.sql b/src/test/regress/sql/sqlsmith_failures.sql index 8190fdbb8..10a81ff06 100644 --- a/src/test/regress/sql/sqlsmith_failures.sql +++ b/src/test/regress/sql/sqlsmith_failures.sql @@ -5,10 +5,6 @@ SET search_path TO sqlsmith_failures, public; SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 1280000; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset - begin; SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; @@ -28,7 +24,6 @@ create table orgs ( ); select create_distributed_table('orgs', 'id'); -\if :server_version_above_eleven -- pg12 and above support generated columns create table users ( id bigserial @@ -39,18 +34,6 @@ create table users ( , score bigint generated always as (id + country_id) stored , primary key (org_id, id) ); -\else --- pg11 and below don't have generated columns, use a normal column -create table users ( - id bigserial - , org_id bigint references orgs(id) - , name text - , created_at timestamptz default now() - , country_id int -- references countries(id) - , score bigint - , primary key (org_id, id) -); -\endif select create_distributed_table('users', 'org_id'); alter table users add constraint fk_user_country foreign key (country_id) references countries(id); diff --git a/src/test/regress/sql/tableam.sql b/src/test/regress/sql/tableam.sql index 1ddc7a90c..47845492a 100644 --- a/src/test/regress/sql/tableam.sql +++ b/src/test/regress/sql/tableam.sql @@ -1,11 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q -\endif - SET citus.shard_replication_factor to 1; SET citus.next_shard_id TO 60000; SET citus.next_placement_id TO 60000; diff --git a/src/test/regress/sql/upgrade_columnar_after.sql b/src/test/regress/sql/upgrade_columnar_after.sql index df6a7d7da..f2839645c 100644 --- a/src/test/regress/sql/upgrade_columnar_after.sql +++ b/src/test/regress/sql/upgrade_columnar_after.sql @@ -1,11 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven -\else -\q -\endif - SET search_path TO upgrade_columnar, public; -- test we retained data diff --git a/src/test/regress/sql/upgrade_columnar_before.sql b/src/test/regress/sql/upgrade_columnar_before.sql index 1f83a4d5a..027a49dc2 100644 --- a/src/test/regress/sql/upgrade_columnar_before.sql +++ b/src/test/regress/sql/upgrade_columnar_before.sql @@ -1,11 +1,3 @@ -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS server_version_above_eleven -\gset -\if :server_version_above_eleven 
-\else -\q -\endif - -- Test if relying on topological sort of the objects, not their names, works -- fine when re-creating objects during pg_upgrade. ALTER SCHEMA public RENAME TO citus_schema; diff --git a/src/test/regress/sql/upgrade_list_citus_objects.sql b/src/test/regress/sql/upgrade_list_citus_objects.sql index 86a99a3a9..c4932c46a 100644 --- a/src/test/regress/sql/upgrade_list_citus_objects.sql +++ b/src/test/regress/sql/upgrade_list_citus_objects.sql @@ -1,7 +1,3 @@ --- print version above 11 (eg. 12 and above) -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven; - -- list all postgres objects belonging to the citus extension SELECT pg_catalog.pg_describe_object(classid, objid, 0) AS description FROM pg_catalog.pg_depend, pg_catalog.pg_extension e From bbb1da944f871759c6e6c06491e99a2518cdccbf Mon Sep 17 00:00:00 2001 From: Nils Dijk Date: Wed, 20 Jul 2022 18:56:17 +0200 Subject: [PATCH 08/10] allow ./configure to pass without checking the postgres version (#6072) To allow working on initial changes for postgres beta versions, make the version check in `./configure` the default, but optional. Normal users will still get the postgres version check error when building against other postgres versions; however, advanced users can use this flag to force configure to pass and find the compilation errors Citus would run into. Use of the flag is not advised for users who do not understand what it does. --- .circleci/config.yml | 10 +++++----- configure | 35 ++++++++++++++++++++++++++++++++++- configure.ac | 8 +++++++- 3 files changed, 46 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a88e40094..f9c056a83 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -128,7 +128,7 @@ jobs: - run: name: 'Configure' command: | chown -R circleci . - gosu circleci ./configure + gosu circleci ./configure --without-pg-version-check - run: name: 'Enable core dumps' command: | @@ -209,7 +209,7 @@ jobs: - run: name: 'Configure' command: | chown -R circleci . - gosu circleci ./configure + gosu circleci ./configure --without-pg-version-check - run: name: 'Enable core dumps' command: | @@ -283,7 +283,7 @@ jobs: - run: name: 'Configure' command: | chown -R circleci . - gosu circleci ./configure + gosu circleci ./configure --without-pg-version-check - run: name: 'Enable core dumps' command: | @@ -371,7 +371,7 @@ jobs: - run: name: 'Configure' command: | chown -R circleci . - gosu circleci ./configure + gosu circleci ./configure --without-pg-version-check - run: name: 'Enable core dumps' command: | @@ -448,7 +448,7 @@ jobs: - run: name: 'Configure' command: | chown -R circleci .
- gosu circleci ./configure + gosu circleci ./configure --without-pg-version-check - run: name: 'Enable core dumps' command: | diff --git a/configure b/configure index 612180c77..1188bca69 100755 --- a/configure +++ b/configure @@ -644,6 +644,7 @@ LDFLAGS CFLAGS CC vpath_build +with_pg_version_check PATH PG_CONFIG FLEX @@ -692,6 +693,7 @@ ac_subst_files='' ac_user_opts=' enable_option_checking with_extra_version +with_pg_version_check enable_coverage with_libcurl with_reports_hostname @@ -1337,6 +1339,8 @@ Optional Packages: --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-extra-version=STRING append STRING to version + --without-pg-version-check + do not check postgres version during configure --without-libcurl do not use libcurl for anonymous statistics collection --with-reports-hostname=HOSTNAME @@ -2555,7 +2559,36 @@ if test -z "$version_num"; then as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5 fi -if test "$version_num" != '13' -a "$version_num" != '14'; then + + + +# Check whether --with-pg-version-check was given. +if test "${with_pg_version_check+set}" = set; then : + withval=$with_pg_version_check; + case $withval in + yes) + : + ;; + no) + : + ;; + *) + as_fn_error $? "no argument expected for --with-pg-version-check option" "$LINENO" 5 + ;; + esac + +else + with_pg_version_check=yes + +fi + + + + +if test "$with_pg_version_check" = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5 +$as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;} +elif test "$version_num" != '13' -a "$version_num" != '14'; then as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5 else { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5 diff --git a/configure.ac b/configure.ac index 06f78e0b7..a050c7407 100644 --- a/configure.ac +++ b/configure.ac @@ -74,7 +74,13 @@ if test -z "$version_num"; then AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.]) fi -if test "$version_num" != '13' -a "$version_num" != '14'; then +PGAC_ARG_BOOL(with, pg-version-check, yes, + [do not check postgres version during configure]) +AC_SUBST(with_pg_version_check) + +if test "$with_pg_version_check" = no; then + AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)]) +elif test "$version_num" != '13' -a "$version_num" != '14'; then AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.]) else AC_MSG_NOTICE([building against PostgreSQL $version_num]) From 3d569cc49a99eba3cc88727e585914d5876dabed Mon Sep 17 00:00:00 2001 From: Nitish Upreti Date: Wed, 20 Jul 2022 12:24:50 -0700 Subject: [PATCH 09/10] Shard Split support for Columnar and Partitioned Table (#6067) DESCRIPTION: This PR extends support for Partitioned and Columnar tables in the blocking 'citus_split_shard_by_split_points' workflow. Columnar Support: No special handling required; just remove the checks that fail splits for columnar tables and add test coverage. Partitioned Table Support: Skip copying of parent tables, as they are empty. The partitions contain data and are treated as co-located shards that will be copied separately. Attach partitions to their parent on the destination after inserting new shard metadata and before creating foreign key constraints.
MISC: Fix Bug #4949 where blocking shard moves fail if there is a foreign key between partitioned distributed tables (from child to parent). TEST: Added new test 'citus_split_shards_columnar_partitioned' for splitting 'partitioned' and 'columnar + partitioned' tables. Added new test 'shard_move_constraints_blocking' to add coverage for the shard move bug fix. Updated test 'citus_split_shard_by_split_points_negative' to allow columnar and partitioned tables. --- .../distributed/operations/repair_shards.c | 90 +- .../distributed/operations/shard_split.c | 124 ++- src/test/regress/enterprise_schedule | 1 + ...s_split_shard_by_split_points_negative.out | 31 - ...citus_split_shard_columnar_partitioned.out | 812 ++++++++++++++++++ .../shard_move_constraints_blocking.out | 362 ++++++++ src/test/regress/split_schedule | 3 + ...s_split_shard_by_split_points_negative.sql | 22 - ...citus_split_shard_columnar_partitioned.sql | 294 +++++++ .../sql/shard_move_constraints_blocking.sql | 201 +++++ 10 files changed, 1819 insertions(+), 121 deletions(-) create mode 100644 src/test/regress/expected/citus_split_shard_columnar_partitioned.out create mode 100644 src/test/regress/expected/shard_move_constraints_blocking.out create mode 100644 src/test/regress/sql/citus_split_shard_columnar_partitioned.sql create mode 100644 src/test/regress/sql/shard_move_constraints_blocking.sql diff --git a/src/backend/distributed/operations/repair_shards.c b/src/backend/distributed/operations/repair_shards.c index f29f0a75a..7db6d8289 100644 --- a/src/backend/distributed/operations/repair_shards.c +++ b/src/backend/distributed/operations/repair_shards.c @@ -53,6 +53,18 @@ #include "utils/rel.h" #include "utils/syscache.h" +/* local type declarations */ + +/* + * A ShardInterval along with the list of + * DDL commands to be executed for it. + */ +typedef struct ShardCommandList +{ + ShardInterval *shardInterval; + List *ddlCommandList; +} ShardCommandList; + /* local function forward declarations */ static void VerifyTablesHaveReplicaIdentity(List *colocatedTableList); static bool RelationCanPublishAllModifications(Oid relationId); @@ -114,6 +126,8 @@ static List * CopyShardContentsCommandList(ShardInterval *shardInterval, static List * PostLoadShardCreationCommandList(ShardInterval *shardInterval, const char *sourceNodeName, int32 sourceNodePort); +static ShardCommandList * CreateShardCommandList(ShardInterval *shardInterval, + List *ddlCommandList); /* declarations for dynamic loading */ @@ -1129,6 +1143,22 @@ CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeNa } +/* + * CreateShardCommandList creates a struct for a shard interval + * along with the DDL commands to be executed. + */ +static ShardCommandList * +CreateShardCommandList(ShardInterval *shardInterval, List *ddlCommandList) +{ + ShardCommandList *shardCommandList = palloc0( + sizeof(ShardCommandList)); + shardCommandList->shardInterval = shardInterval; + shardCommandList->ddlCommandList = ddlCommandList; + + return shardCommandList; +} + + /* * CopyShardTablesViaBlockWrites copies a shard along with its co-located shards * from a source node to target node via COPY command. While the command is in @@ -1187,10 +1217,28 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, } /* - * Once all shards are created, we can recreate relationships between shards. - * - * Iterate through the colocated shards and create the foreign constraints and - * attach child tables to their parents in a partitioning hierarchy.
+ * Once all shards are copied, we can recreate relationships between shards. + * Create DDL commands to attach child tables to their parents in the partitioning hierarchy. + */ + List *shardIntervalWithDDCommandsList = NIL; + foreach_ptr(shardInterval, shardIntervalList) + { + if (PartitionTable(shardInterval->relationId)) + { + char *attachPartitionCommand = + GenerateAttachShardPartitionCommand(shardInterval); + + ShardCommandList *shardCommandList = CreateShardCommandList( + shardInterval, + list_make1(attachPartitionCommand)); + shardIntervalWithDDCommandsList = lappend(shardIntervalWithDDCommandsList, + shardCommandList); + } + } + + /* + * Iterate through the colocated shards and create DDL commands + * to create the foreign key constraints. */ foreach_ptr(shardInterval, shardIntervalList) { @@ -1201,25 +1249,25 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, &shardForeignConstraintCommandList, &referenceTableForeignConstraintList); - List *commandList = NIL; - commandList = list_concat(commandList, shardForeignConstraintCommandList); - commandList = list_concat(commandList, referenceTableForeignConstraintList); - - if (PartitionTable(shardInterval->relationId)) - { - char *attachPartitionCommand = - GenerateAttachShardPartitionCommand(shardInterval); - - commandList = lappend(commandList, attachPartitionCommand); - } - - char *tableOwner = TableOwner(shardInterval->relationId); - SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, - tableOwner, commandList); - - MemoryContextReset(localContext); + ShardCommandList *shardCommandList = CreateShardCommandList( + shardInterval, + list_concat(shardForeignConstraintCommandList, + referenceTableForeignConstraintList)); + shardIntervalWithDDCommandsList = lappend(shardIntervalWithDDCommandsList, + shardCommandList); } + /* Now execute the partitioning and foreign key constraint creation commands. */ + ShardCommandList *shardCommandList = NULL; + foreach_ptr(shardCommandList, shardIntervalWithDDCommandsList) + { + char *tableOwner = TableOwner(shardCommandList->shardInterval->relationId); + SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, + tableOwner, + shardCommandList->ddlCommandList); + } + + MemoryContextReset(localContext); MemoryContextSwitchTo(oldContext); } diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index 93231797d..d39780e0d 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -82,6 +82,8 @@ static StringInfo CreateSplitCopyCommand(ShardInterval *sourceShardSplitInterval List *workersForPlacementList); static void InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, List *workersForPlacementList); +static void CreatePartitioningHierarchy(List *shardGroupSplitIntervalListList, + List *workersForPlacementList); static void CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList, List *workersForPlacementList); static void TryDropSplitShardsOnFailure(HTAB *mapOfShardToPlacementCreatedByWorkflow); @@ -135,28 +137,6 @@ ErrorIfCannotSplitShard(SplitOperation splitOperation, ShardInterval *sourceShar errdetail("Splitting shards backed by foreign tables " "is not supported."))); } - - /* - * At the moment, we do not support copying a shard if that shard's - * relation is in a colocation group with a partitioned table or partition.
- */ - if (PartitionedTable(colocatedTableId)) - { - char *sourceRelationName = get_rel_name(relationId); - char *colocatedRelationName = get_rel_name(colocatedTableId); - - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot %s of '%s', because it " - "is a partitioned table", - SplitOperationName[splitOperation], - colocatedRelationName), - errdetail("In colocation group of '%s', a partitioned " - "relation exists: '%s'. Citus does not support " - "%s of partitioned tables.", - sourceRelationName, - colocatedRelationName, - SplitOperationName[splitOperation]))); - } } /* check shards with inactive placements */ @@ -213,15 +193,6 @@ ErrorIfCannotSplitShardExtended(SplitOperation splitOperation, SplitTargetName[splitOperation]))); } - if (extern_IsColumnarTableAmTable(shardIntervalToSplit->relationId)) - { - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Cannot %s %s as operation " - "is not supported for Columnar tables.", - SplitOperationName[splitOperation], - SplitTargetName[splitOperation]))); - } - uint32 relationReplicationFactor = TableShardReplicationFactor( shardIntervalToSplit->relationId); if (relationReplicationFactor > 1) @@ -414,8 +385,8 @@ SplitShard(SplitMode splitMode, /* - * ShardIntervalHashCode computes the hash code for a shard from the - * placement's shard id. + * ShardIntervalHashCode computes the hash code for a ShardInterval using + * its shardId. */ static uint32 ShardIntervalHashCode(const void *key, Size keySize) @@ -526,6 +497,12 @@ BlockingShardSplit(SplitOperation splitOperation, shardGroupSplitIntervalListList, workersForPlacementList); + /* + * Up to this point, we performed various subtransactions that may + * require additional clean-up in case of failure. The remaining operations + * going forward are part of the same distributed transaction. + */ + /* * Drop old shards and delete related metadata. Have to do that before * creating the new shard metadata, because there's cross-checks @@ -537,6 +514,10 @@ BlockingShardSplit(SplitOperation splitOperation, InsertSplitChildrenShardMetadata(shardGroupSplitIntervalListList, workersForPlacementList); + /* create partitioning hierarchy, if any */ + CreatePartitioningHierarchy(shardGroupSplitIntervalListList, + workersForPlacementList); + /* * Create foreign keys if exists after the metadata changes happening in * DropShardList() and InsertSplitChildrenShardMetadata() because the foreign @@ -719,23 +700,32 @@ DoSplitCopy(WorkerNode *sourceShardNode, List *sourceColocatedShardIntervalList, forboth_ptr(sourceShardIntervalToCopy, sourceColocatedShardIntervalList, splitShardIntervalList, shardGroupSplitIntervalListList) { - StringInfo splitCopyUdfCommand = CreateSplitCopyCommand(sourceShardIntervalToCopy, - splitShardIntervalList, - destinationWorkerNodesList); + /* + * Skip copying data for partitioned tables, because they contain no + * data themselves. Their partitions do contain data, but those are + * different colocated shards that will be copied separately.
+ */ + if (!PartitionedTable(sourceShardIntervalToCopy->relationId)) + { + StringInfo splitCopyUdfCommand = CreateSplitCopyCommand( + sourceShardIntervalToCopy, + splitShardIntervalList, + destinationWorkerNodesList); - Task *splitCopyTask = CreateBasicTask( - sourceShardIntervalToCopy->shardId, /* jobId */ - taskId, - READ_TASK, - splitCopyUdfCommand->data); + Task *splitCopyTask = CreateBasicTask( + INVALID_JOB_ID, + taskId, + READ_TASK, + splitCopyUdfCommand->data); - ShardPlacement *taskPlacement = CitusMakeNode(ShardPlacement); - SetPlacementNodeMetadata(taskPlacement, sourceShardNode); + ShardPlacement *taskPlacement = CitusMakeNode(ShardPlacement); + SetPlacementNodeMetadata(taskPlacement, sourceShardNode); - splitCopyTask->taskPlacementList = list_make1(taskPlacement); + splitCopyTask->taskPlacementList = list_make1(taskPlacement); - splitCopyTaskList = lappend(splitCopyTaskList, splitCopyTask); - taskId++; + splitCopyTaskList = lappend(splitCopyTaskList, splitCopyTask); + taskId++; + } } ExecuteTaskListOutsideTransaction(ROW_MODIFY_NONE, splitCopyTaskList, @@ -955,6 +945,46 @@ InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, } +/* + * CreatePartitioningHierarchy creates the partitioning + * hierarchy between the shards in shardList, if any. + */ +static void +CreatePartitioningHierarchy(List *shardGroupSplitIntervalListList, + List *workersForPlacementList) +{ + /* Create partition hierarchy between shards */ + List *shardIntervalList = NIL; + + /* + * Iterate over all the shards in the shard group. + */ + foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) + { + ShardInterval *shardInterval = NULL; + WorkerNode *workerPlacementNode = NULL; + + /* + * Iterate over the split shard list for a given shard and attach each partition to its parent. + */ + forboth_ptr(shardInterval, shardIntervalList, workerPlacementNode, + workersForPlacementList) + { + if (PartitionTable(shardInterval->relationId)) + { + char *attachPartitionCommand = + GenerateAttachShardPartitionCommand(shardInterval); + + SendCommandToWorker( + workerPlacementNode->workerName, + workerPlacementNode->workerPort, + attachPartitionCommand); + } + } + } +} + + /* * Create foreign key constraints on the split children shards. */ @@ -1074,7 +1104,7 @@ DropShardList(List *shardIntervalList) /* - * In case of failure, DropShardPlacementList drops shard placements and their metadata from both the + * In case of failure, TryDropSplitShardsOnFailure drops in-progress shard placements from both the * coordinator and mx nodes.
*/ static void diff --git a/src/test/regress/enterprise_schedule b/src/test/regress/enterprise_schedule index 476abb0fe..3acbc3d26 100644 --- a/src/test/regress/enterprise_schedule +++ b/src/test/regress/enterprise_schedule @@ -34,5 +34,6 @@ test: multi_alter_table_row_level_security test: multi_alter_table_row_level_security_escape test: stat_statements test: shard_move_constraints +test: shard_move_constraints_blocking test: logical_rep_consistency test: check_mx diff --git a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out index 5986fa74b..2445c1e18 100644 --- a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out +++ b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out @@ -140,34 +140,3 @@ SELECT citus_split_shard_by_split_points( ARRAY['-1073741826'], ARRAY[:worker_1_node, :worker_2_node]); ERROR: Operation split not supported for shard as replication factor '2' is greater than 1. --- Create distributed table with columnar type. -SET citus.next_shard_id TO 51271400; -CREATE TABLE table_to_split_columnar (id bigserial PRIMARY KEY, value char) USING columnar; -SELECT create_distributed_table('table_to_split_columnar','id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- UDF fails for columnar table. -SELECT citus_split_shard_by_split_points( - 51271400, - ARRAY['-1073741826'], - ARRAY[:worker_1_node, :worker_2_node]); -ERROR: Cannot split shard as operation is not supported for Columnar tables. --- Create distributed table which is partitioned. -SET citus.next_shard_id TO 51271900; -CREATE TABLE table_to_split_partitioned(id integer, dt date) PARTITION BY RANGE(dt); -SELECT create_distributed_table('table_to_split_partitioned','id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- UDF fails for partitioned table. -SELECT citus_split_shard_by_split_points( - 51271900, - ARRAY['-1073741826'], - ARRAY[:worker_1_node, :worker_2_node]); -ERROR: cannot split of 'table_to_split_partitioned', because it is a partitioned table -DETAIL: In colocation group of 'table_to_split_partitioned', a partitioned relation exists: 'table_to_split_partitioned'. Citus does not support split of partitioned tables. diff --git a/src/test/regress/expected/citus_split_shard_columnar_partitioned.out b/src/test/regress/expected/citus_split_shard_columnar_partitioned.out new file mode 100644 index 000000000..b1cebc392 --- /dev/null +++ b/src/test/regress/expected/citus_split_shard_columnar_partitioned.out @@ -0,0 +1,812 @@ +CREATE SCHEMA "citus_split_test_schema_columnar_partitioned"; +SET search_path TO "citus_split_test_schema_columnar_partitioned"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. 
+ CREATE TABLE sensors( + measureid integer, + eventdatetime date, + measure_data jsonb, + PRIMARY KEY (measureid, eventdatetime, measure_data)) + PARTITION BY RANGE(eventdatetime); + -- Table access method is specified on child tables + CREATE TABLE sensorscolumnar( + measureid integer, + eventdatetime date, + measure_data jsonb, + PRIMARY KEY (measureid, eventdatetime, measure_data)) + PARTITION BY RANGE(eventdatetime); + -- Create Partitions of table 'sensors'. + CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01'); + CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); + CREATE TABLE sensors_news PARTITION OF sensors FOR VALUES FROM ('2020-05-01') TO ('2025-01-01'); + CREATE TABLE sensorscolumnar_old PARTITION OF sensorscolumnar FOR VALUES FROM ('2000-01-01') TO ('2020-01-01') USING COLUMNAR; + CREATE TABLE sensorscolumnar_2020_01_01 PARTITION OF sensorscolumnar FOR VALUES FROM ('2020-01-01') TO ('2020-02-01') USING COLUMNAR; + CREATE TABLE sensorscolumnar_news PARTITION OF sensorscolumnar FOR VALUES FROM ('2020-05-01') TO ('2025-01-01') USING COLUMNAR; + -- Create index on parent and child partitions. + CREATE INDEX index_on_parent ON sensors(lower(measureid::text)); + CREATE INDEX index_on_child ON sensors_2020_01_01(lower(measure_data::text)); + CREATE INDEX index_on_parent_columnar ON sensorscolumnar(lower(measureid::text)); + CREATE INDEX index_on_child_columnar ON sensorscolumnar_2020_01_01(lower(measure_data::text)); + ALTER INDEX index_on_parent ALTER COLUMN 1 SET STATISTICS 1000; + ALTER INDEX index_on_child ALTER COLUMN 1 SET STATISTICS 1000; + ALTER INDEX index_on_parent_columnar ALTER COLUMN 1 SET STATISTICS 1000; + ALTER INDEX index_on_child_columnar ALTER COLUMN 1 SET STATISTICS 1000; + -- Create statistics on parent and child partitions. 
+ CREATE STATISTICS s1 (dependencies) ON measureid, eventdatetime FROM sensors; + CREATE STATISTICS s2 (dependencies) ON measureid, eventdatetime FROM sensors_2020_01_01; + CREATE STATISTICS s1_c (dependencies) ON measureid, eventdatetime FROM sensorscolumnar; + CREATE STATISTICS s2_c (dependencies) ON measureid, eventdatetime FROM sensorscolumnar_2020_01_01; + CLUSTER sensors_2020_01_01 USING index_on_child; + SELECT create_distributed_table('sensors', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + + SELECT create_distributed_table('sensorscolumnar', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + + -- create colocated distributed tables + CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); + SELECT create_distributed_table('colocated_dist_table', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + + CLUSTER colocated_dist_table USING colocated_dist_table_pkey; + CREATE TABLE colocated_partitioned_table( + measureid integer, + eventdatetime date, + PRIMARY KEY (measureid, eventdatetime)) + PARTITION BY RANGE(eventdatetime); + CREATE TABLE colocated_partitioned_table_2020_01_01 PARTITION OF colocated_partitioned_table FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); + SELECT create_distributed_table('colocated_partitioned_table', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + + CLUSTER colocated_partitioned_table_2020_01_01 USING colocated_partitioned_table_2020_01_01_pkey; + -- create reference tables + CREATE TABLE reference_table (measureid integer PRIMARY KEY); + SELECT create_reference_table('reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + + SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + INNER JOIN pg_catalog.pg_namespace ns ON cls.relnamespace = ns.oid + WHERE node.noderole = 'primary' AND ns.nspname = 'citus_split_test_schema_columnar_partitioned' + ORDER BY logicalrelid, shardminvalue::BIGINT; + shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport +--------------------------------------------------------------------- + 8970000 | sensors | -2147483648 | 2147483647 | localhost | 57637 + 8970004 | sensorscolumnar | -2147483648 | 2147483647 | localhost | 57637 + 8970001 | sensors_old | -2147483648 | 2147483647 | localhost | 57637 + 8970002 | sensors_2020_01_01 | -2147483648 | 2147483647 | localhost | 57637 + 8970003 | sensors_news | -2147483648 | 2147483647 | localhost | 57637 + 8970005 | sensorscolumnar_old | -2147483648 | 2147483647 | localhost | 57637 + 8970006 | sensorscolumnar_2020_01_01 | -2147483648 | 2147483647 | localhost | 57637 + 8970007 | sensorscolumnar_news | -2147483648 | 2147483647 | localhost | 57637 + 8970008 | colocated_dist_table | -2147483648 | 2147483647 | localhost | 57637 + 8970009 | colocated_partitioned_table | -2147483648 | 2147483647 | localhost | 57637 + 8970010 | colocated_partitioned_table_2020_01_01 | -2147483648 | 2147483647 | localhost | 57637 + 8970011 | 
reference_table | | | localhost | 57637 + 8970011 | reference_table | | | localhost | 57638 +(13 rows) + +-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. +-- BEGIN: Create constraints for tables. + -- from parent to regular dist + ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); + -- from parent to parent + ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_parent FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table(measureid, eventdatetime); + -- from parent to child + ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_child FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid, eventdatetime); + ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); + -- from child to regular dist + ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); + -- from child to parent + ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_parent FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table(measureid,eventdatetime); + -- from child to child + ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_child FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid,eventdatetime); + ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); + -- No support for foreign keys, unique constraints, or exclusion constraints in columnar tables. + -- Please see: https://github.com/citusdata/citus/tree/main/src/backend/columnar/README.md +-- END: Create constraints for tables. 
+-- BEGIN: Load data into tables + INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; + INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; + INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; + INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; + INSERT INTO sensorscolumnar SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; +-- END: Load data into tables +-- BEGIN: Show the current state on workers +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_2020_01_01_8970002 | fkey_from_child_to_child_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid) + sensors_2020_01_01_8970002 | fkey_from_child_to_dist_8970002 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid) + sensors_2020_01_01_8970002 | fkey_from_child_to_parent_8970002 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid) + sensors_2020_01_01_8970002 | fkey_from_child_to_ref_8970002 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8970002 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid) + sensors_2020_01_01_8970002 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid) + sensors_2020_01_01_8970002 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid) + sensors_2020_01_01_8970002 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8970002 | sensors_2020_01_01_8970002_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid) + sensors_8970000 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid) + sensors_8970000 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid) + sensors_8970000 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid) + sensors_8970000 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_8970000 | sensors_8970000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid) + sensors_news_8970003 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid) + sensors_news_8970003 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES 
colocated_dist_table_8970008(measureid)
+ sensors_news_8970003 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
+ sensors_news_8970003 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
+ sensors_old_8970001 | fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970010(eventdatetime, measureid)
+ sensors_old_8970001 | fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970008(measureid)
+ sensors_old_8970001 | fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970009(eventdatetime, measureid)
+ sensors_old_8970001 | fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
+(22 rows)
+
+ SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
+ tablename | indexdef
+---------------------------------------------------------------------
+ colocated_dist_table_8970008 | CREATE UNIQUE INDEX colocated_dist_table_pkey_8970008 ON citus_split_test_schema_columnar_partitioned.colocated_dist_table_8970008 USING btree (measureid)
+ colocated_partitioned_table_2020_01_01_8970010 | CREATE UNIQUE INDEX colocated_partitioned_table_2020_01_01_pkey_8970010 ON citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_2020_01_01_8970010 USING btree (measureid, eventdatetime)
+ colocated_partitioned_table_8970009 | CREATE UNIQUE INDEX colocated_partitioned_table_pkey_8970009 ON ONLY citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_8970009 USING btree (measureid, eventdatetime)
+ reference_table_8970011 | CREATE UNIQUE INDEX reference_table_pkey_8970011 ON citus_split_test_schema_columnar_partitioned.reference_table_8970011 USING btree (measureid)
+ sensors_2020_01_01_8970002 | CREATE INDEX index_on_child_8970002 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8970002 USING btree (lower((measure_data)::text))
+ sensors_2020_01_01_8970002 | CREATE INDEX sensors_2020_01_01_lower_idx_8970002 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8970002 USING btree (lower((measureid)::text))
+ sensors_2020_01_01_8970002 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8970002 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8970002 USING btree (measureid, eventdatetime, measure_data)
+ sensors_8970000 | CREATE INDEX index_on_parent_8970000 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8970000 USING btree (lower((measureid)::text))
+ sensors_8970000 | CREATE UNIQUE INDEX sensors_pkey_8970000 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8970000 USING btree (measureid, eventdatetime, measure_data)
+ sensors_news_8970003 | CREATE INDEX sensors_news_lower_idx_8970003 ON citus_split_test_schema_columnar_partitioned.sensors_news_8970003 USING btree (lower((measureid)::text))
+ sensors_news_8970003 | CREATE UNIQUE INDEX sensors_news_pkey_8970003 ON citus_split_test_schema_columnar_partitioned.sensors_news_8970003 USING btree (measureid, eventdatetime, measure_data)
+ sensors_old_8970001 | CREATE INDEX sensors_old_lower_idx_8970001 ON citus_split_test_schema_columnar_partitioned.sensors_old_8970001 USING btree (lower((measureid)::text))
+ sensors_old_8970001 | CREATE UNIQUE INDEX sensors_old_pkey_8970001 ON citus_split_test_schema_columnar_partitioned.sensors_old_8970001 USING btree (measureid, eventdatetime, measure_data)
+ sensorscolumnar_2020_01_01_8970006 | CREATE INDEX index_on_child_columnar_8970006 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8970006 USING btree (lower((measure_data)::text))
+ sensorscolumnar_2020_01_01_8970006 | CREATE INDEX sensorscolumnar_2020_01_01_lower_idx_8970006 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8970006 USING btree (lower((measureid)::text))
+ sensorscolumnar_2020_01_01_8970006 | CREATE UNIQUE INDEX sensorscolumnar_2020_01_01_pkey_8970006 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8970006 USING btree (measureid, eventdatetime, measure_data)
+ sensorscolumnar_8970004 | CREATE INDEX index_on_parent_columnar_8970004 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8970004 USING btree (lower((measureid)::text))
+ sensorscolumnar_8970004 | CREATE UNIQUE INDEX sensorscolumnar_pkey_8970004 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8970004 USING btree (measureid, eventdatetime, measure_data)
+ sensorscolumnar_news_8970007 | CREATE INDEX sensorscolumnar_news_lower_idx_8970007 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8970007 USING btree (lower((measureid)::text))
+ sensorscolumnar_news_8970007 | CREATE UNIQUE INDEX sensorscolumnar_news_pkey_8970007 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8970007 USING btree (measureid, eventdatetime, measure_data)
+ sensorscolumnar_old_8970005 | CREATE INDEX sensorscolumnar_old_lower_idx_8970005 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8970005 USING btree (lower((measureid)::text))
+ sensorscolumnar_old_8970005 | CREATE UNIQUE INDEX sensorscolumnar_old_pkey_8970005 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8970005 USING btree (measureid, eventdatetime, measure_data)
+(22 rows)
+
+ SELECT stxname FROM pg_statistic_ext
+ WHERE stxnamespace IN (
+ SELECT oid
+ FROM pg_namespace
+ WHERE nspname IN ('citus_split_test_schema_columnar_partitioned')
+ )
+ ORDER BY stxname ASC;
+ stxname
+---------------------------------------------------------------------
+ s1
+ s1_8970000
+ s1_c
+ s1_c_8970004
+ s2
+ s2_8970002
+ s2_c
+ s2_c_8970006
+(8 rows)
+
+ \c - - - :worker_2_port
+ SET search_path TO "citus_split_test_schema_columnar_partitioned";
+ SET citus.show_shards_for_app_name_prefixes = '*';
+ SELECT tbl.relname, fk."Constraint", fk."Definition"
+ FROM pg_catalog.pg_class tbl
+ JOIN public.table_fkeys fk on tbl.oid = fk.relid
+ WHERE tbl.relname like '%_89%'
+ ORDER BY 1, 2;
+ relname | Constraint | Definition
+---------------------------------------------------------------------
+(0 rows)
+
+ SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
+ tablename | indexdef
+---------------------------------------------------------------------
+ reference_table_8970011 | CREATE UNIQUE INDEX reference_table_pkey_8970011 ON citus_split_test_schema_columnar_partitioned.reference_table_8970011 USING btree (measureid)
+(1 row)
+
+ SELECT stxname FROM pg_statistic_ext
+ WHERE stxnamespace IN (
+ SELECT oid
+ FROM pg_namespace
+ WHERE nspname IN ('citus_split_test_schema_columnar_partitioned')
+ )
+ ORDER BY stxname ASC;
+ stxname
+---------------------------------------------------------------------
+ s1
+ s1_c
+ s2
+ s2_c
+(4 rows)
+
+-- END: Show the current state on workers
+-- BEGIN: Split a shard along its co-located shards
+\c - - - :master_port
+ SET search_path TO "citus_split_test_schema_columnar_partitioned";
+ SET citus.next_shard_id TO 8999000;
+ SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
+ SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
+ SELECT pg_catalog.citus_split_shard_by_split_points(
+ 8970000,
+ ARRAY['-2120000000'],
+ ARRAY[:worker_1_node, :worker_2_node],
+ 'block_writes');
+ citus_split_shard_by_split_points
+---------------------------------------------------------------------
+
+(1 row)
+
+-- END: Split a shard along its co-located shards
+-- BEGIN: Validate Shard Info and Data
+ SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
+ FROM pg_dist_shard AS shard
+ INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
+ INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
+ INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
+ INNER JOIN pg_catalog.pg_namespace ns ON cls.relnamespace = ns.oid
+ WHERE node.noderole = 'primary' AND ns.nspname = 'citus_split_test_schema_columnar_partitioned'
+ ORDER BY logicalrelid, shardminvalue::BIGINT;
+ shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
+---------------------------------------------------------------------
+ 8999000 | sensors | -2147483648 | -2120000000 | localhost | 57637
+ 8999001 | sensors | -2119999999 | 2147483647 | localhost | 57638
+ 8999008 | sensorscolumnar | -2147483648 | -2120000000 | localhost | 57637
+ 8999009 | sensorscolumnar | -2119999999 | 2147483647 | localhost | 57638
+ 8999002 | sensors_old | -2147483648 | -2120000000 | localhost | 57637
+ 8999003 | sensors_old | -2119999999 | 2147483647 | localhost | 57638
+ 8999004 | sensors_2020_01_01 | -2147483648 | -2120000000 | localhost | 57637
+ 8999005 | sensors_2020_01_01 | -2119999999 | 2147483647 | localhost | 57638
+ 8999006 | sensors_news | -2147483648 | -2120000000 | localhost | 57637
+ 8999007 | sensors_news | -2119999999 | 2147483647 | localhost | 57638
+ 8999010 | sensorscolumnar_old | -2147483648 | -2120000000 | localhost | 57637
+ 8999011 | sensorscolumnar_old | -2119999999 | 2147483647 | localhost | 57638
+ 8999012 | sensorscolumnar_2020_01_01 | -2147483648 | -2120000000 | localhost | 57637
+ 8999013 | sensorscolumnar_2020_01_01 | -2119999999 | 2147483647 | localhost | 57638
+ 8999014 | sensorscolumnar_news | -2147483648 | -2120000000 | localhost | 57637
+ 8999015 | sensorscolumnar_news | -2119999999 | 2147483647 | localhost | 57638
+ 8999016 | colocated_dist_table | -2147483648 | -2120000000 | localhost | 57637
+ 8999017 | colocated_dist_table | -2119999999 | 2147483647 | localhost | 57638
+ 8999018 | colocated_partitioned_table | -2147483648 | -2120000000 | localhost | 57637
+ 8999019 | colocated_partitioned_table | -2119999999 | 2147483647 | localhost | 57638
+ 8999020 | colocated_partitioned_table_2020_01_01 | -2147483648 | -2120000000 | localhost | 57637
+ 8999021 | colocated_partitioned_table_2020_01_01 | -2119999999 | 2147483647 | localhost | 57638
+ 8970011 | reference_table | | | localhost | 57637
+ 8970011 | reference_table | | | localhost | 57638
+(24 rows)
+
+ SELECT count(*) FROM reference_table;
+ count
+---------------------------------------------------------------------
+ 1001
+(1 row)
+
+ SELECT count(*) FROM colocated_partitioned_table;
+ count
+---------------------------------------------------------------------
+ 1001
+(1 row)
+
+ SELECT count(*) FROM colocated_dist_table;
+ count
+---------------------------------------------------------------------
+ 1001
+(1 row)
+
+ SELECT count(*) FROM sensors;
+ count
+---------------------------------------------------------------------
+ 1001
+(1 row)
+
+ SELECT count(*) FROM sensorscolumnar;
+ count
+---------------------------------------------------------------------
+ 1001
+(1 row)
+
+-- END: Validate Shard Info and Data
+-- BEGIN: Show the updated state on workers
+ \c - - - :worker_1_port
+ SET search_path TO "citus_split_test_schema_columnar_partitioned";
+ SET citus.show_shards_for_app_name_prefixes = '*';
+ SELECT tbl.relname, fk."Constraint", fk."Definition"
+ FROM pg_catalog.pg_class tbl
+ JOIN public.table_fkeys fk on tbl.oid = fk.relid
+ WHERE tbl.relname like '%_89%'
+ ORDER BY 1, 2;
+ relname | Constraint | Definition
+---------------------------------------------------------------------
+ sensors_2020_01_01_8999004 | fkey_from_child_to_child_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
+ sensors_2020_01_01_8999004 | fkey_from_child_to_dist_8999004 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
+ sensors_2020_01_01_8999004 | fkey_from_child_to_parent_8999004 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
+ sensors_2020_01_01_8999004 | fkey_from_child_to_ref_8999004 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
+ sensors_2020_01_01_8999004 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
+ sensors_2020_01_01_8999004 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
+ sensors_2020_01_01_8999004 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
+ sensors_2020_01_01_8999004 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
+ sensors_2020_01_01_8999004 | sensors_2020_01_01_8999004_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
+ sensors_8999000 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
+ sensors_8999000 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
+ sensors_8999000 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
+ sensors_8999000 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
+ sensors_8999000 | sensors_8999000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
+ sensors_news_8999006 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
+ sensors_news_8999006 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
+ sensors_news_8999006 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
+ sensors_news_8999006 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
+ sensors_old_8999002 | fkey_from_parent_to_child_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999020(eventdatetime, measureid)
+ sensors_old_8999002 | fkey_from_parent_to_dist_8999000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999016(measureid)
+ sensors_old_8999002 | fkey_from_parent_to_parent_8999000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999018(eventdatetime, measureid)
+ sensors_old_8999002 | fkey_from_parent_to_ref_8999000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid)
+(22 rows)
+
+ SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2;
+ tablename | indexdef
+---------------------------------------------------------------------
+ colocated_dist_table_8999016 | CREATE UNIQUE INDEX colocated_dist_table_pkey_8999016 ON citus_split_test_schema_columnar_partitioned.colocated_dist_table_8999016 USING btree (measureid)
+ colocated_partitioned_table_2020_01_01_8999020 | CREATE UNIQUE INDEX colocated_partitioned_table_2020_01_01_pkey_8999020 ON citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_2020_01_01_8999020 USING btree (measureid, eventdatetime)
+ colocated_partitioned_table_8999018 | CREATE UNIQUE INDEX colocated_partitioned_table_pkey_8999018 ON ONLY citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_8999018 USING btree (measureid, eventdatetime)
+ reference_table_8970011 | CREATE UNIQUE INDEX reference_table_pkey_8970011 ON citus_split_test_schema_columnar_partitioned.reference_table_8970011 USING btree (measureid)
+ sensors_2020_01_01_8999004 | CREATE INDEX index_on_child_8999004 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999004 USING btree (lower((measure_data)::text))
+ sensors_2020_01_01_8999004 | CREATE INDEX sensors_2020_01_01_lower_idx_8999004 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999004 USING btree (lower((measureid)::text))
+ sensors_2020_01_01_8999004 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8999004 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999004 USING btree (measureid, eventdatetime, measure_data)
+ sensors_8999000 | CREATE INDEX index_on_parent_8999000 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999000 USING btree (lower((measureid)::text))
+ sensors_8999000 | CREATE UNIQUE INDEX sensors_pkey_8999000 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999000 USING btree (measureid, eventdatetime, measure_data)
+ sensors_news_8999006 | CREATE INDEX sensors_news_lower_idx_8999006 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999006 USING btree (lower((measureid)::text))
+ sensors_news_8999006 | CREATE UNIQUE INDEX sensors_news_pkey_8999006 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999006 USING btree (measureid, eventdatetime, measure_data)
+ sensors_old_8999002 | CREATE INDEX sensors_old_lower_idx_8999002 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999002 USING btree (lower((measureid)::text))
+ sensors_old_8999002 | CREATE UNIQUE INDEX sensors_old_pkey_8999002 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999002 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_2020_01_01_8999012 | CREATE INDEX index_on_child_columnar_8999012 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999012 USING btree (lower((measure_data)::text)) + sensorscolumnar_2020_01_01_8999012 | CREATE INDEX sensorscolumnar_2020_01_01_lower_idx_8999012 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999012 USING btree (lower((measureid)::text)) + sensorscolumnar_2020_01_01_8999012 | CREATE UNIQUE INDEX sensorscolumnar_2020_01_01_pkey_8999012 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999012 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_8999008 | CREATE INDEX index_on_parent_columnar_8999008 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999008 USING btree (lower((measureid)::text)) + sensorscolumnar_8999008 | CREATE UNIQUE INDEX sensorscolumnar_pkey_8999008 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999008 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_news_8999014 | CREATE INDEX sensorscolumnar_news_lower_idx_8999014 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999014 USING btree (lower((measureid)::text)) + sensorscolumnar_news_8999014 | CREATE UNIQUE INDEX sensorscolumnar_news_pkey_8999014 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999014 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_old_8999010 | CREATE INDEX sensorscolumnar_old_lower_idx_8999010 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999010 USING btree (lower((measureid)::text)) + sensorscolumnar_old_8999010 | CREATE UNIQUE INDEX sensorscolumnar_old_pkey_8999010 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999010 USING btree (measureid, eventdatetime, measure_data) +(22 rows) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + s1 + s1_8999000 + s1_c + s1_c_8999008 + s2 + s2_8999004 + s2_c + s2_c_8999012 +(8 rows) + + \c - - - :worker_2_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + 
sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8999005 | sensors_2020_01_01_8999005_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_8999001 | sensors_8999001_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) +(22 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + colocated_dist_table_8999017 | CREATE UNIQUE INDEX colocated_dist_table_pkey_8999017 ON citus_split_test_schema_columnar_partitioned.colocated_dist_table_8999017 USING btree (measureid) + colocated_partitioned_table_2020_01_01_8999021 | CREATE UNIQUE INDEX colocated_partitioned_table_2020_01_01_pkey_8999021 ON citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_2020_01_01_8999021 USING btree (measureid, eventdatetime) + 
colocated_partitioned_table_8999019 | CREATE UNIQUE INDEX colocated_partitioned_table_pkey_8999019 ON ONLY citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_8999019 USING btree (measureid, eventdatetime) + reference_table_8970011 | CREATE UNIQUE INDEX reference_table_pkey_8970011 ON citus_split_test_schema_columnar_partitioned.reference_table_8970011 USING btree (measureid) + sensors_2020_01_01_8999005 | CREATE INDEX index_on_child_8999005 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999005 USING btree (lower((measure_data)::text)) + sensors_2020_01_01_8999005 | CREATE INDEX sensors_2020_01_01_lower_idx_8999005 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999005 USING btree (lower((measureid)::text)) + sensors_2020_01_01_8999005 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8999005 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999005 USING btree (measureid, eventdatetime, measure_data) + sensors_8999001 | CREATE INDEX index_on_parent_8999001 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999001 USING btree (lower((measureid)::text)) + sensors_8999001 | CREATE UNIQUE INDEX sensors_pkey_8999001 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999001 USING btree (measureid, eventdatetime, measure_data) + sensors_news_8999007 | CREATE INDEX sensors_news_lower_idx_8999007 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999007 USING btree (lower((measureid)::text)) + sensors_news_8999007 | CREATE UNIQUE INDEX sensors_news_pkey_8999007 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999007 USING btree (measureid, eventdatetime, measure_data) + sensors_old_8999003 | CREATE INDEX sensors_old_lower_idx_8999003 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999003 USING btree (lower((measureid)::text)) + sensors_old_8999003 | CREATE UNIQUE INDEX sensors_old_pkey_8999003 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999003 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_2020_01_01_8999013 | CREATE INDEX index_on_child_columnar_8999013 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999013 USING btree (lower((measure_data)::text)) + sensorscolumnar_2020_01_01_8999013 | CREATE INDEX sensorscolumnar_2020_01_01_lower_idx_8999013 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999013 USING btree (lower((measureid)::text)) + sensorscolumnar_2020_01_01_8999013 | CREATE UNIQUE INDEX sensorscolumnar_2020_01_01_pkey_8999013 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999013 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_8999009 | CREATE INDEX index_on_parent_columnar_8999009 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999009 USING btree (lower((measureid)::text)) + sensorscolumnar_8999009 | CREATE UNIQUE INDEX sensorscolumnar_pkey_8999009 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999009 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_news_8999015 | CREATE INDEX sensorscolumnar_news_lower_idx_8999015 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999015 USING btree (lower((measureid)::text)) + sensorscolumnar_news_8999015 | CREATE UNIQUE INDEX sensorscolumnar_news_pkey_8999015 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999015 USING btree (measureid, eventdatetime, measure_data) + 
sensorscolumnar_old_8999011 | CREATE INDEX sensorscolumnar_old_lower_idx_8999011 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999011 USING btree (lower((measureid)::text)) + sensorscolumnar_old_8999011 | CREATE UNIQUE INDEX sensorscolumnar_old_pkey_8999011 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999011 USING btree (measureid, eventdatetime, measure_data) +(22 rows) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + s1 + s1_8999001 + s1_c + s1_c_8999009 + s2 + s2_8999005 + s2_c + s2_c_8999013 +(8 rows) + +-- END: Show the updated state on workers +-- BEGIN: Split a partition table directly +\c - - - :master_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.next_shard_id TO 8999100; + SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset + SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset + SELECT pg_catalog.citus_split_shard_by_split_points( + 8999002, -- sensors_old + ARRAY['-2127770000'], + ARRAY[:worker_1_node, :worker_2_node], + 'block_writes'); + citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +-- END: Split a partition table directly +-- BEGIN: Validate Shard Info and Data + SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + INNER JOIN pg_catalog.pg_namespace ns ON cls.relnamespace = ns.oid + WHERE node.noderole = 'primary' AND ns.nspname = 'citus_split_test_schema_columnar_partitioned' + ORDER BY logicalrelid, shardminvalue::BIGINT; + shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport +--------------------------------------------------------------------- + 8999100 | sensors | -2147483648 | -2127770000 | localhost | 57637 + 8999101 | sensors | -2127769999 | -2120000000 | localhost | 57638 + 8999001 | sensors | -2119999999 | 2147483647 | localhost | 57638 + 8999108 | sensorscolumnar | -2147483648 | -2127770000 | localhost | 57637 + 8999109 | sensorscolumnar | -2127769999 | -2120000000 | localhost | 57638 + 8999009 | sensorscolumnar | -2119999999 | 2147483647 | localhost | 57638 + 8999102 | sensors_old | -2147483648 | -2127770000 | localhost | 57637 + 8999103 | sensors_old | -2127769999 | -2120000000 | localhost | 57638 + 8999003 | sensors_old | -2119999999 | 2147483647 | localhost | 57638 + 8999104 | sensors_2020_01_01 | -2147483648 | -2127770000 | localhost | 57637 + 8999105 | sensors_2020_01_01 | -2127769999 | -2120000000 | localhost | 57638 + 8999005 | sensors_2020_01_01 | -2119999999 | 2147483647 | localhost | 57638 + 8999106 | sensors_news | -2147483648 | -2127770000 | localhost | 57637 + 8999107 | sensors_news | -2127769999 | -2120000000 | localhost | 57638 + 8999007 | sensors_news | -2119999999 | 2147483647 | localhost | 57638 + 8999110 | sensorscolumnar_old | -2147483648 | -2127770000 | localhost | 57637 + 8999111 | sensorscolumnar_old | -2127769999 | -2120000000 | localhost | 57638 + 8999011 | sensorscolumnar_old | -2119999999 | 
2147483647 | localhost | 57638 + 8999112 | sensorscolumnar_2020_01_01 | -2147483648 | -2127770000 | localhost | 57637 + 8999113 | sensorscolumnar_2020_01_01 | -2127769999 | -2120000000 | localhost | 57638 + 8999013 | sensorscolumnar_2020_01_01 | -2119999999 | 2147483647 | localhost | 57638 + 8999114 | sensorscolumnar_news | -2147483648 | -2127770000 | localhost | 57637 + 8999115 | sensorscolumnar_news | -2127769999 | -2120000000 | localhost | 57638 + 8999015 | sensorscolumnar_news | -2119999999 | 2147483647 | localhost | 57638 + 8999116 | colocated_dist_table | -2147483648 | -2127770000 | localhost | 57637 + 8999117 | colocated_dist_table | -2127769999 | -2120000000 | localhost | 57638 + 8999017 | colocated_dist_table | -2119999999 | 2147483647 | localhost | 57638 + 8999118 | colocated_partitioned_table | -2147483648 | -2127770000 | localhost | 57637 + 8999119 | colocated_partitioned_table | -2127769999 | -2120000000 | localhost | 57638 + 8999019 | colocated_partitioned_table | -2119999999 | 2147483647 | localhost | 57638 + 8999120 | colocated_partitioned_table_2020_01_01 | -2147483648 | -2127770000 | localhost | 57637 + 8999121 | colocated_partitioned_table_2020_01_01 | -2127769999 | -2120000000 | localhost | 57638 + 8999021 | colocated_partitioned_table_2020_01_01 | -2119999999 | 2147483647 | localhost | 57638 + 8970011 | reference_table | | | localhost | 57637 + 8970011 | reference_table | | | localhost | 57638 +(35 rows) + + SELECT count(*) FROM reference_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + + SELECT count(*) FROM colocated_partitioned_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + + SELECT count(*) FROM colocated_dist_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + + SELECT count(*) FROM sensors; + count +--------------------------------------------------------------------- + 1001 +(1 row) + + SELECT count(*) FROM sensorscolumnar; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +-- END: Validate Shard Info and Data +-- BEGIN: Show the updated state on workers + \c - - - :worker_1_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_2020_01_01_8999104 | fkey_from_child_to_child_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid) + sensors_2020_01_01_8999104 | fkey_from_child_to_dist_8999104 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid) + sensors_2020_01_01_8999104 | fkey_from_child_to_parent_8999104 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid) + sensors_2020_01_01_8999104 | fkey_from_child_to_ref_8999104 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8999104 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid) + sensors_2020_01_01_8999104 | 
fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid) + sensors_2020_01_01_8999104 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid) + sensors_2020_01_01_8999104 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8999104 | sensors_2020_01_01_8999104_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid) + sensors_8999100 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid) + sensors_8999100 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid) + sensors_8999100 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid) + sensors_8999100 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_8999100 | sensors_8999100_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid) + sensors_news_8999106 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid) + sensors_news_8999106 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid) + sensors_news_8999106 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid) + sensors_news_8999106 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_old_8999102 | fkey_from_parent_to_child_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999120(eventdatetime, measureid) + sensors_old_8999102 | fkey_from_parent_to_dist_8999100 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999116(measureid) + sensors_old_8999102 | fkey_from_parent_to_parent_8999100 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999118(eventdatetime, measureid) + sensors_old_8999102 | fkey_from_parent_to_ref_8999100 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) +(22 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + colocated_dist_table_8999116 | CREATE UNIQUE INDEX colocated_dist_table_pkey_8999116 ON citus_split_test_schema_columnar_partitioned.colocated_dist_table_8999116 USING btree (measureid) + colocated_partitioned_table_2020_01_01_8999120 | CREATE UNIQUE INDEX colocated_partitioned_table_2020_01_01_pkey_8999120 ON citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_2020_01_01_8999120 USING btree (measureid, eventdatetime) + colocated_partitioned_table_8999118 | CREATE UNIQUE INDEX colocated_partitioned_table_pkey_8999118 ON ONLY citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_8999118 USING btree (measureid, eventdatetime) + 
reference_table_8970011 | CREATE UNIQUE INDEX reference_table_pkey_8970011 ON citus_split_test_schema_columnar_partitioned.reference_table_8970011 USING btree (measureid) + sensors_2020_01_01_8999104 | CREATE INDEX index_on_child_8999104 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999104 USING btree (lower((measure_data)::text)) + sensors_2020_01_01_8999104 | CREATE INDEX sensors_2020_01_01_lower_idx_8999104 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999104 USING btree (lower((measureid)::text)) + sensors_2020_01_01_8999104 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8999104 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999104 USING btree (measureid, eventdatetime, measure_data) + sensors_8999100 | CREATE INDEX index_on_parent_8999100 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999100 USING btree (lower((measureid)::text)) + sensors_8999100 | CREATE UNIQUE INDEX sensors_pkey_8999100 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999100 USING btree (measureid, eventdatetime, measure_data) + sensors_news_8999106 | CREATE INDEX sensors_news_lower_idx_8999106 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999106 USING btree (lower((measureid)::text)) + sensors_news_8999106 | CREATE UNIQUE INDEX sensors_news_pkey_8999106 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999106 USING btree (measureid, eventdatetime, measure_data) + sensors_old_8999102 | CREATE INDEX sensors_old_lower_idx_8999102 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999102 USING btree (lower((measureid)::text)) + sensors_old_8999102 | CREATE UNIQUE INDEX sensors_old_pkey_8999102 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999102 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_2020_01_01_8999112 | CREATE INDEX index_on_child_columnar_8999112 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999112 USING btree (lower((measure_data)::text)) + sensorscolumnar_2020_01_01_8999112 | CREATE INDEX sensorscolumnar_2020_01_01_lower_idx_8999112 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999112 USING btree (lower((measureid)::text)) + sensorscolumnar_2020_01_01_8999112 | CREATE UNIQUE INDEX sensorscolumnar_2020_01_01_pkey_8999112 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999112 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_8999108 | CREATE INDEX index_on_parent_columnar_8999108 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999108 USING btree (lower((measureid)::text)) + sensorscolumnar_8999108 | CREATE UNIQUE INDEX sensorscolumnar_pkey_8999108 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999108 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_news_8999114 | CREATE INDEX sensorscolumnar_news_lower_idx_8999114 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999114 USING btree (lower((measureid)::text)) + sensorscolumnar_news_8999114 | CREATE UNIQUE INDEX sensorscolumnar_news_pkey_8999114 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999114 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_old_8999110 | CREATE INDEX sensorscolumnar_old_lower_idx_8999110 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999110 USING btree (lower((measureid)::text)) + sensorscolumnar_old_8999110 | 
CREATE UNIQUE INDEX sensorscolumnar_old_pkey_8999110 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999110 USING btree (measureid, eventdatetime, measure_data) +(22 rows) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + s1 + s1_8999100 + s1_c + s1_c_8999108 + s2 + s2_8999104 + s2_c + s2_c_8999112 +(8 rows) + + \c - - - :worker_2_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_2020_01_01_8999005 | fkey_from_child_to_child_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_2020_01_01_8999005 | fkey_from_child_to_dist_8999005 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_2020_01_01_8999005 | fkey_from_child_to_parent_8999005 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_2020_01_01_8999005 | fkey_from_child_to_ref_8999005 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8999005 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_2020_01_01_8999005 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_2020_01_01_8999005 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_2020_01_01_8999005 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8999005 | sensors_2020_01_01_8999005_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_2020_01_01_8999105 | fkey_from_child_to_child_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid) + sensors_2020_01_01_8999105 | fkey_from_child_to_dist_8999105 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid) + sensors_2020_01_01_8999105 | fkey_from_child_to_parent_8999105 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid) + sensors_2020_01_01_8999105 | fkey_from_child_to_ref_8999105 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8999105 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid) + sensors_2020_01_01_8999105 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid) + sensors_2020_01_01_8999105 | fkey_from_parent_to_parent_8999101 | FOREIGN 
KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid) + sensors_2020_01_01_8999105 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_2020_01_01_8999105 | sensors_2020_01_01_8999105_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid) + sensors_8999001 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_8999001 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_8999001 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_8999001 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_8999001 | sensors_8999001_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_8999101 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid) + sensors_8999101 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid) + sensors_8999101 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid) + sensors_8999101 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_8999101 | sensors_8999101_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid) + sensors_news_8999007 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_news_8999007 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999017(measureid) + sensors_news_8999007 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_news_8999007 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_news_8999107 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid) + sensors_news_8999107 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid) + sensors_news_8999107 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid) + sensors_news_8999107 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_old_8999003 | fkey_from_parent_to_child_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999021(eventdatetime, measureid) + sensors_old_8999003 | fkey_from_parent_to_dist_8999001 | FOREIGN KEY (measureid) 
REFERENCES colocated_dist_table_8999017(measureid) + sensors_old_8999003 | fkey_from_parent_to_parent_8999001 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999019(eventdatetime, measureid) + sensors_old_8999003 | fkey_from_parent_to_ref_8999001 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) + sensors_old_8999103 | fkey_from_parent_to_child_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8999121(eventdatetime, measureid) + sensors_old_8999103 | fkey_from_parent_to_dist_8999101 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8999117(measureid) + sensors_old_8999103 | fkey_from_parent_to_parent_8999101 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8999119(eventdatetime, measureid) + sensors_old_8999103 | fkey_from_parent_to_ref_8999101 | FOREIGN KEY (measureid) REFERENCES reference_table_8970011(measureid) +(44 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + colocated_dist_table_8999017 | CREATE UNIQUE INDEX colocated_dist_table_pkey_8999017 ON citus_split_test_schema_columnar_partitioned.colocated_dist_table_8999017 USING btree (measureid) + colocated_dist_table_8999117 | CREATE UNIQUE INDEX colocated_dist_table_pkey_8999117 ON citus_split_test_schema_columnar_partitioned.colocated_dist_table_8999117 USING btree (measureid) + colocated_partitioned_table_2020_01_01_8999021 | CREATE UNIQUE INDEX colocated_partitioned_table_2020_01_01_pkey_8999021 ON citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_2020_01_01_8999021 USING btree (measureid, eventdatetime) + colocated_partitioned_table_2020_01_01_8999121 | CREATE UNIQUE INDEX colocated_partitioned_table_2020_01_01_pkey_8999121 ON citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_2020_01_01_8999121 USING btree (measureid, eventdatetime) + colocated_partitioned_table_8999019 | CREATE UNIQUE INDEX colocated_partitioned_table_pkey_8999019 ON ONLY citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_8999019 USING btree (measureid, eventdatetime) + colocated_partitioned_table_8999119 | CREATE UNIQUE INDEX colocated_partitioned_table_pkey_8999119 ON ONLY citus_split_test_schema_columnar_partitioned.colocated_partitioned_table_8999119 USING btree (measureid, eventdatetime) + reference_table_8970011 | CREATE UNIQUE INDEX reference_table_pkey_8970011 ON citus_split_test_schema_columnar_partitioned.reference_table_8970011 USING btree (measureid) + sensors_2020_01_01_8999005 | CREATE INDEX index_on_child_8999005 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999005 USING btree (lower((measure_data)::text)) + sensors_2020_01_01_8999005 | CREATE INDEX sensors_2020_01_01_lower_idx_8999005 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999005 USING btree (lower((measureid)::text)) + sensors_2020_01_01_8999005 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8999005 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999005 USING btree (measureid, eventdatetime, measure_data) + sensors_2020_01_01_8999105 | CREATE INDEX index_on_child_8999105 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999105 USING btree (lower((measure_data)::text)) + sensors_2020_01_01_8999105 | CREATE INDEX sensors_2020_01_01_lower_idx_8999105 ON 
citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999105 USING btree (lower((measureid)::text)) + sensors_2020_01_01_8999105 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8999105 ON citus_split_test_schema_columnar_partitioned.sensors_2020_01_01_8999105 USING btree (measureid, eventdatetime, measure_data) + sensors_8999001 | CREATE INDEX index_on_parent_8999001 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999001 USING btree (lower((measureid)::text)) + sensors_8999001 | CREATE UNIQUE INDEX sensors_pkey_8999001 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999001 USING btree (measureid, eventdatetime, measure_data) + sensors_8999101 | CREATE INDEX index_on_parent_8999101 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999101 USING btree (lower((measureid)::text)) + sensors_8999101 | CREATE UNIQUE INDEX sensors_pkey_8999101 ON ONLY citus_split_test_schema_columnar_partitioned.sensors_8999101 USING btree (measureid, eventdatetime, measure_data) + sensors_news_8999007 | CREATE INDEX sensors_news_lower_idx_8999007 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999007 USING btree (lower((measureid)::text)) + sensors_news_8999007 | CREATE UNIQUE INDEX sensors_news_pkey_8999007 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999007 USING btree (measureid, eventdatetime, measure_data) + sensors_news_8999107 | CREATE INDEX sensors_news_lower_idx_8999107 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999107 USING btree (lower((measureid)::text)) + sensors_news_8999107 | CREATE UNIQUE INDEX sensors_news_pkey_8999107 ON citus_split_test_schema_columnar_partitioned.sensors_news_8999107 USING btree (measureid, eventdatetime, measure_data) + sensors_old_8999003 | CREATE INDEX sensors_old_lower_idx_8999003 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999003 USING btree (lower((measureid)::text)) + sensors_old_8999003 | CREATE UNIQUE INDEX sensors_old_pkey_8999003 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999003 USING btree (measureid, eventdatetime, measure_data) + sensors_old_8999103 | CREATE INDEX sensors_old_lower_idx_8999103 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999103 USING btree (lower((measureid)::text)) + sensors_old_8999103 | CREATE UNIQUE INDEX sensors_old_pkey_8999103 ON citus_split_test_schema_columnar_partitioned.sensors_old_8999103 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_2020_01_01_8999013 | CREATE INDEX index_on_child_columnar_8999013 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999013 USING btree (lower((measure_data)::text)) + sensorscolumnar_2020_01_01_8999013 | CREATE INDEX sensorscolumnar_2020_01_01_lower_idx_8999013 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999013 USING btree (lower((measureid)::text)) + sensorscolumnar_2020_01_01_8999013 | CREATE UNIQUE INDEX sensorscolumnar_2020_01_01_pkey_8999013 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999013 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_2020_01_01_8999113 | CREATE INDEX index_on_child_columnar_8999113 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999113 USING btree (lower((measure_data)::text)) + sensorscolumnar_2020_01_01_8999113 | CREATE INDEX sensorscolumnar_2020_01_01_lower_idx_8999113 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999113 USING btree 
(lower((measureid)::text)) + sensorscolumnar_2020_01_01_8999113 | CREATE UNIQUE INDEX sensorscolumnar_2020_01_01_pkey_8999113 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_2020_01_01_8999113 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_8999009 | CREATE INDEX index_on_parent_columnar_8999009 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999009 USING btree (lower((measureid)::text)) + sensorscolumnar_8999009 | CREATE UNIQUE INDEX sensorscolumnar_pkey_8999009 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999009 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_8999109 | CREATE INDEX index_on_parent_columnar_8999109 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999109 USING btree (lower((measureid)::text)) + sensorscolumnar_8999109 | CREATE UNIQUE INDEX sensorscolumnar_pkey_8999109 ON ONLY citus_split_test_schema_columnar_partitioned.sensorscolumnar_8999109 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_news_8999015 | CREATE INDEX sensorscolumnar_news_lower_idx_8999015 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999015 USING btree (lower((measureid)::text)) + sensorscolumnar_news_8999015 | CREATE UNIQUE INDEX sensorscolumnar_news_pkey_8999015 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999015 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_news_8999115 | CREATE INDEX sensorscolumnar_news_lower_idx_8999115 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999115 USING btree (lower((measureid)::text)) + sensorscolumnar_news_8999115 | CREATE UNIQUE INDEX sensorscolumnar_news_pkey_8999115 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_news_8999115 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_old_8999011 | CREATE INDEX sensorscolumnar_old_lower_idx_8999011 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999011 USING btree (lower((measureid)::text)) + sensorscolumnar_old_8999011 | CREATE UNIQUE INDEX sensorscolumnar_old_pkey_8999011 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999011 USING btree (measureid, eventdatetime, measure_data) + sensorscolumnar_old_8999111 | CREATE INDEX sensorscolumnar_old_lower_idx_8999111 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999111 USING btree (lower((measureid)::text)) + sensorscolumnar_old_8999111 | CREATE UNIQUE INDEX sensorscolumnar_old_pkey_8999111 ON citus_split_test_schema_columnar_partitioned.sensorscolumnar_old_8999111 USING btree (measureid, eventdatetime, measure_data) +(43 rows) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + s1 + s1_8999001 + s1_8999101 + s1_c + s1_c_8999009 + s1_c_8999109 + s2 + s2_8999005 + s2_8999105 + s2_c + s2_c_8999013 + s2_c_8999113 +(12 rows) + +-- END: Show the updated state on workers +--BEGIN : Cleanup + \c - postgres - :master_port + DROP SCHEMA "citus_split_test_schema_columnar_partitioned" CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table citus_split_test_schema_columnar_partitioned.sensors +drop cascades to table citus_split_test_schema_columnar_partitioned.sensorscolumnar +drop cascades to table 
citus_split_test_schema_columnar_partitioned.colocated_dist_table +drop cascades to table citus_split_test_schema_columnar_partitioned.colocated_partitioned_table +drop cascades to table citus_split_test_schema_columnar_partitioned.reference_table +--END : Cleanup diff --git a/src/test/regress/expected/shard_move_constraints_blocking.out b/src/test/regress/expected/shard_move_constraints_blocking.out new file mode 100644 index 000000000..3eae6b41c --- /dev/null +++ b/src/test/regress/expected/shard_move_constraints_blocking.out @@ -0,0 +1,362 @@ +CREATE SCHEMA "blocking shard Move Fkeys Indexes"; +SET search_path TO "blocking shard Move Fkeys Indexes"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +-- create a non-superuser role +CREATE ROLE mx_rebalancer_blocking_role_ent WITH LOGIN; +GRANT ALL ON SCHEMA "blocking shard Move Fkeys Indexes" TO mx_rebalancer_blocking_role_ent; +-- connect with this new role +\c - mx_rebalancer_blocking_role_ent - :master_port +SET search_path TO "blocking shard Move Fkeys Indexes"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE sensors( +measureid integer, +eventdatetime date, +measure_data jsonb, +PRIMARY KEY (measureid, eventdatetime, measure_data)) +PARTITION BY RANGE(eventdatetime); +CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01'); +CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +CREATE TABLE sensors_news PARTITION OF sensors FOR VALUES FROM ('2020-05-01') TO ('2025-01-01'); +CREATE INDEX index_on_parent ON sensors(lower(measureid::text)); +CREATE INDEX index_on_child ON sensors_2020_01_01(lower(measure_data::text)); +CREATE INDEX hash_index ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime); +CREATE STATISTICS s1 (dependencies) ON measureid, eventdatetime FROM sensors; +CREATE STATISTICS s2 (dependencies) ON measureid, eventdatetime FROM sensors_2020_01_01; +ALTER INDEX index_on_parent ALTER COLUMN 1 SET STATISTICS 1000; +ALTER INDEX index_on_child ALTER COLUMN 1 SET STATISTICS 1000; +CLUSTER sensors_2020_01_01 USING index_on_child; +SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- due to https://github.com/citusdata/citus/issues/5121 +\c - postgres - :master_port +SET search_path TO "blocking shard Move Fkeys Indexes"; +SELECT update_distributed_table_colocation('sensors_old', 'sensors'); + update_distributed_table_colocation +--------------------------------------------------------------------- + +(1 row) + +SELECT update_distributed_table_colocation('sensors_2020_01_01', 'sensors'); + update_distributed_table_colocation +--------------------------------------------------------------------- + +(1 row) + +SELECT update_distributed_table_colocation('sensors_news', 'sensors'); + update_distributed_table_colocation +--------------------------------------------------------------------- + +(1 row) + +\c - mx_rebalancer_blocking_role_ent - :master_port +SET search_path TO "blocking shard Move Fkeys Indexes"; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET 
citus.next_shard_id TO 8970016; +SET citus.next_placement_id TO 8770016; +-- create colocated distributed tables and create foreign keys FROM/TO +-- the partitions +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; +CREATE TABLE colocated_partitioned_table( + measureid integer, + eventdatetime date, + PRIMARY KEY (measureid, eventdatetime)) +PARTITION BY RANGE(eventdatetime); +CREATE TABLE colocated_partitioned_table_2020_01_01 PARTITION OF colocated_partitioned_table FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +SELECT create_distributed_table('colocated_partitioned_table', 'measureid', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CLUSTER colocated_partitioned_table_2020_01_01 USING colocated_partitioned_table_2020_01_01_pkey; +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- this table is used to make sure that index backed +-- replica identities can have clustered indexes +-- and no index statistics +CREATE TABLE index_backed_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON index_backed_rep_identity(key); +ALTER TABLE index_backed_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER index_backed_rep_identity USING uqx; +SELECT create_distributed_table('index_backed_rep_identity', 'key', colocate_with:='sensors'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- from parent to regular dist +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- from parent to parent +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_parent FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table(measureid, eventdatetime); +-- from parent to child +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_child FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid, eventdatetime); +-- from parent to reference table +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); +-- from child to regular dist +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- from child to parent +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_parent FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table(measureid,eventdatetime); +-- from child to child +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_child FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid,eventdatetime); +-- from child to reference table +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); +-- load some data +INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
+INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; +\c - postgres - :worker_1_port +SET search_path TO "blocking shard Move Fkeys Indexes", public, pg_catalog; +-- show the current state of the constraints +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_8970000'::regclass ORDER BY 1,2; + Constraint | Definition +--------------------------------------------------------------------- + fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970028(measureid) + sensors_8970000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) +(5 rows) + +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_2020_01_01_8970008'::regclass ORDER BY 1,2; + Constraint | Definition +--------------------------------------------------------------------- + fkey_from_child_to_child_8970008 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_child_to_dist_8970008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_child_to_parent_8970008 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + fkey_from_child_to_ref_8970008 | FOREIGN KEY (measureid) REFERENCES reference_table_8970028(measureid) + fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970028(measureid) + sensors_2020_01_01_8970008_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) +(9 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_8970000' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8970000 | CREATE INDEX hash_index_8970000 ON ONLY "blocking shard Move Fkeys Indexes".sensors_8970000 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8970000 | CREATE INDEX index_on_parent_8970000 ON ONLY "blocking shard Move Fkeys Indexes".sensors_8970000 USING btree (lower((measureid)::text)) + sensors_8970000 | CREATE INDEX index_with_include_8970000 ON ONLY "blocking shard Move Fkeys Indexes".sensors_8970000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime) + sensors_8970000 | CREATE UNIQUE INDEX sensors_pkey_8970000 ON ONLY "blocking shard Move Fkeys Indexes".sensors_8970000 USING 
btree (measureid, eventdatetime, measure_data) +(4 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_2020_01_01_8970008' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_2020_01_01_8970008 | CREATE INDEX index_on_child_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (lower((measure_data)::text)) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_expr_idx_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_expr_measure_data_eventdatetime_idx_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_lower_idx_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (lower((measureid)::text)) + sensors_2020_01_01_8970008 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (measureid, eventdatetime, measure_data) +(5 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='index_backed_rep_identity_8970029' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + index_backed_rep_identity_8970029 | CREATE UNIQUE INDEX uqx_8970029 ON "blocking shard Move Fkeys Indexes".index_backed_rep_identity_8970029 USING btree (key) +(1 row) + +SELECT indisclustered FROM pg_index where indisclustered AND indrelid = 'index_backed_rep_identity_8970029'::regclass; + indisclustered +--------------------------------------------------------------------- + t +(1 row) + +SELECT stxname FROM pg_statistic_ext +WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('blocking shard Move Fkeys Indexes') +) +ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + s1 + s1_8970000 + s1_8970002 + s2 + s2_8970008 + s2_8970010 +(6 rows) + +SELECT count(*) FROM pg_index +WHERE indisclustered + and +indrelid IN +('sensors_2020_01_01_8970008'::regclass, 'colocated_dist_table_8970016'::regclass, 'colocated_partitioned_table_2020_01_01_8970024'::regclass); + count +--------------------------------------------------------------------- + 3 +(1 row) + +\c - - - :master_port +-- make sure that constraints are moved sanely with block writes +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +NOTICE: cleaned up 8 orphaned shards +\c - postgres - :worker_2_port +SET search_path TO "blocking shard Move Fkeys Indexes", public, pg_catalog; +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_8970000'::regclass ORDER BY 1,2; + Constraint | Definition +--------------------------------------------------------------------- + fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) +
fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970028(measureid) + sensors_8970000_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) +(5 rows) + +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_2020_01_01_8970008'::regclass ORDER BY 1,2; + Constraint | Definition +--------------------------------------------------------------------- + fkey_from_child_to_child_8970008 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_child_to_dist_8970008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_child_to_parent_8970008 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + fkey_from_child_to_ref_8970008 | FOREIGN KEY (measureid) REFERENCES reference_table_8970028(measureid) + fkey_from_parent_to_child_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) + fkey_from_parent_to_dist_8970000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8970016(measureid) + fkey_from_parent_to_parent_8970000 | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_8970020(eventdatetime, measureid) + fkey_from_parent_to_ref_8970000 | FOREIGN KEY (measureid) REFERENCES reference_table_8970028(measureid) + sensors_2020_01_01_8970008_measureid_eventdatetime_fkey | FOREIGN KEY (eventdatetime, measureid) REFERENCES colocated_partitioned_table_2020_01_01_8970024(eventdatetime, measureid) +(9 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_8970000' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8970000 | CREATE INDEX hash_index_8970000 ON ONLY "blocking shard Move Fkeys Indexes".sensors_8970000 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8970000 | CREATE INDEX index_on_parent_8970000 ON ONLY "blocking shard Move Fkeys Indexes".sensors_8970000 USING btree (lower((measureid)::text)) + sensors_8970000 | CREATE INDEX index_with_include_8970000 ON ONLY "blocking shard Move Fkeys Indexes".sensors_8970000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime) + sensors_8970000 | CREATE UNIQUE INDEX sensors_pkey_8970000 ON ONLY "blocking shard Move Fkeys Indexes".sensors_8970000 USING btree (measureid, eventdatetime, measure_data) +(4 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_2020_01_01_8970008' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_2020_01_01_8970008 | CREATE INDEX index_on_child_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (lower((measure_data)::text)) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_expr_idx_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_expr_measure_data_eventdatetime_idx_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING 
btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime) + sensors_2020_01_01_8970008 | CREATE INDEX sensors_2020_01_01_lower_idx_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (lower((measureid)::text)) + sensors_2020_01_01_8970008 | CREATE UNIQUE INDEX sensors_2020_01_01_pkey_8970008 ON "blocking shard Move Fkeys Indexes".sensors_2020_01_01_8970008 USING btree (measureid, eventdatetime, measure_data) +(5 rows) + +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='index_backed_rep_identity_8970029' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + index_backed_rep_identity_8970029 | CREATE UNIQUE INDEX uqx_8970029 ON "blocking shard Move Fkeys Indexes".index_backed_rep_identity_8970029 USING btree (key) +(1 row) + +SELECT indisclustered FROM pg_index where indisclustered AND indrelid = 'index_backed_rep_identity_8970029'::regclass; + indisclustered +--------------------------------------------------------------------- + t +(1 row) + +SELECT stxname FROM pg_statistic_ext +WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('blocking shard Move Fkeys Indexes') +) +ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + s1 + s1_8970000 + s1_8970001 + s1_8970003 + s2 + s2_8970008 + s2_8970009 + s2_8970011 +(8 rows) + +SELECT count(*) FROM pg_index +WHERE indisclustered + and +indrelid IN +('sensors_2020_01_01_8970008'::regclass, 'colocated_dist_table_8970016'::regclass, 'colocated_partitioned_table_2020_01_01_8970024'::regclass); + count +--------------------------------------------------------------------- + 3 +(1 row) + +\c - mx_rebalancer_blocking_role_ent - :master_port +-- verify that the data is consistent +SET search_path TO "blocking shard Move Fkeys Indexes"; +SELECT count(*) FROM reference_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT count(*) FROM colocated_partitioned_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT count(*) FROM colocated_dist_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT count(*) FROM sensors; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +-- we should be able to change/drop constraints +ALTER INDEX index_on_parent RENAME TO index_on_parent_renamed; +ALTER INDEX index_on_child RENAME TO index_on_child_renamed; +ALTER INDEX index_on_parent_renamed ALTER COLUMN 1 SET STATISTICS 200; +ALTER INDEX index_on_child_renamed ALTER COLUMN 1 SET STATISTICS 200; +DROP STATISTICS s1,s2; +DROP INDEX index_on_parent_renamed; +DROP INDEX index_on_child_renamed; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_dist; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_parent; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_child; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_dist; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_parent; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_child; +-- cleanup +\c - postgres - :master_port +DROP SCHEMA "blocking shard Move Fkeys Indexes" CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table "blocking shard Move Fkeys Indexes".sensors +drop cascades to table "blocking shard Move Fkeys 
Indexes".colocated_dist_table +drop cascades to table "blocking shard Move Fkeys Indexes".colocated_partitioned_table +drop cascades to table "blocking shard Move Fkeys Indexes".reference_table +drop cascades to table "blocking shard Move Fkeys Indexes".index_backed_rep_identity diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule index 18601a1ab..37d9746f1 100644 --- a/src/test/regress/split_schedule +++ b/src/test/regress/split_schedule @@ -13,3 +13,6 @@ test: worker_split_text_copy_test test: citus_split_shard_by_split_points_negative test: citus_split_shard_by_split_points test: citus_split_shard_by_split_points_failure +# Name citus_split_shard_by_split_points_columnar_partitioned was too long and was being truncated. +# use citus_split_shard_columnar_partitioned instead. +test: citus_split_shard_columnar_partitioned diff --git a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql index e730a8c28..ad76c5c7c 100644 --- a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql +++ b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql @@ -121,25 +121,3 @@ SELECT citus_split_shard_by_split_points( 51261400, ARRAY['-1073741826'], ARRAY[:worker_1_node, :worker_2_node]); - --- Create distributed table with columnar type. -SET citus.next_shard_id TO 51271400; -CREATE TABLE table_to_split_columnar (id bigserial PRIMARY KEY, value char) USING columnar; -SELECT create_distributed_table('table_to_split_columnar','id'); - --- UDF fails for columnar table. -SELECT citus_split_shard_by_split_points( - 51271400, - ARRAY['-1073741826'], - ARRAY[:worker_1_node, :worker_2_node]); - --- Create distributed table which is partitioned. -SET citus.next_shard_id TO 51271900; -CREATE TABLE table_to_split_partitioned(id integer, dt date) PARTITION BY RANGE(dt); -SELECT create_distributed_table('table_to_split_partitioned','id'); - --- UDF fails for partitioned table. -SELECT citus_split_shard_by_split_points( - 51271900, - ARRAY['-1073741826'], - ARRAY[:worker_1_node, :worker_2_node]); diff --git a/src/test/regress/sql/citus_split_shard_columnar_partitioned.sql b/src/test/regress/sql/citus_split_shard_columnar_partitioned.sql new file mode 100644 index 000000000..5955be1a2 --- /dev/null +++ b/src/test/regress/sql/citus_split_shard_columnar_partitioned.sql @@ -0,0 +1,294 @@ +CREATE SCHEMA "citus_split_test_schema_columnar_partitioned"; +SET search_path TO "citus_split_test_schema_columnar_partitioned"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; + +-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. + CREATE TABLE sensors( + measureid integer, + eventdatetime date, + measure_data jsonb, + PRIMARY KEY (measureid, eventdatetime, measure_data)) + PARTITION BY RANGE(eventdatetime); + + -- Table access method is specified on child tables + CREATE TABLE sensorscolumnar( + measureid integer, + eventdatetime date, + measure_data jsonb, + PRIMARY KEY (measureid, eventdatetime, measure_data)) + PARTITION BY RANGE(eventdatetime); + + -- Create Partitions of table 'sensors'.
+ CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01'); + CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); + CREATE TABLE sensors_news PARTITION OF sensors FOR VALUES FROM ('2020-05-01') TO ('2025-01-01'); + + CREATE TABLE sensorscolumnar_old PARTITION OF sensorscolumnar FOR VALUES FROM ('2000-01-01') TO ('2020-01-01') USING COLUMNAR; + CREATE TABLE sensorscolumnar_2020_01_01 PARTITION OF sensorscolumnar FOR VALUES FROM ('2020-01-01') TO ('2020-02-01') USING COLUMNAR; + CREATE TABLE sensorscolumnar_news PARTITION OF sensorscolumnar FOR VALUES FROM ('2020-05-01') TO ('2025-01-01') USING COLUMNAR; + + -- Create index on parent and child partitions. + CREATE INDEX index_on_parent ON sensors(lower(measureid::text)); + CREATE INDEX index_on_child ON sensors_2020_01_01(lower(measure_data::text)); + + CREATE INDEX index_on_parent_columnar ON sensorscolumnar(lower(measureid::text)); + CREATE INDEX index_on_child_columnar ON sensorscolumnar_2020_01_01(lower(measure_data::text)); + + ALTER INDEX index_on_parent ALTER COLUMN 1 SET STATISTICS 1000; + ALTER INDEX index_on_child ALTER COLUMN 1 SET STATISTICS 1000; + + ALTER INDEX index_on_parent_columnar ALTER COLUMN 1 SET STATISTICS 1000; + ALTER INDEX index_on_child_columnar ALTER COLUMN 1 SET STATISTICS 1000; + + -- Create statistics on parent and child partitions. + CREATE STATISTICS s1 (dependencies) ON measureid, eventdatetime FROM sensors; + CREATE STATISTICS s2 (dependencies) ON measureid, eventdatetime FROM sensors_2020_01_01; + + CREATE STATISTICS s1_c (dependencies) ON measureid, eventdatetime FROM sensorscolumnar; + CREATE STATISTICS s2_c (dependencies) ON measureid, eventdatetime FROM sensorscolumnar_2020_01_01; + + CLUSTER sensors_2020_01_01 USING index_on_child; + SELECT create_distributed_table('sensors', 'measureid'); + SELECT create_distributed_table('sensorscolumnar', 'measureid'); + + -- create colocated distributed tables + CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); + SELECT create_distributed_table('colocated_dist_table', 'measureid'); + CLUSTER colocated_dist_table USING colocated_dist_table_pkey; + + CREATE TABLE colocated_partitioned_table( + measureid integer, + eventdatetime date, + PRIMARY KEY (measureid, eventdatetime)) + PARTITION BY RANGE(eventdatetime); + CREATE TABLE colocated_partitioned_table_2020_01_01 PARTITION OF colocated_partitioned_table FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); + SELECT create_distributed_table('colocated_partitioned_table', 'measureid'); + CLUSTER colocated_partitioned_table_2020_01_01 USING colocated_partitioned_table_2020_01_01_pkey; + + -- create reference tables + CREATE TABLE reference_table (measureid integer PRIMARY KEY); + SELECT create_reference_table('reference_table'); + + SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + INNER JOIN pg_catalog.pg_namespace ns ON cls.relnamespace = ns.oid + WHERE node.noderole = 'primary' AND ns.nspname = 'citus_split_test_schema_columnar_partitioned' + ORDER BY logicalrelid, shardminvalue::BIGINT; +-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. + +-- BEGIN: Create constraints for tables. 
+ -- from parent to regular dist + ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); + + -- from parent to parent + ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_parent FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table(measureid, eventdatetime); + + -- from parent to child + ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_child FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid, eventdatetime); + + ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); + + -- from child to regular dist + ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); + + -- from child to parent + ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_parent FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table(measureid,eventdatetime); + + -- from child to child + ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_child FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid,eventdatetime); + + ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); + + -- No support for foreign keys, unique constraints, or exclusion constraints in columnar tables. + -- Please see: https://github.com/citusdata/citus/tree/main/src/backend/columnar/README.md + +-- END: Create constraints for tables. + +-- BEGIN: Load data into tables + INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; + INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; + INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; + INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; + INSERT INTO sensorscolumnar SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; +-- END: Load data into tables + +-- BEGIN: Show the current state on workers +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; + + \c - - - :worker_2_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; +-- END: Show the current state on workers + +-- BEGIN: Split a shard along its co-located shards +\c - - - :master_port + SET 
search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.next_shard_id TO 8999000; + SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset + SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset + + SELECT pg_catalog.citus_split_shard_by_split_points( + 8970000, + ARRAY['-2120000000'], + ARRAY[:worker_1_node, :worker_2_node], + 'block_writes'); +-- END: Split a shard along its co-located shards + +-- BEGIN: Validate Shard Info and Data + SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + INNER JOIN pg_catalog.pg_namespace ns ON cls.relnamespace = ns.oid + WHERE node.noderole = 'primary' AND ns.nspname = 'citus_split_test_schema_columnar_partitioned' + ORDER BY logicalrelid, shardminvalue::BIGINT; + + SELECT count(*) FROM reference_table; + SELECT count(*) FROM colocated_partitioned_table; + SELECT count(*) FROM colocated_dist_table; + SELECT count(*) FROM sensors; + SELECT count(*) FROM sensorscolumnar; +-- END: Validate Shard Info and Data + +-- BEGIN: Show the updated state on workers + \c - - - :worker_1_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; + + \c - - - :worker_2_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; +-- END: Show the updated state on workers + +-- BEGIN: Split a partition table directly +\c - - - :master_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.next_shard_id TO 8999100; + SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset + SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset + + SELECT pg_catalog.citus_split_shard_by_split_points( + 8999002, -- sensors_old + ARRAY['-2127770000'], + ARRAY[:worker_1_node, :worker_2_node], + 'block_writes'); +-- END: Split a partition table directly + +-- BEGIN: Validate Shard Info and Data + SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON 
shard.logicalrelid = cls.oid + INNER JOIN pg_catalog.pg_namespace ns ON cls.relnamespace = ns.oid + WHERE node.noderole = 'primary' AND ns.nspname = 'citus_split_test_schema_columnar_partitioned' + ORDER BY logicalrelid, shardminvalue::BIGINT; + + SELECT count(*) FROM reference_table; + SELECT count(*) FROM colocated_partitioned_table; + SELECT count(*) FROM colocated_dist_table; + SELECT count(*) FROM sensors; + SELECT count(*) FROM sensorscolumnar; +-- END: Validate Shard Info and Data + +-- BEGIN: Show the updated state on workers + \c - - - :worker_1_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; + + \c - - - :worker_2_port + SET search_path TO "citus_split_test_schema_columnar_partitioned"; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like '%_89%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like '%_89%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema_columnar_partitioned') + ) + ORDER BY stxname ASC; +-- END: Show the updated state on workers + +--BEGIN : Cleanup + \c - postgres - :master_port + DROP SCHEMA "citus_split_test_schema_columnar_partitioned" CASCADE; +--END : Cleanup diff --git a/src/test/regress/sql/shard_move_constraints_blocking.sql b/src/test/regress/sql/shard_move_constraints_blocking.sql new file mode 100644 index 000000000..6caad1eb0 --- /dev/null +++ b/src/test/regress/sql/shard_move_constraints_blocking.sql @@ -0,0 +1,201 @@ +CREATE SCHEMA "blocking shard Move Fkeys Indexes"; +SET search_path TO "blocking shard Move Fkeys Indexes"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; + +-- create a non-superuser role +CREATE ROLE mx_rebalancer_blocking_role_ent WITH LOGIN; +GRANT ALL ON SCHEMA "blocking shard Move Fkeys Indexes" TO mx_rebalancer_blocking_role_ent; + +-- connect with this new role +\c - mx_rebalancer_blocking_role_ent - :master_port +SET search_path TO "blocking shard Move Fkeys Indexes"; +SET citus.next_shard_id TO 8970000; +SET citus.next_placement_id TO 8770000; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; + +CREATE TABLE sensors( +measureid integer, +eventdatetime date, +measure_data jsonb, +PRIMARY KEY (measureid, eventdatetime, measure_data)) +PARTITION BY RANGE(eventdatetime); + +CREATE TABLE sensors_old PARTITION OF sensors FOR VALUES FROM ('2000-01-01') TO ('2020-01-01'); +CREATE TABLE sensors_2020_01_01 PARTITION OF sensors FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +CREATE TABLE sensors_news PARTITION OF sensors FOR VALUES FROM ('2020-05-01') TO ('2025-01-01'); + +CREATE INDEX index_on_parent ON sensors(lower(measureid::text)); +CREATE INDEX index_on_child ON 
sensors_2020_01_01(lower(measure_data::text)); +CREATE INDEX hash_index ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime); + +CREATE STATISTICS s1 (dependencies) ON measureid, eventdatetime FROM sensors; +CREATE STATISTICS s2 (dependencies) ON measureid, eventdatetime FROM sensors_2020_01_01; + +ALTER INDEX index_on_parent ALTER COLUMN 1 SET STATISTICS 1000; +ALTER INDEX index_on_child ALTER COLUMN 1 SET STATISTICS 1000; + +CLUSTER sensors_2020_01_01 USING index_on_child; +SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); + +-- due to https://github.com/citusdata/citus/issues/5121 +\c - postgres - :master_port +SET search_path TO "blocking shard Move Fkeys Indexes"; + +SELECT update_distributed_table_colocation('sensors_old', 'sensors'); +SELECT update_distributed_table_colocation('sensors_2020_01_01', 'sensors'); +SELECT update_distributed_table_colocation('sensors_news', 'sensors'); + +\c - mx_rebalancer_blocking_role_ent - :master_port +SET search_path TO "blocking shard Move Fkeys Indexes"; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 8970016; +SET citus.next_placement_id TO 8770016; + +-- create colocated distributed tables and create foreign keys FROM/TO +-- the partitions +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); + +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; + +CREATE TABLE colocated_partitioned_table( + measureid integer, + eventdatetime date, + PRIMARY KEY (measureid, eventdatetime)) +PARTITION BY RANGE(eventdatetime); + +CREATE TABLE colocated_partitioned_table_2020_01_01 PARTITION OF colocated_partitioned_table FOR VALUES FROM ('2020-01-01') TO ('2020-02-01'); +SELECT create_distributed_table('colocated_partitioned_table', 'measureid', colocate_with:='sensors'); + +CLUSTER colocated_partitioned_table_2020_01_01 USING colocated_partitioned_table_2020_01_01_pkey; + +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); + +-- this table is used to make sure that index backed +-- replica identities can have clustered indexes +-- and no index statistics +CREATE TABLE index_backed_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON index_backed_rep_identity(key); +ALTER TABLE index_backed_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER index_backed_rep_identity USING uqx; +SELECT create_distributed_table('index_backed_rep_identity', 'key', colocate_with:='sensors'); + +-- from parent to regular dist +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); + +-- from parent to parent +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_parent FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table(measureid, eventdatetime); + +-- from parent to child +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_child FOREIGN KEY (measureid, eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid, eventdatetime); + +-- from parent to reference table +ALTER TABLE sensors ADD CONSTRAINT fkey_from_parent_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); + +-- from child to regular dist +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_dist
FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); + +-- from child to parent +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_parent FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table(measureid,eventdatetime); + +-- from child to child +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_child FOREIGN KEY (measureid,eventdatetime) REFERENCES colocated_partitioned_table_2020_01_01(measureid,eventdatetime); + +-- from child to reference table +ALTER TABLE sensors_2020_01_01 ADD CONSTRAINT fkey_from_child_to_ref FOREIGN KEY (measureid) REFERENCES reference_table(measureid); + +-- load some data +INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_partitioned_table SELECT i, '2020-01-05' FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}' FROM generate_series(0,1000)i; + +\c - postgres - :worker_1_port +SET search_path TO "blocking shard Move Fkeys Indexes", public, pg_catalog; + +-- show the current state of the constraints +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_8970000'::regclass ORDER BY 1,2; +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_2020_01_01_8970008'::regclass ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_8970000' ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_2020_01_01_8970008' ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='index_backed_rep_identity_8970029' ORDER BY 1,2; +SELECT indisclustered FROM pg_index where indisclustered AND indrelid = 'index_backed_rep_identity_8970029'::regclass; + +SELECT stxname FROM pg_statistic_ext +WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('blocking shard Move Fkeys Indexes') +) +ORDER BY stxname ASC; + +SELECT count(*) FROM pg_index +WHERE indisclustered + and +indrelid IN +('sensors_2020_01_01_8970008'::regclass, 'colocated_dist_table_8970016'::regclass, 'colocated_partitioned_table_2020_01_01_8970024'::regclass); +\c - - - :master_port +-- make sure that constraints are moved sanely with block writes +SELECT citus_move_shard_placement(8970000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); +CALL citus_cleanup_orphaned_shards(); + + +\c - postgres - :worker_2_port +SET search_path TO "blocking shard Move Fkeys Indexes", public, pg_catalog; +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_8970000'::regclass ORDER BY 1,2; +SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='sensors_2020_01_01_8970008'::regclass ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_8970000' ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='sensors_2020_01_01_8970008' ORDER BY 1,2; +SELECT tablename, indexdef FROM pg_indexes WHERE tablename ='index_backed_rep_identity_8970029' ORDER BY 1,2; +SELECT indisclustered FROM pg_index where indisclustered AND indrelid = 'index_backed_rep_identity_8970029'::regclass; +
+SELECT stxname FROM pg_statistic_ext +WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('blocking shard Move Fkeys Indexes') +) +ORDER BY stxname ASC; + +SELECT count(*) FROM pg_index +WHERE indisclustered + and +indrelid IN +('sensors_2020_01_01_8970008'::regclass,
'colocated_dist_table_8970016'::regclass, 'colocated_partitioned_table_2020_01_01_8970024'::regclass); + +\c - mx_rebalancer_blocking_role_ent - :master_port +-- verify that the data is consistent +SET search_path TO "blocking shard Move Fkeys Indexes"; +SELECT count(*) FROM reference_table; +SELECT count(*) FROM colocated_partitioned_table; +SELECT count(*) FROM colocated_dist_table; +SELECT count(*) FROM sensors; + +-- we should be able to change/drop constraints +ALTER INDEX index_on_parent RENAME TO index_on_parent_renamed; +ALTER INDEX index_on_child RENAME TO index_on_child_renamed; + +ALTER INDEX index_on_parent_renamed ALTER COLUMN 1 SET STATISTICS 200; +ALTER INDEX index_on_child_renamed ALTER COLUMN 1 SET STATISTICS 200; + +DROP STATISTICS s1,s2; + +DROP INDEX index_on_parent_renamed; +DROP INDEX index_on_child_renamed; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_dist; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_parent; +ALTER TABLE sensors DROP CONSTRAINT fkey_from_parent_to_child; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_dist; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_parent; +ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_child; + +-- cleanup +\c - postgres - :master_port +DROP SCHEMA "blocking shard Move Fkeys Indexes" CASCADE; From eb3e5ee227b5919a778bdacac6ff8046adffa0a5 Mon Sep 17 00:00:00 2001 From: Hanefi Onaldi Date: Tue, 5 Jul 2022 00:01:30 +0300 Subject: [PATCH 10/10] Introduce citus_locks view citus_locks combines the pg_locks views from all nodes and adds global_pid, nodeid, and relation_name. The columns of citus_locks don't change based on the Postgres version; however, the columns of pg_locks do. Postgres 14 added one more column to pg_locks (waitstart timestamptz). citus_locks has the most expansive column set, including the newly added column.
If citus_locks is queried in a Postgres version where pg_locks doesn't have some columns, the values for those columns in citus_locks will be NULL --- .../distributed/sql/citus--11.0-3--11.1-1.sql | 2 + .../sql/downgrades/citus--11.1-1--11.0-3.sql | 3 + .../sql/udfs/citus_locks/11.1-1.sql | 86 +++++++++++++++++++ .../sql/udfs/citus_locks/latest.sql | 86 +++++++++++++++++++ src/test/regress/after_pg_upgrade_schedule | 2 +- src/test/regress/before_pg_upgrade_schedule | 1 + src/test/regress/expected/citus_locks.out | 47 ++++++++++ .../expected/isolation_citus_locks.out | 69 +++++++++++++++ src/test/regress/expected/multi_extension.out | 4 +- .../regress/expected/upgrade_citus_locks.out | 17 ++++ .../expected/upgrade_list_citus_objects.out | 4 +- src/test/regress/isolation_schedule | 1 + src/test/regress/multi_1_schedule | 1 + .../regress/spec/isolation_citus_locks.spec | 49 +++++++++++ src/test/regress/sql/citus_locks.sql | 20 +++++ src/test/regress/sql/upgrade_citus_locks.sql | 7 ++ 16 files changed, 396 insertions(+), 3 deletions(-) create mode 100644 src/backend/distributed/sql/udfs/citus_locks/11.1-1.sql create mode 100644 src/backend/distributed/sql/udfs/citus_locks/latest.sql create mode 100644 src/test/regress/expected/citus_locks.out create mode 100644 src/test/regress/expected/isolation_citus_locks.out create mode 100644 src/test/regress/expected/upgrade_citus_locks.out create mode 100644 src/test/regress/spec/isolation_citus_locks.spec create mode 100644 src/test/regress/sql/citus_locks.sql create mode 100644 src/test/regress/sql/upgrade_citus_locks.sql diff --git a/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql b/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql index a9c9108a0..f8b956378 100644 --- a/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql +++ b/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql @@ -1,3 +1,5 @@ +#include "udfs/citus_locks/11.1-1.sql" + DROP FUNCTION pg_catalog.worker_create_schema(bigint,text); DROP FUNCTION pg_catalog.worker_cleanup_job_schema_cache(); DROP FUNCTION pg_catalog.worker_fetch_foreign_file(text, text, bigint, text[], integer[]); diff --git a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql index f9b2f19d5..26430a9f6 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql @@ -77,3 +77,6 @@ DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_ OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8); #include "../udfs/get_all_active_transactions/11.0-1.sql" + +DROP VIEW pg_catalog.citus_locks; +DROP FUNCTION pg_catalog.citus_locks(); diff --git a/src/backend/distributed/sql/udfs/citus_locks/11.1-1.sql b/src/backend/distributed/sql/udfs/citus_locks/11.1-1.sql new file mode 100644 index 000000000..cfe269909 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_locks/11.1-1.sql @@ -0,0 +1,86 @@ +-- citus_locks combines the pg_locks views from all nodes and adds global_pid, nodeid, and +-- relation_name. The columns of citus_locks don't change based on the Postgres version, +-- however the pg_locks's columns do. Postgres 14 added one more column to pg_locks +-- (waitstart timestamptz). citus_locks has the most expansive column set, including the +-- newly added column. 
If citus_locks is queried in a Postgres version where pg_locks +-- doesn't have some columns, the values for those columns in citus_locks will be NULL +CREATE OR REPLACE FUNCTION pg_catalog.citus_locks ( + OUT global_pid bigint, + OUT nodeid int, + OUT locktype text, + OUT database oid, + OUT relation oid, + OUT relation_name text, + OUT page integer, + OUT tuple smallint, + OUT virtualxid text, + OUT transactionid xid, + OUT classid oid, + OUT objid oid, + OUT objsubid smallint, + OUT virtualtransaction text, + OUT pid integer, + OUT mode text, + OUT granted boolean, + OUT fastpath boolean, + OUT waitstart timestamp with time zone +) + RETURNS SETOF record + LANGUAGE plpgsql + AS $function$ +BEGIN + RETURN QUERY + SELECT * + FROM jsonb_to_recordset(( + SELECT + jsonb_agg(all_citus_locks_rows_as_jsonb.citus_locks_row_as_jsonb)::jsonb + FROM ( + SELECT + jsonb_array_elements(run_command_on_all_nodes.result::jsonb)::jsonb || + ('{"nodeid":' || run_command_on_all_nodes.nodeid || '}')::jsonb AS citus_locks_row_as_jsonb + FROM + run_command_on_all_nodes ( + $$ + SELECT + coalesce(to_jsonb (array_agg(citus_locks_from_one_node.*)), '[{}]'::jsonb) + FROM ( + SELECT + global_pid, pg_locks.relation::regclass::text AS relation_name, pg_locks.* + FROM pg_locks + LEFT JOIN get_all_active_transactions () ON process_id = pid) AS citus_locks_from_one_node; + $$, + parallel:= TRUE, + give_warning_for_connection_errors:= TRUE) + WHERE + success = 't') + AS all_citus_locks_rows_as_jsonb)) +AS ( + global_pid bigint, + nodeid int, + locktype text, + database oid, + relation oid, + relation_name text, + page integer, + tuple smallint, + virtualxid text, + transactionid xid, + classid oid, + objid oid, + objsubid smallint, + virtualtransaction text, + pid integer, + mode text, + granted boolean, + fastpath boolean, + waitstart timestamp with time zone +); +END; +$function$; + +CREATE OR REPLACE VIEW citus.citus_locks AS +SELECT * FROM pg_catalog.citus_locks(); + +ALTER VIEW citus.citus_locks SET SCHEMA pg_catalog; + +GRANT SELECT ON pg_catalog.citus_locks TO PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_locks/latest.sql b/src/backend/distributed/sql/udfs/citus_locks/latest.sql new file mode 100644 index 000000000..cfe269909 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_locks/latest.sql @@ -0,0 +1,86 @@ +-- citus_locks combines the pg_locks views from all nodes and adds global_pid, nodeid, and +-- relation_name. The columns of citus_locks don't change based on the Postgres version, +-- however the pg_locks's columns do. Postgres 14 added one more column to pg_locks +-- (waitstart timestamptz). citus_locks has the most expansive column set, including the +-- newly added column. 
If citus_locks is queried in a Postgres version where pg_locks +-- doesn't have some columns, the values for those columns in citus_locks will be NULL +CREATE OR REPLACE FUNCTION pg_catalog.citus_locks ( + OUT global_pid bigint, + OUT nodeid int, + OUT locktype text, + OUT database oid, + OUT relation oid, + OUT relation_name text, + OUT page integer, + OUT tuple smallint, + OUT virtualxid text, + OUT transactionid xid, + OUT classid oid, + OUT objid oid, + OUT objsubid smallint, + OUT virtualtransaction text, + OUT pid integer, + OUT mode text, + OUT granted boolean, + OUT fastpath boolean, + OUT waitstart timestamp with time zone +) + RETURNS SETOF record + LANGUAGE plpgsql + AS $function$ +BEGIN + RETURN QUERY + SELECT * + FROM jsonb_to_recordset(( + SELECT + jsonb_agg(all_citus_locks_rows_as_jsonb.citus_locks_row_as_jsonb)::jsonb + FROM ( + SELECT + jsonb_array_elements(run_command_on_all_nodes.result::jsonb)::jsonb || + ('{"nodeid":' || run_command_on_all_nodes.nodeid || '}')::jsonb AS citus_locks_row_as_jsonb + FROM + run_command_on_all_nodes ( + $$ + SELECT + coalesce(to_jsonb (array_agg(citus_locks_from_one_node.*)), '[{}]'::jsonb) + FROM ( + SELECT + global_pid, pg_locks.relation::regclass::text AS relation_name, pg_locks.* + FROM pg_locks + LEFT JOIN get_all_active_transactions () ON process_id = pid) AS citus_locks_from_one_node; + $$, + parallel:= TRUE, + give_warning_for_connection_errors:= TRUE) + WHERE + success = 't') + AS all_citus_locks_rows_as_jsonb)) +AS ( + global_pid bigint, + nodeid int, + locktype text, + database oid, + relation oid, + relation_name text, + page integer, + tuple smallint, + virtualxid text, + transactionid xid, + classid oid, + objid oid, + objsubid smallint, + virtualtransaction text, + pid integer, + mode text, + granted boolean, + fastpath boolean, + waitstart timestamp with time zone +); +END; +$function$; + +CREATE OR REPLACE VIEW citus.citus_locks AS +SELECT * FROM pg_catalog.citus_locks(); + +ALTER VIEW citus.citus_locks SET SCHEMA pg_catalog; + +GRANT SELECT ON pg_catalog.citus_locks TO PUBLIC; diff --git a/src/test/regress/after_pg_upgrade_schedule b/src/test/regress/after_pg_upgrade_schedule index f8e4e66ae..52b88d71d 100644 --- a/src/test/regress/after_pg_upgrade_schedule +++ b/src/test/regress/after_pg_upgrade_schedule @@ -1,4 +1,4 @@ -test: upgrade_basic_after upgrade_type_after upgrade_ref2ref_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity +test: upgrade_basic_after upgrade_type_after upgrade_ref2ref_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks # This attempts dropping citus extension (and rollbacks), so please do # not run in parallel with any other tests. diff --git a/src/test/regress/before_pg_upgrade_schedule b/src/test/regress/before_pg_upgrade_schedule index 880b25e0a..93bcce368 100644 --- a/src/test/regress/before_pg_upgrade_schedule +++ b/src/test/regress/before_pg_upgrade_schedule @@ -7,6 +7,7 @@ test: upgrade_type_before test: upgrade_distributed_function_before upgrade_rebalance_strategy_before test: upgrade_autoconverted_before test: upgrade_citus_stat_activity +test: upgrade_citus_locks # upgrade_columnar_before renames public schema to citus_schema, so let's # run this test as the last one. 
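The only subtle part of the citus_locks() function above is the jsonb round-trip: every node serializes its pg_locks rows into a jsonb array, the arrays are merged, and jsonb_to_recordset() expands the merged array back into typed rows, with any key missing from a row coming back as NULL. The standalone sketch below isolates that pattern. It uses only built-in PostgreSQL functions, the lock values are hypothetical, and it is not part of the patch itself.

-- Two hypothetical per-node lock rows aggregated into a jsonb array, then
-- expanded back into typed rows. The second row omits "granted", so that
-- column surfaces as NULL, which is how citus_locks tolerates pg_locks
-- columns that do not exist on older Postgres versions.
SELECT *
FROM jsonb_to_recordset((
    SELECT jsonb_agg(lock_row)
    FROM (VALUES
        ('{"pid": 101, "mode": "AccessShareLock", "granted": true}'::jsonb),
        ('{"pid": 102, "mode": "RowExclusiveLock"}'::jsonb)
    ) AS per_node(lock_row)
))
AS (pid integer, mode text, granted boolean);

This returns (101, AccessShareLock, t) and (102, RowExclusiveLock, NULL); the column definition list after AS plays the same role as the long column list in the citus_locks() definition above.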
diff --git a/src/test/regress/expected/citus_locks.out b/src/test/regress/expected/citus_locks.out new file mode 100644 index 000000000..3724092e5 --- /dev/null +++ b/src/test/regress/expected/citus_locks.out @@ -0,0 +1,47 @@ +CREATE SCHEMA citus_locks; +SET search_path TO citus_locks; +SET citus.next_shard_id TO 1000; +CREATE TABLE dist_locked_table(id int, data text); +SELECT create_distributed_table('dist_locked_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +BEGIN; +-- Alter a distributed table so that we get some locks +ALTER TABLE dist_locked_table ADD COLUMN new_data_column text; +-- list the locks on relations for current distributed transaction +SELECT relation_name, citus_nodename_for_nodeid(nodeid), citus_nodeport_for_nodeid(nodeid), mode, granted +FROM citus_locks +WHERE global_pid = citus_backend_gpid() AND locktype = 'relation' AND relation_name LIKE '%dist_locked_table%' +ORDER BY 1, 2, 3, 4; + relation_name | citus_nodename_for_nodeid | citus_nodeport_for_nodeid | mode | granted +--------------------------------------------------------------------- + citus_locks.dist_locked_table | localhost | 57636 | AccessExclusiveLock | t + citus_locks.dist_locked_table | localhost | 57636 | AccessShareLock | t + citus_locks.dist_locked_table | localhost | 57637 | AccessExclusiveLock | t + citus_locks.dist_locked_table | localhost | 57637 | AccessShareLock | t + citus_locks.dist_locked_table | localhost | 57638 | AccessExclusiveLock | t + citus_locks.dist_locked_table | localhost | 57638 | AccessShareLock | t + citus_locks.dist_locked_table_1000 | localhost | 57637 | AccessExclusiveLock | t + citus_locks.dist_locked_table_1000 | localhost | 57637 | AccessShareLock | t + citus_locks.dist_locked_table_1000 | localhost | 57638 | AccessExclusiveLock | t + citus_locks.dist_locked_table_1000 | localhost | 57638 | AccessShareLock | t + citus_locks.dist_locked_table_1001 | localhost | 57637 | AccessExclusiveLock | t + citus_locks.dist_locked_table_1001 | localhost | 57637 | AccessShareLock | t + citus_locks.dist_locked_table_1001 | localhost | 57638 | AccessExclusiveLock | t + citus_locks.dist_locked_table_1001 | localhost | 57638 | AccessShareLock | t + citus_locks.dist_locked_table_1002 | localhost | 57637 | AccessExclusiveLock | t + citus_locks.dist_locked_table_1002 | localhost | 57637 | AccessShareLock | t + citus_locks.dist_locked_table_1002 | localhost | 57638 | AccessExclusiveLock | t + citus_locks.dist_locked_table_1002 | localhost | 57638 | AccessShareLock | t + citus_locks.dist_locked_table_1003 | localhost | 57637 | AccessExclusiveLock | t + citus_locks.dist_locked_table_1003 | localhost | 57637 | AccessShareLock | t + citus_locks.dist_locked_table_1003 | localhost | 57638 | AccessExclusiveLock | t + citus_locks.dist_locked_table_1003 | localhost | 57638 | AccessShareLock | t +(22 rows) + +ROLLBACK; +DROP SCHEMA citus_locks CASCADE; +NOTICE: drop cascades to table dist_locked_table diff --git a/src/test/regress/expected/isolation_citus_locks.out b/src/test/regress/expected/isolation_citus_locks.out new file mode 100644 index 000000000..5d6d9c654 --- /dev/null +++ b/src/test/regress/expected/isolation_citus_locks.out @@ -0,0 +1,69 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-record-gpid s1-begin s2-show-locks s1-alter-dist-table s2-show-locks s1-commit s2-show-locks +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step 
+step s1-record-gpid:
+    SELECT citus_backend_gpid() INTO selected_gpid;
+
+step s1-begin:
+    BEGIN;
+
+step s2-show-locks:
+    SELECT relation_name, citus_nodename_for_nodeid(nodeid), citus_nodeport_for_nodeid(nodeid), mode
+    FROM citus_locks
+    WHERE global_pid IN (SELECT * FROM selected_gpid) AND relation_name LIKE 'dist_table%'
+    ORDER BY 1, 2, 3, 4;
+
+relation_name|citus_nodename_for_nodeid|citus_nodeport_for_nodeid|mode
+---------------------------------------------------------------------
+(0 rows)
+
+step s1-alter-dist-table:
+    ALTER TABLE dist_table ADD COLUMN data text;
+
+step s2-show-locks:
+    SELECT relation_name, citus_nodename_for_nodeid(nodeid), citus_nodeport_for_nodeid(nodeid), mode
+    FROM citus_locks
+    WHERE global_pid IN (SELECT * FROM selected_gpid) AND relation_name LIKE 'dist_table%'
+    ORDER BY 1, 2, 3, 4;
+
+relation_name      |citus_nodename_for_nodeid|citus_nodeport_for_nodeid|mode
+---------------------------------------------------------------------
+dist_table         |localhost                |                    57636|AccessExclusiveLock
+dist_table         |localhost                |                    57636|AccessShareLock
+dist_table         |localhost                |                    57637|AccessExclusiveLock
+dist_table         |localhost                |                    57637|AccessShareLock
+dist_table         |localhost                |                    57638|AccessExclusiveLock
+dist_table         |localhost                |                    57638|AccessShareLock
+dist_table_12345000|localhost                |                    57637|AccessExclusiveLock
+dist_table_12345000|localhost                |                    57637|AccessShareLock
+dist_table_12345001|localhost                |                    57638|AccessExclusiveLock
+dist_table_12345001|localhost                |                    57638|AccessShareLock
+dist_table_12345002|localhost                |                    57637|AccessExclusiveLock
+dist_table_12345002|localhost                |                    57637|AccessShareLock
+dist_table_12345003|localhost                |                    57638|AccessExclusiveLock
+dist_table_12345003|localhost                |                    57638|AccessShareLock
+(14 rows)
+
+step s1-commit:
+    COMMIT;
+
+step s2-show-locks:
+    SELECT relation_name, citus_nodename_for_nodeid(nodeid), citus_nodeport_for_nodeid(nodeid), mode
+    FROM citus_locks
+    WHERE global_pid IN (SELECT * FROM selected_gpid) AND relation_name LIKE 'dist_table%'
+    ORDER BY 1, 2, 3, 4;
+
+relation_name|citus_nodename_for_nodeid|citus_nodeport_for_nodeid|mode
+---------------------------------------------------------------------
+(0 rows)
+
+citus_remove_node
+---------------------------------------------------------------------
+
+(1 row)
+
diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out
index 5d9d3f45a..c2624894c 100644
--- a/src/test/regress/expected/multi_extension.out
+++ b/src/test/regress/expected/multi_extension.out
@@ -1095,10 +1095,12 @@ SELECT * FROM multi_extension.print_extension_changes();
  table columnar.chunk_group |
  table columnar.options |
  table columnar.stripe |
+ | function citus_locks() SETOF record
  | function citus_split_shard_by_split_points(bigint,text[],integer[],citus.shard_transfer_mode) void
  | function worker_split_copy(bigint,split_copy_info[]) void
  | type split_copy_info
-(24 rows)
+ | view citus_locks
+(26 rows)
 
 DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
 -- show running version
diff --git a/src/test/regress/expected/upgrade_citus_locks.out b/src/test/regress/expected/upgrade_citus_locks.out
new file mode 100644
index 000000000..9bfda82a9
--- /dev/null
+++ b/src/test/regress/expected/upgrade_citus_locks.out
@@ -0,0 +1,17 @@
+SELECT column_name FROM information_schema.columns WHERE table_name = 'citus_locks' AND column_name NOT IN ('waitstart')
+EXCEPT SELECT column_name FROM information_schema.columns WHERE table_name = 'pg_locks'
+ORDER BY 1;
+ column_name
+---------------------------------------------------------------------
+ global_pid
+ nodeid
+ relation_name
+(3 rows)
+
+SELECT column_name FROM information_schema.columns WHERE table_name = 'pg_locks'
+EXCEPT SELECT column_name FROM information_schema.columns WHERE table_name = 'citus_locks'
+ORDER BY 1;
+ column_name
+---------------------------------------------------------------------
+(0 rows)
+
diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out
index 63c82f3d7..0271d4a77 100644
--- a/src/test/regress/expected/upgrade_list_citus_objects.out
+++ b/src/test/regress/expected/upgrade_list_citus_objects.out
@@ -77,6 +77,7 @@ ORDER BY 1;
  function citus_jsonb_concatenate(jsonb,jsonb)
  function citus_jsonb_concatenate_final(jsonb)
  function citus_local_disk_space_stats()
+ function citus_locks()
  function citus_move_shard_placement(bigint,text,integer,text,integer,citus.shard_transfer_mode)
  function citus_node_capacity_1(integer)
  function citus_nodeid_for_gpid(bigint)
@@ -253,6 +254,7 @@ ORDER BY 1;
  type split_copy_info
  view citus_dist_stat_activity
  view citus_lock_waits
+ view citus_locks
  view citus_schema.citus_tables
  view citus_shard_indexes_on_worker
  view citus_shards
@@ -261,5 +263,5 @@ ORDER BY 1;
  view citus_stat_statements
  view pg_dist_shard_placement
  view time_partitions
-(253 rows)
+(255 rows)
 
diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule
index c0fbfcfa3..0b9738840 100644
--- a/src/test/regress/isolation_schedule
+++ b/src/test/regress/isolation_schedule
@@ -67,6 +67,7 @@ test: isolation_max_client_connections
 test: isolation_undistribute_table
 test: isolation_fix_partition_shard_index_names
 test: isolation_global_pid
+test: isolation_citus_locks
 
 # Rebalancer
 test: isolation_blocking_move_single_shard_commands
diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule
index 9d70f169e..74cc196d4 100644
--- a/src/test/regress/multi_1_schedule
+++ b/src/test/regress/multi_1_schedule
@@ -287,6 +287,7 @@ test: create_citus_local_table_cascade
 test: fkeys_between_local_ref
 test: auto_undist_citus_local
 test: mx_regular_user
+test: citus_locks
 test: global_cancel
 test: remove_coordinator
 
diff --git a/src/test/regress/spec/isolation_citus_locks.spec b/src/test/regress/spec/isolation_citus_locks.spec
new file mode 100644
index 000000000..cfad78fc5
--- /dev/null
+++ b/src/test/regress/spec/isolation_citus_locks.spec
@@ -0,0 +1,49 @@
+#include "isolation_mx_common.include.spec"
+
+setup
+{
+    SELECT citus_add_node('localhost', 57636, groupid:=0);
+    SET citus.next_shard_id TO 12345000;
+    CREATE TABLE dist_table (a INT, b INT);
+    SELECT create_distributed_table('dist_table', 'a', shard_count:=4);
+}
+
+teardown
+{
+    DROP TABLE dist_table, selected_gpid;
+    SELECT citus_remove_node('localhost', 57636);
+}
+
+session "s1"
+
+step "s1-begin"
+{
+    BEGIN;
+}
+
+step "s1-alter-dist-table"
+{
+    ALTER TABLE dist_table ADD COLUMN data text;
+}
+
+step "s1-record-gpid"
+{
+    SELECT citus_backend_gpid() INTO selected_gpid;
+}
+
+step "s1-commit"
+{
+    COMMIT;
+}
+
+session "s2"
+
+step "s2-show-locks"
+{
+    SELECT relation_name, citus_nodename_for_nodeid(nodeid), citus_nodeport_for_nodeid(nodeid), mode
+    FROM citus_locks
+    WHERE global_pid IN (SELECT * FROM selected_gpid) AND relation_name LIKE 'dist_table%'
+    ORDER BY 1, 2, 3, 4;
+}
+
+permutation "s1-record-gpid" "s1-begin" "s2-show-locks" "s1-alter-dist-table" "s2-show-locks" "s1-commit" "s2-show-locks"
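[Reviewer note, not part of the patch: the isolation tester runs each session in its own backend, so the spec above hands s1's global PID to s2 through a regular table rather than a client-side variable. A sketch of the same pattern outside the tester, reusing the selected_gpid name from the spec:

    -- session 1: persist this backend's global PID
    SELECT citus_backend_gpid() INTO selected_gpid;
    -- session 2: filter citus_locks by the recorded gpid
    SELECT relation_name, mode
    FROM citus_locks
    WHERE global_pid IN (SELECT * FROM selected_gpid);

The table also doubles as teardown state, which is why the spec drops dist_table and selected_gpid together.]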
diff --git a/src/test/regress/sql/citus_locks.sql b/src/test/regress/sql/citus_locks.sql
new file mode 100644
index 000000000..dc4c7e9f6
--- /dev/null
+++ b/src/test/regress/sql/citus_locks.sql
@@ -0,0 +1,20 @@
+CREATE SCHEMA citus_locks;
+SET search_path TO citus_locks;
+SET citus.next_shard_id TO 1000;
+
+CREATE TABLE dist_locked_table(id int, data text);
+SELECT create_distributed_table('dist_locked_table', 'id');
+
+BEGIN;
+-- Alter a distributed table so that we get some locks
+ALTER TABLE dist_locked_table ADD COLUMN new_data_column text;
+
+-- list the locks on relations for current distributed transaction
+SELECT relation_name, citus_nodename_for_nodeid(nodeid), citus_nodeport_for_nodeid(nodeid), mode, granted
+FROM citus_locks
+WHERE global_pid = citus_backend_gpid() AND locktype = 'relation' AND relation_name LIKE '%dist_locked_table%'
+ORDER BY 1, 2, 3, 4;
+
+ROLLBACK;
+
+DROP SCHEMA citus_locks CASCADE;
diff --git a/src/test/regress/sql/upgrade_citus_locks.sql b/src/test/regress/sql/upgrade_citus_locks.sql
new file mode 100644
index 000000000..3c2595a65
--- /dev/null
+++ b/src/test/regress/sql/upgrade_citus_locks.sql
@@ -0,0 +1,7 @@
+SELECT column_name FROM information_schema.columns WHERE table_name = 'citus_locks' AND column_name NOT IN ('waitstart')
+EXCEPT SELECT column_name FROM information_schema.columns WHERE table_name = 'pg_locks'
+ORDER BY 1;
+
+SELECT column_name FROM information_schema.columns WHERE table_name = 'pg_locks'
+EXCEPT SELECT column_name FROM information_schema.columns WHERE table_name = 'citus_locks'
+ORDER BY 1;
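[Reviewer note, not part of the patch: the two set-difference queries above pin citus_locks to pg_locks' schema after a major-version upgrade — the first should return only the three Citus-specific columns (global_pid, nodeid, relation_name), the second should be empty. waitstart is filtered out because pg_locks only gained that column in PG 14, while the view declares it unconditionally and returns NULL where the server lacks it, per the comment in the view definition. A hand-run sketch of the same parity check collapsed to a single boolean:

    SELECT count(*) = 0 AS no_pg_locks_column_missing
    FROM (SELECT column_name FROM information_schema.columns WHERE table_name = 'pg_locks'
          EXCEPT
          SELECT column_name FROM information_schema.columns WHERE table_name = 'citus_locks') AS missing;
]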