diff --git a/.circleci/config.yml b/.circleci/config.yml index 0aa0fd4b8..7444e9bb3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,10 +6,7 @@ orbs: parameters: image_suffix: type: string - default: '-v2021_10_27' - pg12_version: - type: string - default: '12.8' + default: '-vabaecad' pg13_version: type: string default: '13.4' @@ -18,7 +15,7 @@ parameters: default: '14.0' upgrade_pg_versions: type: string - default: '12.8-13.4-14.0' + default: '13.4-14.0' jobs: build: description: Build the citus extension @@ -529,10 +526,6 @@ workflows: ignore: - /release-[0-9]+\.[0-9]+.*/ # match with releaseX.Y.* - - build: - name: build-12 - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - build: name: build-13 pg_major: 13 @@ -545,80 +538,6 @@ workflows: - check-style - check-sql-snapshots - - test-citus: - name: 'test-12_check-multi' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-multi - requires: [build-12] - - test-citus: - name: 'test-12_check-multi-1' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-multi-1 - requires: [build-12] - - test-citus: - name: 'test-12_check-mx' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-multi-mx - requires: [build-12] - - test-citus: - name: 'test-12_check-vanilla' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-vanilla - requires: [build-12] - - test-citus: - name: 'test-12_check-isolation' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-isolation - requires: [build-12] - - test-citus: - name: 'test-12_check-worker' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-worker - requires: [build-12] - - test-citus: - name: 'test-12_check-operations' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-operations - requires: [build-12] - - test-citus: - name: 'test-12_check-follower-cluster' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-follower-cluster - requires: [build-12] - - test-citus: - name: 'test-12_check-columnar' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-columnar - requires: [build-12] - - test-citus: - name: 'test-12_check-columnar-isolation' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-columnar-isolation - requires: [build-12] - - tap-test-citus: - name: 'test_12_tap-recovery' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - suite: recovery - requires: [build-12] - - test-citus: - name: 'test-12_check-failure' - pg_major: 12 - image: citus/failtester - image_tag: '<< pipeline.parameters.pg12_version >>' - make: check-failure - requires: [build-12] - - test-citus: name: 'test-13_check-multi' pg_major: 13 @@ -767,11 +686,6 @@ workflows: make: check-failure requires: [build-14] - - test-arbitrary-configs: - name: 'test-12_check-arbitrary-configs' - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - requires: [build-12] - test-arbitrary-configs: name: 'test-13_check-arbitrary-configs' pg_major: 13 @@ -783,20 +697,6 @@ workflows: image_tag: '<< pipeline.parameters.pg14_version >>' requires: [build-14] - - test-pg-upgrade: - name: 'test-12-13_check-pg-upgrade' - old_pg_major: 12 - new_pg_major: 13 - image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' - requires: [build-12, build-13] - - - test-pg-upgrade: - name: 
'test-12-14_check-pg-upgrade' - old_pg_major: 12 - new_pg_major: 14 - image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' - requires: [build-12, build-14] - - test-pg-upgrade: name: 'test-13-14_check-pg-upgrade' old_pg_major: 13 @@ -805,10 +705,10 @@ workflows: requires: [build-13, build-14] - test-citus-upgrade: - name: test-12_check-citus-upgrade - pg_major: 12 - image_tag: '<< pipeline.parameters.pg12_version >>' - requires: [build-12] + name: test-13_check-citus-upgrade + pg_major: 13 + image_tag: '<< pipeline.parameters.pg13_version >>' + requires: [build-13] - ch_benchmark: requires: [build-13] diff --git a/CHANGELOG.md b/CHANGELOG.md index af747f5ea..50a9c57e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,248 @@ +### citus v11.0.0_beta (March 22, 2022) ### + +* Drops support for PostgreSQL 12 + +* Turns metadata syncing on by default + +* Adds `citus_finalize_upgrade_to_citus11()` which is necessary to upgrade to + Citus 11+ from earlier versions + +* Adds `citus.max_client_connections` GUC to limit non-Citus connections + +* Allows locally creating objects having a dependency that cannot be distributed + +* Distributes aggregates with `CREATE AGGREGATE` command + +* Distributes functions with `CREATE FUNCTION` command + +* Adds `citus.create_object_propagation` GUC to control DDL creation behaviour + in transactions + +* Hides shards based on `application_name` prefix + +* Prevents specifying `application_name` via `citus.node_conninfo` + +* Starts identifying rebalancer backends by `application_name=citus_rebalancer` + +* Starts identifying internal backends by `application_name=citus_internal` + +* Adds `citus.enable_unsafe_triggers` flag to enable unsafe triggers on + distributed tables + +* Adds `fix_partition_shard_index_names` UDF to fix currently broken names + +* Adds propagation for foreign server commands + +* Adds propagation of `TEXT SEARCH CONFIGURATION` objects + +* Adds propagation of `TEXT SEARCH DICTIONARY` objects + +* Adds support for `ALTER FUNCTION ... 
SUPPORT ...` commands + +* Adds support for `CREATE SCHEMA AUTHORIZATION` statements without schema name + +* Adds support for `TRUNCATE` for foreign tables + +* Adds support for adding local tables to metadata using + `citus_add_local_table_to_metadata()` UDF + +* Adds support for adding partitioned local tables to Citus metadata + +* Adds support for automatic binary encoding in re-partition joins when possible + +* Adds support for foreign tables in MX + +* Adds support for operator class parameters in indexes + +* Adds support for re-partition joins in transaction blocks + +* Adds support for re-partition joins on followers + +* Adds support for shard replication > 1 hash distributed tables on Citus MX + +* Improves handling of `IN`, `OUT` and `INOUT` parameters for functions + +* Introduces `citus_backend_gpid()` UDF to get global pid of the current backend + +* Introduces `citus_check_cluster_node_health` UDF to check cluster connectivity + +* Introduces `citus_check_connection_to_node` UDF to check node connectivity + +* Introduces `citus_coordinator_nodeid` UDF to find the node id of the + coordinator node + +* Introduces `citus_stat_activity` view and drops `citus_worker_stat_activity` + UDF + +* Introduces `citus.use_citus_managed_tables` GUC to add local tables to Citus + metadata automatically + +* Introduces a new flag `force_delegation` in `create_distributed_function()` + +* Allows `create_distributed_function()` on a function owned by an extension + +* Allows creating distributed tables in sequential mode + +* Allows disabling nodes when multiple failures happen + +* Adds support for pushing procedures with `OUT` arguments down to the worker + nodes + +* Overrides `pg_cancel_backend()` and `pg_terminate_backend()` to run with + global pid + +* Delegates function calls of the form `SELECT .. FROM func()` + +* Adds propagation of `CREATE SCHEMA .. 
GRANT ON SCHEMA ..` commands + +* Propagates `pg_dist_object` to worker nodes + +* Adds propagation of `SCHEMA` operations + +* Adds missing version-mismatch checks for columnar tables + +* Adds missing version-mismatch checks for internal functions + +* `citus_shard_indexes_on_worker` shows all local shard indexes regardless of + `search_path` + +* `citus_shards_on_worker` shows all local shards regardless of `search_path` + +* Deprecates inactive shard state, never marks any placement inactive + +* Disables distributed & reference foreign tables + +* Prevents propagating objects having a circular dependency + +* Prevents propagating objects having a dependency to an object with unsupported + type + +* Deprecates `master_get_table_metadata` UDF + +* Disallows remote execution from queries on shards + +* Drops `citus.enable_cte_inlining` GUC + +* Drops `citus.single_shard_commit_protocol` GUC, defaults to 2PC + +* Drops support for `citus.multi_shard_commit_protocol`, always use 2PC + +* Avoids unnecessary errors for `ALTER STATISTICS IF EXISTS` when the statistics + does not exist + +* Fixes a bug that causes columnar storage pages to have zero LSN + +* Fixes a bug that causes issues while create dependencies from multiple + sessions + +* Fixes a bug that causes reading columnar metapage as all-zeros when + writing to a columnar table + +* Fixes a bug that could break `DROP SCHEMA/EXTENSON` commands when there is a + columnar table + +* Fixes a bug that could break pg upgrades due to missing `pg_depend` records + for columnar table access method + +* Fixes a bug that could cause `CREATE INDEX` to fail for expressions when using + custom `search_path` + +* Fixes a bug that could cause `worker_save_query_explain_analyze` to fail on + custom types + +* Fixes a bug that could cause failed re-partition joins to leak result tables + +* Fixes a bug that could cause prerequisite columnar table access method + objects being not created during pg upgrades + +* Fixes a bug that could cause re-partition joins involving local shards to fail + +* Fixes a bug that limits usage of sequences in non-int columns + +* Fixes a bug that prevents `DROP SCHEMA CASCADE` + +* Fixes a build error that happens when `lz4` is not installed + +* Fixes a clog lookup failure that could occur when writing to a columnar table + +* Fixes a crash that occurs when the aggregate that cannot be pushed-down + returns empty result from a worker + +* Fixes a missing `FROM` clause entry error + +* Fixes a possible segfault that could happen when reporting distributed + deadlock + +* Fixes an issue that could cause unexpected errors when there is an in-progress + write to a columnar table + +* Fixes an unexpected error that occurs due to aborted writes to a columnar + table with an index + +* Fixes an unexpected error that occurs when writing to a columnar table created + in older version + +* Fixes issue when compiling Citus from source with some compilers + +* Fixes issues on `ATTACH PARTITION` logic + +* Fixes naming issues of newly created partitioned indexes + +* Improves self-deadlock prevention for `CREATE INDEX / REINDEX CONCURRENTLY` + commands for builds using PG14 or higher + +* Moves `pg_dist_object` to `pg_catalog` schema + +* Partitions shards to be co-located with the parent shards + +* Prevents Citus table functions from being called on shards + +* Prevents creating distributed functions when there are out of sync nodes + +* Provides notice message for idempotent `create_distributed_function` calls + +* Reinstates 
optimisation for uniform shard interval ranges + +* Relaxes table ownership check to privileges check while acquiring lock + +* Drops support for `citus.shard_placement_policy` GUC + +* Drops `master_append_table_to_shard` UDF + +* Drops `master_apply_delete_command` UDF + +* Removes copy into new shard logic for append-distributed tables + +* Drops support for distributed `cstore_fdw` tables in favor of Citus + columnar table access method + +* Removes support for dropping distributed and local indexes in the same + statement + +* Replaces `citus.enable_object_propagation` GUC with + `citus.enable_metadata_sync` + +* Requires superuser for `citus_add_node()` and `citus_activate_node()` UDFs + +* Revokes read access to `columnar.chunk` from unprivileged user + +* Disallows unsupported lateral subqueries on distributed tables + +* Stops updating shard range in `citus_update_shard_statistics` for append + tables + +### citus v10.2.5 (March 15, 2022) ### + +* Fixes a bug that could cause `worker_save_query_explain_analyze` to fail on + custom types + +* Fixes a bug that limits usage of sequences in non-integer columns + +* Fixes a crash that occurs when the aggregate that cannot be pushed-down + returns empty result from a worker + +* Improves concurrent metadata syncing and metadata changing DDL operations + ### citus v10.2.4 (February 1, 2022) ### * Adds support for operator class parameters in indexes diff --git a/Makefile b/Makefile index 7569fd6ce..f119a204f 100644 --- a/Makefile +++ b/Makefile @@ -13,10 +13,16 @@ include Makefile.global all: extension + +# build columnar only +columnar: + $(MAKE) -C src/backend/columnar all # build extension -extension: $(citus_top_builddir)/src/include/citus_version.h +extension: $(citus_top_builddir)/src/include/citus_version.h columnar $(MAKE) -C src/backend/distributed/ all -install-extension: extension +install-columnar: columnar + $(MAKE) -C src/backend/columnar install +install-extension: extension install-columnar $(MAKE) -C src/backend/distributed/ install install-headers: extension $(MKDIR_P) '$(DESTDIR)$(includedir_server)/distributed/' @@ -27,6 +33,7 @@ install-headers: extension clean-extension: $(MAKE) -C src/backend/distributed/ clean + $(MAKE) -C src/backend/columnar/ clean clean-full: $(MAKE) -C src/backend/distributed/ clean-full .PHONY: extension install-extension clean-extension clean-full @@ -35,6 +42,7 @@ install: install-extension install-headers install-downgrades: $(MAKE) -C src/backend/distributed/ install-downgrades install-all: install-headers + $(MAKE) -C src/backend/columnar/ install-all $(MAKE) -C src/backend/distributed/ install-all clean: clean-extension diff --git a/configure b/configure index 5766ce8d6..ff70808d2 100755 --- a/configure +++ b/configure @@ -2555,7 +2555,7 @@ if test -z "$version_num"; then as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5 fi -if test "$version_num" != '12' -a "$version_num" != '13' -a "$version_num" != '14'; then +if test "$version_num" != '13' -a "$version_num" != '14'; then as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." 
"$LINENO" 5 else { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5 diff --git a/configure.in b/configure.in index cb1d15b6b..ce89fc351 100644 --- a/configure.in +++ b/configure.in @@ -74,7 +74,7 @@ if test -z "$version_num"; then AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.]) fi -if test "$version_num" != '12' -a "$version_num" != '13' -a "$version_num" != '14'; then +if test "$version_num" != '13' -a "$version_num" != '14'; then AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.]) else AC_MSG_NOTICE([building against PostgreSQL $version_num]) diff --git a/src/backend/columnar/Makefile b/src/backend/columnar/Makefile new file mode 100644 index 000000000..abda9c90d --- /dev/null +++ b/src/backend/columnar/Makefile @@ -0,0 +1,17 @@ +citus_subdir = src/backend/columnar +citus_top_builddir = ../../.. +safestringlib_srcdir = $(citus_abs_top_srcdir)/vendor/safestringlib +SUBDIRS = . safeclib +SUBDIRS += +ENSURE_SUBDIRS_EXIST := $(shell mkdir -p $(SUBDIRS)) +OBJS += \ + $(patsubst $(citus_abs_srcdir)/%.c,%.o,$(foreach dir,$(SUBDIRS), $(sort $(wildcard $(citus_abs_srcdir)/$(dir)/*.c)))) + +MODULE_big = citus_columnar + +PG_CPPFLAGS += -I$(libpq_srcdir) -I$(safestringlib_srcdir)/include + +include $(citus_top_builddir)/Makefile.global + +.PHONY: install-all +install-all: install diff --git a/src/backend/columnar/columnar.c b/src/backend/columnar/columnar.c index 35a6f6da9..85ec06d00 100644 --- a/src/backend/columnar/columnar.c +++ b/src/backend/columnar/columnar.c @@ -22,6 +22,7 @@ #include "citus_version.h" #include "columnar/columnar.h" +#include "columnar/columnar_tableam.h" /* Default values for option parameters */ #define DEFAULT_STRIPE_ROW_COUNT 150000 @@ -53,6 +54,14 @@ static const struct config_enum_entry columnar_compression_options[] = { NULL, 0, false } }; +void +columnar_init(void) +{ + columnar_init_gucs(); + columnar_tableam_init(); +} + + void columnar_init_gucs() { diff --git a/src/backend/columnar/columnar_debug.c b/src/backend/columnar/columnar_debug.c index f72ec5f8f..220d259fe 100644 --- a/src/backend/columnar/columnar_debug.c +++ b/src/backend/columnar/columnar_debug.c @@ -11,7 +11,6 @@ #include "postgres.h" #include "funcapi.h" -#include "pg_config.h" #include "access/nbtree.h" #include "access/table.h" #include "catalog/pg_am.h" diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 916962b4a..b6179ac8c 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -103,8 +103,8 @@ typedef struct IndexFetchColumnarData MemoryContext scanContext; } IndexFetchColumnarData; - -ColumnarTableSetOptions_hook_type ColumnarTableSetOptions_hook = NULL; +/* available to other extensions using find_rendezvous_variable() */ +static ColumnarTableSetOptions_hook_type ColumnarTableSetOptions_hook = NULL; static object_access_hook_type PrevObjectAccessHook = NULL; static ProcessUtility_hook_type PrevProcessUtilityHook = NULL; @@ -1910,6 +1910,11 @@ ColumnarSubXactCallback(SubXactEvent event, SubTransactionId mySubid, void columnar_tableam_init() { + ColumnarTableSetOptions_hook_type **ColumnarTableSetOptions_hook_ptr = + (ColumnarTableSetOptions_hook_type **) find_rendezvous_variable( + COLUMNAR_SETOPTIONS_HOOK_SYM); + *ColumnarTableSetOptions_hook_ptr = &ColumnarTableSetOptions_hook; + RegisterXactCallback(ColumnarXactCallback, NULL); RegisterSubXactCallback(ColumnarSubXactCallback, NULL); diff --git 
a/src/backend/columnar/mod.c b/src/backend/columnar/mod.c index f2679f326..c5112a5f4 100644 --- a/src/backend/columnar/mod.c +++ b/src/backend/columnar/mod.c @@ -18,13 +18,15 @@ #include "citus_version.h" #include "columnar/columnar.h" -#include "columnar/mod.h" - #include "columnar/columnar_tableam.h" + +PG_MODULE_MAGIC; + +void _PG_init(void); + void -columnar_init(void) +_PG_init(void) { - columnar_init_gucs(); - columnar_tableam_init(); + columnar_init(); } diff --git a/src/backend/columnar/safeclib b/src/backend/columnar/safeclib new file mode 120000 index 000000000..c4da5b7ad --- /dev/null +++ b/src/backend/columnar/safeclib @@ -0,0 +1 @@ +../../../vendor/safestringlib/safeclib/ \ No newline at end of file diff --git a/src/backend/distributed/Makefile b/src/backend/distributed/Makefile index a2ffcc142..f537d4cd8 100644 --- a/src/backend/distributed/Makefile +++ b/src/backend/distributed/Makefile @@ -19,8 +19,6 @@ DATA_built = $(generated_sql_files) # directories with source files SUBDIRS = . commands connection ddl deparser executor metadata operations planner progress relay safeclib test transaction utils worker -# columnar modules -SUBDIRS += ../columnar # enterprise modules SUBDIRS += @@ -84,7 +82,8 @@ endif .PHONY: clean-full install install-downgrades install-all cleanup-before-install: - rm -f $(DESTDIR)$(datadir)/$(datamoduledir)/citus* + rm -f $(DESTDIR)$(datadir)/$(datamoduledir)/citus.control + rm -f $(DESTDIR)$(datadir)/$(datamoduledir)/citus--* install: cleanup-before-install diff --git a/src/backend/distributed/commands/aggregate.c b/src/backend/distributed/commands/aggregate.c index f6161df49..3e6de88e5 100644 --- a/src/backend/distributed/commands/aggregate.c +++ b/src/backend/distributed/commands/aggregate.c @@ -22,6 +22,20 @@ #include "utils/lsyscache.h" +/* + * PreprocessDefineAggregateStmt only qualifies the node with schema name. + * We will handle the rest in the Postprocess phase. + */ +List * +PreprocessDefineAggregateStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + QualifyTreeNode((Node *) node); + + return NIL; +} + + /* * PostprocessDefineAggregateStmt actually creates the plan we need to execute for * aggregate propagation. 
@@ -37,8 +51,6 @@ List * PostprocessDefineAggregateStmt(Node *node, const char *queryString) { - QualifyTreeNode((Node *) node); - DefineStmt *stmt = castNode(DefineStmt, node); if (!ShouldPropagate()) diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index 6664b6c1c..17e84dfaa 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -52,6 +52,7 @@ #include "distributed/multi_partitioning_utils.h" #include "distributed/reference_table_utils.h" #include "distributed/relation_access_tracking.h" +#include "distributed/shared_library_init.h" #include "distributed/shard_utils.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" @@ -687,7 +688,7 @@ ConvertTable(TableConversionState *con) strcmp(con->originalAccessMethod, "columnar") == 0) { ColumnarOptions options = { 0 }; - ReadColumnarOptions(con->relationId, &options); + extern_ReadColumnarOptions(con->relationId, &options); ColumnarTableDDLContext *context = (ColumnarTableDDLContext *) palloc0( sizeof(ColumnarTableDDLContext)); @@ -843,7 +844,7 @@ DropIndexesNotSupportedByColumnar(Oid relationId, bool suppressNoticeMessages) foreach_oid(indexId, indexIdList) { char *indexAmName = GetIndexAccessMethodName(indexId); - if (ColumnarSupportsIndexAM(indexAmName)) + if (extern_ColumnarSupportsIndexAM(indexAmName)) { continue; } diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index e447a2be1..f587c81c6 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -1106,13 +1106,10 @@ DropDefaultExpressionsAndMoveOwnedSequenceOwnerships(Oid sourceRelationId, ExtractDefaultColumnsAndOwnedSequences(sourceRelationId, &columnNameList, &ownedSequenceIdList); - ListCell *columnNameCell = NULL; - ListCell *ownedSequenceIdCell = NULL; - forboth(columnNameCell, columnNameList, ownedSequenceIdCell, ownedSequenceIdList) + char *columnName = NULL; + Oid ownedSequenceId = InvalidOid; + forboth_ptr_oid(columnName, columnNameList, ownedSequenceId, ownedSequenceIdList) { - char *columnName = (char *) lfirst(columnNameCell); - Oid ownedSequenceId = lfirst_oid(ownedSequenceIdCell); - DropDefaultColumnDefinition(sourceRelationId, columnName); /* column might not own a sequence */ diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index 12bf1404a..c284404ce 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -19,6 +19,7 @@ #include "distributed/deparser.h" #include "distributed/listutils.h" #include "distributed/metadata_utility.h" +#include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" @@ -300,6 +301,32 @@ PreprocessAlterCollationOwnerStmt(Node *node, const char *queryString, } +/* + * PostprocessAlterCollationOwnerStmt is invoked after the owner has been changed locally. + * Since changing the owner could result in new dependencies being found for this object + * we re-ensure all the dependencies for the collation do exist. + * + * This is solely to propagate the new owner (and all its dependencies) if it was not + * already distributed in the cluster. 
+ */ +List * +PostprocessAlterCollationOwnerStmt(Node *node, const char *queryString) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_COLLATION); + + ObjectAddress collationAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&collationAddress)) + { + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&collationAddress); + + return NIL; +} + + /* * PreprocessRenameCollationStmt is called when the user is renaming the collation. The invocation happens * before the statement is applied locally. @@ -562,6 +589,14 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString) ObjectAddress collationAddress = DefineCollationStmtObjectAddress(node, false); + DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency( + &collationAddress); + if (errMsg != NULL) + { + RaiseDeferredError(errMsg, WARNING); + return NIL; + } + EnsureDependenciesExistOnAllNodes(&collationAddress); /* to prevent recursion with mx we disable ddl propagation */ diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index c639d836c..26a905f23 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -31,6 +31,7 @@ #include "catalog/pg_opclass.h" #include "catalog/pg_proc.h" #include "catalog/pg_trigger.h" +#include "catalog/pg_type.h" #include "commands/defrem.h" #include "commands/extension.h" #include "commands/sequence.h" @@ -579,7 +580,7 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName, * explicitly. */ void -EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId, Oid ownerRelationId) +EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId) { List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); citusTableIdList = list_append_unique_oid(citusTableIdList, ownerRelationId); @@ -591,14 +592,11 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId, Oid ownerRelationId) List *dependentSequenceList = NIL; GetDependentSequencesWithRelation(citusTableId, &attnumList, &dependentSequenceList, 0); - ListCell *attnumCell = NULL; - ListCell *dependentSequenceCell = NULL; - forboth(attnumCell, attnumList, dependentSequenceCell, - dependentSequenceList) + AttrNumber currentAttnum = InvalidAttrNumber; + Oid currentSeqOid = InvalidOid; + forboth_int_oid(currentAttnum, attnumList, currentSeqOid, + dependentSequenceList) { - AttrNumber currentAttnum = lfirst_int(attnumCell); - Oid currentSeqOid = lfirst_oid(dependentSequenceCell); - /* * If another distributed table is using the same sequence * in one of its column defaults, make sure the types of the @@ -606,9 +604,9 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId, Oid ownerRelationId) */ if (currentSeqOid == seqOid) { - Oid currentSeqTypId = GetAttributeTypeOid(citusTableId, - currentAttnum); - if (seqTypId != currentSeqTypId) + Oid currentAttributeTypId = GetAttributeTypeOid(citusTableId, + currentAttnum); + if (attributeTypeId != currentAttributeTypId) { char *sequenceName = generate_qualified_relation_name( seqOid); @@ -674,28 +672,37 @@ static void EnsureDistributedSequencesHaveOneType(Oid relationId, List *dependentSequenceList, List *attnumList) { - ListCell *attnumCell = NULL; - ListCell *dependentSequenceCell = NULL; - forboth(attnumCell, attnumList, dependentSequenceCell, dependentSequenceList) + AttrNumber attnum = InvalidAttrNumber; + Oid 
sequenceOid = InvalidOid; + forboth_int_oid(attnum, attnumList, sequenceOid, dependentSequenceList) { - AttrNumber attnum = lfirst_int(attnumCell); - Oid sequenceOid = lfirst_oid(dependentSequenceCell); - /* * We should make sure that the type of the column that uses * that sequence is supported */ - Oid seqTypId = GetAttributeTypeOid(relationId, attnum); - EnsureSequenceTypeSupported(sequenceOid, seqTypId, relationId); + Oid attributeTypeId = GetAttributeTypeOid(relationId, attnum); + EnsureSequenceTypeSupported(sequenceOid, attributeTypeId, relationId); /* * Alter the sequence's data type in the coordinator if needed. + * + * First, we should only change the sequence type if the column + * is a supported sequence type. For example, if a sequence is used + * in an expression which then becomes a text, we should not try to + * alter the sequence type to text. Postgres only supports int2, int4 + * and int8 as the sequence type. + * * A sequence's type is bigint by default and it doesn't change even if * it's used in an int column. We should change the type if needed, * and not allow future ALTER SEQUENCE ... TYPE ... commands for - * sequences used as defaults in distributed tables + * sequences used as defaults in distributed tables. */ - AlterSequenceType(sequenceOid, seqTypId); + if (attributeTypeId == INT2OID || + attributeTypeId == INT4OID || + attributeTypeId == INT8OID) + { + AlterSequenceType(sequenceOid, attributeTypeId); + } } } diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 01f561b65..13ef40b13 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -158,6 +158,8 @@ EnsureDependenciesCanBeDistributed(const ObjectAddress *objectAddress) if (depError != NULL) { + /* override error detail as it is not applicable here*/ + depError->detail = NULL; RaiseDeferredError(depError, ERROR); } } @@ -398,6 +400,11 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return CreateTextSearchConfigDDLCommandsIdempotent(dependency); } + case OCLASS_TSDICT: + { + return CreateTextSearchDictDDLCommandsIdempotent(dependency); + } + case OCLASS_TYPE: { return CreateTypeDDLCommandsIdempotent(dependency); diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 80a2b6628..800392081 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -37,14 +37,14 @@ static DistributeObjectOps Aggregate_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, .preprocess = PreprocessAlterFunctionOwnerStmt, - .postprocess = NULL, + .postprocess = PostprocessAlterFunctionOwnerStmt, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; static DistributeObjectOps Aggregate_Define = { .deparse = NULL, .qualify = QualifyDefineAggregateStmt, - .preprocess = NULL, + .preprocess = PreprocessDefineAggregateStmt, .postprocess = PostprocessDefineAggregateStmt, .address = DefineAggregateStmtObjectAddress, .markDistributed = true, @@ -269,7 +269,7 @@ static DistributeObjectOps Collation_AlterOwner = { .deparse = DeparseAlterCollationOwnerStmt, .qualify = QualifyAlterCollationOwnerStmt, .preprocess = PreprocessAlterCollationOwnerStmt, - .postprocess = NULL, + .postprocess = PostprocessAlterCollationOwnerStmt, .address = AlterCollationOwnerObjectAddress, .markDistributed = 
false, }; @@ -373,7 +373,7 @@ static DistributeObjectOps Function_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, .preprocess = PreprocessAlterFunctionOwnerStmt, - .postprocess = NULL, + .postprocess = PostprocessAlterFunctionOwnerStmt, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; @@ -437,7 +437,7 @@ static DistributeObjectOps Procedure_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, .preprocess = PreprocessAlterFunctionOwnerStmt, - .postprocess = NULL, + .postprocess = PostprocessAlterFunctionOwnerStmt, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; @@ -538,7 +538,7 @@ static DistributeObjectOps TextSearchConfig_Comment = { .markDistributed = false, }; static DistributeObjectOps TextSearchConfig_Define = { - .deparse = DeparseCreateTextSearchStmt, + .deparse = DeparseCreateTextSearchConfigurationStmt, .qualify = NULL, .preprocess = NULL, .postprocess = PostprocessCreateTextSearchConfigurationStmt, @@ -561,6 +561,62 @@ static DistributeObjectOps TextSearchConfig_Rename = { .address = RenameTextSearchConfigurationStmtObjectAddress, .markDistributed = false, }; +static DistributeObjectOps TextSearchDict_Alter = { + .deparse = DeparseAlterTextSearchDictionaryStmt, + .qualify = QualifyAlterTextSearchDictionaryStmt, + .preprocess = PreprocessAlterTextSearchDictionaryStmt, + .postprocess = NULL, + .address = AlterTextSearchDictionaryStmtObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchDict_AlterObjectSchema = { + .deparse = DeparseAlterTextSearchDictionarySchemaStmt, + .qualify = QualifyAlterTextSearchDictionarySchemaStmt, + .preprocess = PreprocessAlterTextSearchDictionarySchemaStmt, + .postprocess = PostprocessAlterTextSearchDictionarySchemaStmt, + .address = AlterTextSearchDictionarySchemaStmtObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchDict_AlterOwner = { + .deparse = DeparseAlterTextSearchDictionaryOwnerStmt, + .qualify = QualifyAlterTextSearchDictionaryOwnerStmt, + .preprocess = PreprocessAlterTextSearchDictionaryOwnerStmt, + .postprocess = PostprocessAlterTextSearchDictionaryOwnerStmt, + .address = AlterTextSearchDictOwnerObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchDict_Comment = { + .deparse = DeparseTextSearchDictionaryCommentStmt, + .qualify = QualifyTextSearchDictionaryCommentStmt, + .preprocess = PreprocessTextSearchDictionaryCommentStmt, + .postprocess = NULL, + .address = TextSearchDictCommentObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchDict_Define = { + .deparse = DeparseCreateTextSearchDictionaryStmt, + .qualify = NULL, + .preprocess = NULL, + .postprocess = PostprocessCreateTextSearchDictionaryStmt, + .address = CreateTextSearchDictObjectAddress, + .markDistributed = true, +}; +static DistributeObjectOps TextSearchDict_Drop = { + .deparse = DeparseDropTextSearchDictionaryStmt, + .qualify = QualifyDropTextSearchDictionaryStmt, + .preprocess = PreprocessDropTextSearchDictionaryStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchDict_Rename = { + .deparse = DeparseRenameTextSearchDictionaryStmt, + .qualify = QualifyRenameTextSearchDictionaryStmt, + .preprocess = PreprocessRenameTextSearchDictionaryStmt, + .postprocess = NULL, + .address = RenameTextSearchDictionaryStmtObjectAddress, + .markDistributed = false, 
+}; static DistributeObjectOps Trigger_AlterObjectDepends = { .deparse = NULL, .qualify = NULL, @@ -581,7 +637,7 @@ static DistributeObjectOps Routine_AlterOwner = { .deparse = DeparseAlterFunctionOwnerStmt, .qualify = QualifyAlterFunctionOwnerStmt, .preprocess = PreprocessAlterFunctionOwnerStmt, - .postprocess = NULL, + .postprocess = PostprocessAlterFunctionOwnerStmt, .address = AlterFunctionOwnerObjectAddress, .markDistributed = false, }; @@ -647,7 +703,7 @@ static DistributeObjectOps Statistics_AlterOwner = { .deparse = DeparseAlterStatisticsOwnerStmt, .qualify = QualifyAlterStatisticsOwnerStmt, .preprocess = PreprocessAlterStatisticsOwnerStmt, - .postprocess = NULL, + .postprocess = PostprocessAlterStatisticsOwnerStmt, .address = NULL, .markDistributed = false, }; @@ -872,6 +928,11 @@ GetDistributeObjectOps(Node *node) return &TextSearchConfig_AlterObjectSchema; } + case OBJECT_TSDICTIONARY: + { + return &TextSearchDict_AlterObjectSchema; + } + case OBJECT_TYPE: { return &Type_AlterObjectSchema; @@ -934,6 +995,11 @@ GetDistributeObjectOps(Node *node) return &TextSearchConfig_AlterOwner; } + case OBJECT_TSDICTIONARY: + { + return &TextSearchDict_AlterOwner; + } + case OBJECT_TYPE: { return &Type_AlterOwner; @@ -1020,6 +1086,11 @@ GetDistributeObjectOps(Node *node) return &TextSearchConfig_Alter; } + case T_AlterTSDictionaryStmt: + { + return &TextSearchDict_Alter; + } + case T_ClusterStmt: { return &Any_Cluster; @@ -1035,6 +1106,11 @@ GetDistributeObjectOps(Node *node) return &TextSearchConfig_Comment; } + case OBJECT_TSDICTIONARY: + { + return &TextSearchDict_Comment; + } + default: { return &NoDistributeOps; @@ -1107,6 +1183,11 @@ GetDistributeObjectOps(Node *node) return &TextSearchConfig_Define; } + case OBJECT_TSDICTIONARY: + { + return &TextSearchDict_Define; + } + default: { return &NoDistributeOps; @@ -1189,6 +1270,11 @@ GetDistributeObjectOps(Node *node) return &TextSearchConfig_Drop; } + case OBJECT_TSDICTIONARY: + { + return &TextSearchDict_Drop; + } + case OBJECT_TYPE: { return &Type_Drop; @@ -1293,6 +1379,11 @@ GetDistributeObjectOps(Node *node) return &TextSearchConfig_Rename; } + case OBJECT_TSDICTIONARY: + { + return &TextSearchDict_Rename; + } + case OBJECT_TYPE: { return &Type_Rename; diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 3aa782c06..c1cf06039 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -769,13 +769,23 @@ RecreateExtensionStmt(Oid extensionOid) /* make DefEleme for extensionSchemaName */ Node *schemaNameArg = (Node *) makeString(extensionSchemaName); - DefElem *schemaDefElement = makeDefElem("schema", schemaNameArg, location); /* append the schema name DefElem finally */ createExtensionStmt->options = lappend(createExtensionStmt->options, schemaDefElement); + char *extensionVersion = get_extension_version(extensionOid); + if (extensionVersion != NULL) + { + Node *extensionVersionArg = (Node *) makeString(extensionVersion); + DefElem *extensionVersionElement = + makeDefElem("new_version", extensionVersionArg, location); + + createExtensionStmt->options = lappend(createExtensionStmt->options, + extensionVersionElement); + } + return (Node *) createExtensionStmt; } diff --git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c index 67a3e4335..a84e8cce2 100644 --- a/src/backend/distributed/commands/foreign_constraint.c +++ 
b/src/backend/distributed/commands/foreign_constraint.c @@ -410,9 +410,8 @@ EnsureReferencingTableNotReplicated(Oid referencingTableId) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create foreign key constraint"), - errdetail("Citus Community Edition currently supports " - "foreign key constraints only for " - "\"citus.shard_replication_factor = 1\"."), + errdetail("Citus currently supports foreign key constraints " + "only for \"citus.shard_replication_factor = 1\"."), errhint("Please change \"citus.shard_replication_factor to " "1\". To learn more about using foreign keys with " "other replication factors, please contact us at " diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 2c0dfd6bb..757c24fed 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -966,6 +966,14 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) insertorderbyat = agg->aggnumdirectargs; } + /* + * For zero-argument aggregate, write * in place of the list of arguments + */ + if (numargs == 0) + { + appendStringInfo(&buf, "*"); + } + for (i = 0; i < numargs; i++) { Oid argtype = argtypes[i]; @@ -1446,7 +1454,21 @@ DefineAggregateStmtObjectAddress(Node *node, bool missing_ok) } else { - objectWithArgs->objargs = list_make1(makeTypeName("anyelement")); + DefElem *defItem = NULL; + foreach_ptr(defItem, stmt->definition) + { + /* + * If no explicit args are given, pg includes basetype in the signature. + * If the basetype given is a type, like int4, we should include it in the + * signature. In that case, defItem->arg would be a TypeName. + * If the basetype given is a string, like "ANY", we shouldn't include it. + */ + if (strcmp(defItem->defname, "basetype") == 0 && IsA(defItem->arg, TypeName)) + { + objectWithArgs->objargs = lappend(objectWithArgs->objargs, + defItem->arg); + } + } } return FunctionToObjectAddress(OBJECT_AGGREGATE, objectWithArgs, missing_ok); @@ -1584,6 +1606,32 @@ PreprocessAlterFunctionOwnerStmt(Node *node, const char *queryString, } +/* + * PostprocessAlterFunctionOwnerStmt is invoked after the owner has been changed locally. + * Since changing the owner could result in new dependencies being found for this object + * we re-ensure all the dependencies for the function do exist. + * + * This is solely to propagate the new owner (and all its dependencies) if it was not + * already distributed in the cluster. 
+ */ +List * +PostprocessAlterFunctionOwnerStmt(Node *node, const char *queryString) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + AssertObjectTypeIsFunctional(stmt->objectType); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateAlterFunction(&address)) + { + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + /* * PreprocessDropFunctionStmt gets called during the planning phase of a DROP FUNCTION statement * and returns a list of DDLJob's that will drop any distributed functions from the diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index 635e7adde..cdee81349 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -44,6 +44,7 @@ static ObjectAddress GetObjectAddressBySchemaName(char *schemaName, bool missing static List * FilterDistributedSchemas(List *schemas); static bool SchemaHasDistributedTableWithFKey(char *schemaName); static bool ShouldPropagateCreateSchemaStmt(void); +static List * GetGrantCommandsFromCreateSchemaStmt(Node *node); /* @@ -63,13 +64,17 @@ PreprocessCreateSchemaStmt(Node *node, const char *queryString, EnsureSequentialMode(OBJECT_SCHEMA); + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make1(DISABLE_DDL_PROPAGATION); + /* deparse sql*/ const char *sql = DeparseTreeNode(node); - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, - ENABLE_DDL_PROPAGATION); + commands = lappend(commands, (void *) sql); + + commands = list_concat(commands, GetGrantCommandsFromCreateSchemaStmt(node)); + + commands = lappend(commands, ENABLE_DDL_PROPAGATION); return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -223,7 +228,24 @@ CreateSchemaStmtObjectAddress(Node *node, bool missing_ok) { CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node); - return GetObjectAddressBySchemaName(stmt->schemaname, missing_ok); + StringInfoData schemaName = { 0 }; + initStringInfo(&schemaName); + + if (stmt->schemaname == NULL) + { + /* + * If the schema name is not provided, the schema will be created + * with the name of the authorizated user. + */ + Assert(stmt->authrole != NULL); + appendStringInfoString(&schemaName, RoleSpecString(stmt->authrole, true)); + } + else + { + appendStringInfoString(&schemaName, stmt->schemaname); + } + + return GetObjectAddressBySchemaName(schemaName.data, missing_ok); } @@ -375,3 +397,44 @@ ShouldPropagateCreateSchemaStmt() return true; } + + +/* + * GetGrantCommandsFromCreateSchemaStmt takes a CreateSchemaStmt and returns the + * list of deparsed queries of the inner GRANT commands of the given statement. + * Ignores commands other than GRANT statements. 
+ */ +static List * +GetGrantCommandsFromCreateSchemaStmt(Node *node) +{ + List *commands = NIL; + CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node); + + Node *element = NULL; + foreach_ptr(element, stmt->schemaElts) + { + if (!IsA(element, GrantStmt)) + { + continue; + } + + GrantStmt *grantStmt = castNode(GrantStmt, element); + + switch (grantStmt->objtype) + { + /* we only propagate GRANT ON SCHEMA in community */ + case OBJECT_SCHEMA: + { + commands = lappend(commands, DeparseGrantOnSchemaStmt(element)); + break; + } + + default: + { + break; + } + } + } + + return commands; +} diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index 1265887ff..79a758c10 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -427,6 +427,37 @@ PreprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString, } +/* + * PostprocessAlterStatisticsOwnerStmt is invoked after the owner has been changed locally. + * Since changing the owner could result in new dependencies being found for this object + * we re-ensure all the dependencies for the statistics do exist. + * + * This is solely to propagate the new owner (and all its dependencies) if it was not + * already distributed in the cluster. + */ +List * +PostprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_STATISTIC_EXT); + + Oid statsOid = get_statistics_object_oid((List *) stmt->object, false); + Oid relationId = GetRelIdByStatsOid(statsOid); + + if (!IsCitusTable(relationId) || !ShouldPropagate()) + { + return NIL; + } + + ObjectAddress statisticsAddress = { 0 }; + ObjectAddressSet(statisticsAddress, StatisticExtRelationId, statsOid); + + EnsureDependenciesExistOnAllNodes(&statisticsAddress); + + return NIL; +} + + /* * GetExplicitStatisticsCommandList returns the list of DDL commands to create * or alter statistics that are explicitly created for the table with relationId. 
diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 4bf1ff373..220a4d049 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -3127,13 +3127,10 @@ InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId, List *taskList = NIL; - ListCell *leftShardCell = NULL; - ListCell *rightShardCell = NULL; - forboth(leftShardCell, leftShardList, rightShardCell, rightShardList) + ShardInterval *leftShardInterval = NULL; + ShardInterval *rightShardInterval = NULL; + forboth_ptr(leftShardInterval, leftShardList, rightShardInterval, rightShardList) { - ShardInterval *leftShardInterval = (ShardInterval *) lfirst(leftShardCell); - ShardInterval *rightShardInterval = (ShardInterval *) lfirst(rightShardCell); - uint64 leftShardId = leftShardInterval->shardId; uint64 rightShardId = rightShardInterval->shardId; diff --git a/src/backend/distributed/commands/text_search.c b/src/backend/distributed/commands/text_search.c index 53080c42b..1b5e84aa7 100644 --- a/src/backend/distributed/commands/text_search.c +++ b/src/backend/distributed/commands/text_search.c @@ -18,7 +18,9 @@ #include "catalog/pg_ts_config_map.h" #include "catalog/pg_ts_dict.h" #include "catalog/pg_ts_parser.h" +#include "catalog/pg_ts_template.h" #include "commands/comment.h" +#include "commands/defrem.h" #include "commands/extension.h" #include "fmgr.h" #include "nodes/makefuncs.h" @@ -32,6 +34,7 @@ #include "distributed/commands/utility_hook.h" #include "distributed/deparser.h" #include "distributed/listutils.h" +#include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" @@ -40,13 +43,19 @@ static List * GetDistributedTextSearchConfigurationNames(DropStmt *stmt); +static List * GetDistributedTextSearchDictionaryNames(DropStmt *stmt); static DefineStmt * GetTextSearchConfigDefineStmt(Oid tsconfigOid); +static DefineStmt * GetTextSearchDictionaryDefineStmt(Oid tsdictOid); +static List * GetTextSearchDictionaryInitOptions(HeapTuple tup, Form_pg_ts_dict dict); static List * GetTextSearchConfigCommentStmt(Oid tsconfigOid); +static List * GetTextSearchDictionaryCommentStmt(Oid tsconfigOid); static List * get_ts_parser_namelist(Oid tsparserOid); static List * GetTextSearchConfigMappingStmt(Oid tsconfigOid); static List * GetTextSearchConfigOwnerStmts(Oid tsconfigOid); +static List * GetTextSearchDictionaryOwnerStmts(Oid tsdictOid); static List * get_ts_dict_namelist(Oid tsdictOid); +static List * get_ts_template_namelist(Oid tstemplateOid); static Oid get_ts_config_parser_oid(Oid tsconfigOid); static char * get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype); @@ -83,6 +92,14 @@ PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString EnsureSequentialMode(OBJECT_TSCONFIGURATION); ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + + DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&address); + if (errMsg != NULL) + { + RaiseDeferredError(errMsg, WARNING); + return NIL; + } + EnsureDependenciesExistOnAllNodes(&address); /* @@ -99,6 +116,56 @@ PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString } +/* + * PostprocessCreateTextSearchDictionaryStmt is called after the TEXT SEARCH DICTIONARY has been + * created locally. 
+ */ +List * +PostprocessCreateTextSearchDictionaryStmt(Node *node, const char *queryString) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + Assert(stmt->kind == OBJECT_TSDICTIONARY); + + if (!ShouldPropagate()) + { + return NIL; + } + + /* check creation against multi-statement transaction policy */ + if (!ShouldPropagateCreateInCoordinatedTransction()) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSDICTIONARY); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + + DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&address); + if (errMsg != NULL) + { + RaiseDeferredError(errMsg, WARNING); + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&address); + + QualifyTreeNode(node); + const char *createTSDictionaryStmtSql = DeparseTreeNode(node); + + /* + * To prevent recursive propagation in mx architecture, we disable ddl + * propagation before sending the command to workers. + */ + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) createTSDictionaryStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + List * GetCreateTextSearchConfigStatements(const ObjectAddress *address) { @@ -122,6 +189,25 @@ GetCreateTextSearchConfigStatements(const ObjectAddress *address) } +List * +GetCreateTextSearchDictionaryStatements(const ObjectAddress *address) +{ + Assert(address->classId == TSDictionaryRelationId); + List *stmts = NIL; + + /* CREATE TEXT SEARCH DICTIONARY ...*/ + stmts = lappend(stmts, GetTextSearchDictionaryDefineStmt(address->objectId)); + + /* ALTER TEXT SEARCH DICTIONARY ... OWNER TO ...*/ + stmts = list_concat(stmts, GetTextSearchDictionaryOwnerStmts(address->objectId)); + + /* COMMENT ON TEXT SEARCH DICTIONARY ... */ + stmts = list_concat(stmts, GetTextSearchDictionaryCommentStmt(address->objectId)); + + return stmts; +} + + /* * CreateTextSearchConfigDDLCommandsIdempotent creates a list of ddl commands to recreate * a TEXT SERACH CONFIGURATION object in an idempotent manner on workers. @@ -135,9 +221,22 @@ CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address) } +/* + * CreateTextSearchDictDDLCommandsIdempotent creates a list of ddl commands to recreate + * a TEXT SEARCH CONFIGURATION object in an idempotent manner on workers. + */ +List * +CreateTextSearchDictDDLCommandsIdempotent(const ObjectAddress *address) +{ + List *stmts = GetCreateTextSearchDictionaryStatements(address); + List *sqls = DeparseTreeNodes(stmts); + return list_make1(WrapCreateOrReplaceList(sqls)); +} + + /* * PreprocessDropTextSearchConfigurationStmt prepares the statements we need to send to - * the workers. After we have dropped the schema's locally they also got removed from + * the workers. After we have dropped the configurations locally they also got removed from * pg_dist_object so it is important to do all distribution checks before the change is * made locally. */ @@ -182,13 +281,64 @@ PreprocessDropTextSearchConfigurationStmt(Node *node, const char *queryString, (void *) dropStmtSql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessDropTextSearchDictionaryStmt prepares the statements we need to send to + * the workers. After we have dropped the dictionaries locally they also got removed from + * pg_dist_object so it is important to do all distribution checks before the change is + * made locally. 
+ */ +List * +PreprocessDropTextSearchDictionaryStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_TSDICTIONARY); + + if (!ShouldPropagate()) + { + return NIL; + } + + List *distributedObjects = GetDistributedTextSearchDictionaryNames(stmt); + if (list_length(distributedObjects) == 0) + { + /* no distributed objects to remove */ + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSDICTIONARY); + + /* + * Temporarily replace the list of objects being dropped with only the list + * containing the distributed objects. After we have created the sql statement we + * restore the original list of objects to execute on locally. + * + * Because searchpaths on coordinator and workers might not be in sync we fully + * qualify the list before deparsing. This is safe because qualification doesn't + * change the original names in place, but insteads creates new ones. + */ + List *originalObjects = stmt->objects; + stmt->objects = distributedObjects; + QualifyTreeNode((Node *) stmt); + const char *dropStmtSql = DeparseTreeNode((Node *) stmt); + stmt->objects = originalObjects; + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } /* * GetDistributedTextSearchConfigurationNames iterates over all text search configurations - * dropped, and create a list containign all configurations that are distributed. + * dropped, and create a list containing all configurations that are distributed. */ static List * GetDistributedTextSearchConfigurationNames(DropStmt *stmt) @@ -200,7 +350,7 @@ GetDistributedTextSearchConfigurationNames(DropStmt *stmt) Oid tsconfigOid = get_ts_config_oid(objName, stmt->missing_ok); if (!OidIsValid(tsconfigOid)) { - /* skip missing configuration names, they can't be dirstibuted */ + /* skip missing configuration names, they can't be distributed */ continue; } @@ -216,6 +366,36 @@ GetDistributedTextSearchConfigurationNames(DropStmt *stmt) } +/* + * GetDistributedTextSearchDictionaryNames iterates over all text search dictionaries + * dropped, and create a list containing all dictionaries that are distributed. + */ +static List * +GetDistributedTextSearchDictionaryNames(DropStmt *stmt) +{ + List *objName = NULL; + List *distributedObjects = NIL; + foreach_ptr(objName, stmt->objects) + { + Oid tsdictOid = get_ts_dict_oid(objName, stmt->missing_ok); + if (!OidIsValid(tsdictOid)) + { + /* skip missing dictionary names, they can't be distributed */ + continue; + } + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSDictionaryRelationId, tsdictOid); + if (!IsObjectDistributed(&address)) + { + continue; + } + distributedObjects = lappend(distributedObjects, objName); + } + return distributedObjects; +} + + /* * PreprocessAlterTextSearchConfigurationStmt verifies if the configuration being altered * is distributed in the cluster. If that is the case it will prepare the list of commands @@ -243,7 +423,38 @@ PreprocessAlterTextSearchConfigurationStmt(Node *node, const char *queryString, (void *) alterStmtSql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessAlterTextSearchDictionaryStmt verifies if the dictionary being altered is + * distributed in the cluster. 
If that is the case it will prepare the list of commands to + * send to the worker to apply the same changes remote. + */ +List * +PreprocessAlterTextSearchDictionaryStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSDICTIONARY); + + QualifyTreeNode((Node *) stmt); + const char *alterStmtSql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) alterStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -276,7 +487,40 @@ PreprocessRenameTextSearchConfigurationStmt(Node *node, const char *queryString, (void *) ddlCommand, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessRenameTextSearchDictionaryStmt verifies if the dictionary being altered + * is distributed in the cluster. If that is the case it will prepare the list of commands + * to send to the worker to apply the same changes remote. + */ +List * +PreprocessRenameTextSearchDictionaryStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSDICTIONARY); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSDICTIONARY); + + QualifyTreeNode((Node *) stmt); + + char *ddlCommand = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) ddlCommand, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -310,7 +554,41 @@ PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *querySt (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessAlterTextSearchDictionarySchemaStmt verifies if the dictionary being + * altered is distributed in the cluster. If that is the case it will prepare the list of + * commands to send to the worker to apply the same changes remote. + */ +List * +PreprocessAlterTextSearchDictionarySchemaStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, + stmt->missing_ok); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSDICTIONARY); + + QualifyTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -341,6 +619,33 @@ PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryS } +/* + * PostprocessAlterTextSearchDictionarySchemaStmt is invoked after the schema has been + * changed locally. 
Since changing the schema could result in new dependencies being found + * for this object we re-ensure all the dependencies for the dictionary do exist. This + * is solely to propagate the new schema (and all its dependencies) if it was not already + * distributed in the cluster. + */ +List * +PostprocessAlterTextSearchDictionarySchemaStmt(Node *node, const char *queryString) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, + stmt->missing_ok); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + /* dependencies have changed (schema) let's ensure they exist */ + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + /* * PreprocessTextSearchConfigurationCommentStmt propagates any comment on a distributed * configuration to the workers. Since comments for configurations are promenently shown @@ -370,7 +675,40 @@ PreprocessTextSearchConfigurationCommentStmt(Node *node, const char *queryString (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PreprocessTextSearchDictionaryCommentStmt propagates any comment on a distributed + * dictionary to the workers. Since comments for dictionaries are promenently shown + * when listing all text search dictionaries this is purely a cosmetic thing when + * running in MX. + */ +List * +PreprocessTextSearchDictionaryCommentStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSDICTIONARY); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSDICTIONARY); + + QualifyTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -407,6 +745,39 @@ PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryStr } +/* + * PreprocessAlterTextSearchDictionaryOwnerStmt verifies if the dictionary being + * altered is distributed in the cluster. If that is the case it will prepare the list of + * commands to send to the worker to apply the same changes remote. + */ +List * +PreprocessAlterTextSearchDictionaryOwnerStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSDICTIONARY); + + QualifyTreeNode((Node *) stmt); + char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + /* * PostprocessAlterTextSearchConfigurationOwnerStmt is invoked after the owner has been * changed locally. 
Since changing the owner could result in new dependencies being found @@ -433,6 +804,32 @@ PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *querySt } +/* + * PostprocessAlterTextSearchDictionaryOwnerStmt is invoked after the owner has been + * changed locally. Since changing the owner could result in new dependencies being found + * for this object we re-ensure all the dependencies for the dictionary do exist. This + * is solely to propagate the new owner (and all its dependencies) if it was not already + * distributed in the cluster. + */ +List * +PostprocessAlterTextSearchDictionaryOwnerStmt(Node *node, const char *queryString) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + /* dependencies have changed (owner) let's ensure they exist */ + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + /* * GetTextSearchConfigDefineStmt returns the DefineStmt for a TEXT SEARCH CONFIGURATION * based on the configuration as defined in the catalog identified by tsconfigOid. @@ -465,6 +862,65 @@ GetTextSearchConfigDefineStmt(Oid tsconfigOid) } +/* + * GetTextSearchDictionaryDefineStmt returns the DefineStmt for a TEXT SEARCH DICTIONARY + * based on the dictionary as defined in the catalog identified by tsdictOid. + * + * This statement will contain the template along with all initilaization options. + */ +static DefineStmt * +GetTextSearchDictionaryDefineStmt(Oid tsdictOid) +{ + HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search dictionary %u", + tsdictOid); + } + Form_pg_ts_dict dict = (Form_pg_ts_dict) GETSTRUCT(tup); + + DefineStmt *stmt = makeNode(DefineStmt); + stmt->kind = OBJECT_TSDICTIONARY; + stmt->defnames = get_ts_dict_namelist(tsdictOid); + stmt->definition = GetTextSearchDictionaryInitOptions(tup, dict); + + ReleaseSysCache(tup); + return stmt; +} + + +/* + * GetTextSearchDictionaryInitOptions returns the list of DefElem for the initialization + * options for a TEXT SEARCH DICTIONARY. + * + * The initialization options contain both the template name, and template specific key, + * value pairs that are supplied when the dictionary was first created. + */ +static List * +GetTextSearchDictionaryInitOptions(HeapTuple tup, Form_pg_ts_dict dict) +{ + List *templateNameList = get_ts_template_namelist(dict->dicttemplate); + TypeName *templateTypeName = makeTypeNameFromNameList(templateNameList); + DefElem *templateDefElem = makeDefElem("template", (Node *) templateTypeName, -1); + + Relation TSDictionaryRelation = table_open(TSDictionaryRelationId, AccessShareLock); + TupleDesc TSDictDescription = RelationGetDescr(TSDictionaryRelation); + bool isnull = false; + Datum dictinitoption = heap_getattr(tup, Anum_pg_ts_dict_dictinitoption, + TSDictDescription, &isnull); + + List *initOptionDefElemList = NIL; + if (!isnull) + { + initOptionDefElemList = deserialize_deflist(dictinitoption); + } + + table_close(TSDictionaryRelation, AccessShareLock); + + return lcons(templateDefElem, initOptionDefElemList); +} + + /* * GetTextSearchConfigCommentStmt returns a list containing all entries to recreate a * comment on the configuration identified by tsconfigOid. 
The list could be empty if @@ -492,6 +948,33 @@ GetTextSearchConfigCommentStmt(Oid tsconfigOid) } + +/* + * GetTextSearchDictionaryCommentStmt returns a list containing all entries to recreate a + * comment on the dictionary identified by tsdictOid. The list could be empty if + * there is no comment on a dictionary. + * + * The reason for a list is for easy use when building a list of all statements to invoke + * to recreate the text search dictionary. An empty list can easily be concatenated + * without inspection, contrary to a NULL ptr if we would return the CommentStmt struct. + */ +static List * +GetTextSearchDictionaryCommentStmt(Oid tsdictOid) +{ + char *comment = GetComment(tsdictOid, TSDictionaryRelationId, 0); + if (!comment) + { + return NIL; + } + + CommentStmt *stmt = makeNode(CommentStmt); + stmt->objtype = OBJECT_TSDICTIONARY; + + stmt->object = (Node *) get_ts_dict_namelist(tsdictOid); + stmt->comment = comment; + return list_make1(stmt); +} + + /* * GetTextSearchConfigMappingStmt returns a list of all mappings from token_types to * dictionaries configured on a text search configuration identified by tsconfigOid. @@ -581,7 +1064,7 @@ GetTextSearchConfigMappingStmt(Oid tsconfigOid) * GetTextSearchConfigOwnerStmts returns a potentially empty list of statements to change * the ownership of a TEXT SEARCH CONFIGURATION object. * - * The list is for convenienve when building a full list of statements to recreate the + * The list is for convenience when building a full list of statements to recreate the * configuration. */ static List * @@ -605,6 +1088,34 @@ GetTextSearchConfigOwnerStmts(Oid tsconfigOid) } + +/* + * GetTextSearchDictionaryOwnerStmts returns a potentially empty list of statements to change + * the ownership of a TEXT SEARCH DICTIONARY object. + * + * The list is for convenience when building a full list of statements to recreate the + * dictionary. + */ +static List * +GetTextSearchDictionaryOwnerStmts(Oid tsdictOid) +{ + HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search dictionary %u", + tsdictOid); + } + Form_pg_ts_dict dict = (Form_pg_ts_dict) GETSTRUCT(tup); + + AlterOwnerStmt *stmt = makeNode(AlterOwnerStmt); + stmt->objectType = OBJECT_TSDICTIONARY; + stmt->object = (Node *) get_ts_dict_namelist(tsdictOid); + stmt->newowner = GetRoleSpecObjectForUser(dict->dictowner); + + ReleaseSysCache(tup); + return list_make1(stmt); +} + + /* * get_ts_config_namelist based on the tsconfigOid this function creates the namelist that * identifies the configuration in a fully qualified manner, irregardless of the schema @@ -654,6 +1165,30 @@ get_ts_dict_namelist(Oid tsdictOid) } + +/* + * get_ts_template_namelist based on the tstemplateOid this function creates the namelist + * that identifies the template in a fully qualified manner, regardless of the schema + * existing on the search_path. 
+ */ +static List * +get_ts_template_namelist(Oid tstemplateOid) +{ + HeapTuple tup = SearchSysCache1(TSTEMPLATEOID, ObjectIdGetDatum(tstemplateOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search template %u", tstemplateOid); + } + Form_pg_ts_template template = (Form_pg_ts_template) GETSTRUCT(tup); + + char *schema = get_namespace_name(template->tmplnamespace); + char *templateName = pstrdup(NameStr(template->tmplname)); + List *names = list_make2(makeString(schema), makeString(templateName)); + + ReleaseSysCache(tup); + return names; +} + + /* * get_ts_config_parser_oid based on the tsconfigOid this function returns the Oid of the * parser used in the configuration. @@ -753,6 +1288,25 @@ CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok) } + +/* + * CreateTextSearchDictObjectAddress resolves the ObjectAddress for the object + * being created. If missing_ok is false the function will error, explaining to the user + * the text search dictionary described in the statement doesn't exist. + */ +ObjectAddress +CreateTextSearchDictObjectAddress(Node *node, bool missing_ok) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + Assert(stmt->kind == OBJECT_TSDICTIONARY); + + Oid objid = get_ts_dict_oid(stmt->defnames, missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSDictionaryRelationId, objid); + return address; +} + + /* * RenameTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT * SEARCH CONFIGURATION being renamed. Optionally errors if the configuration does not @@ -772,6 +1326,25 @@ RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) } + +/* + * RenameTextSearchDictionaryStmtObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH DICTIONARY being renamed. Optionally errors if the dictionary does not + * exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +RenameTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSDICTIONARY); + + Oid objid = get_ts_dict_oid(castNode(List, stmt->object), missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSDictionaryRelationId, objid); + return address; +} + + /* * AlterTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT * SEARCH CONFIGURATION being altered. Optionally errors if the configuration does not @@ -790,6 +1363,24 @@ AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) } + +/* + * AlterTextSearchDictionaryStmtObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH DICTIONARY being altered. Optionally errors if the dictionary does not + * exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +AlterTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node); + + Oid objid = get_ts_dict_oid(stmt->dictname, missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSDictionaryRelationId, objid); + return address; +} + + /* * AlterTextSearchConfigurationSchemaStmtObjectAddress resolves the ObjectAddress for the * TEXT SEARCH CONFIGURATION being moved to a different schema. 
Optionally errors if the @@ -843,6 +1434,59 @@ AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok) } + +/* + * AlterTextSearchDictionarySchemaStmtObjectAddress resolves the ObjectAddress for the + * TEXT SEARCH DICTIONARY being moved to a different schema. Optionally errors if the + * dictionary does not exist based on the missing_ok flag passed in by the caller. + * + * This can be called either before or after the move of schema has been executed, hence + * the triple checking before the error might be thrown. Errors for non-existing schemas + * in edge cases will be raised by postgres while executing the move. + */ +ObjectAddress +AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + Oid objid = get_ts_dict_oid(castNode(List, stmt->object), true); + + if (!OidIsValid(objid)) + { + /* + * couldn't find the text search dictionary, might have already been moved to + * the new schema, we construct a new dictionary name that uses the new schema to + * search in. + */ + char *schemaname = NULL; + char *dict_name = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaname, &dict_name); + + char *newSchemaName = stmt->newschema; + List *names = list_make2(makeString(newSchemaName), makeString(dict_name)); + objid = get_ts_dict_oid(names, true); + + if (!missing_ok && !OidIsValid(objid)) + { + /* + * if the text search dict id is still invalid we couldn't find it, error + * with the same message postgres would error with if missing_ok is false + * (not ok to miss) + */ + + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("text search dictionary \"%s\" does not exist", + NameListToString(castNode(List, stmt->object))))); + } + } + + ObjectAddress sequenceAddress = { 0 }; + ObjectAddressSet(sequenceAddress, TSDictionaryRelationId, objid); + return sequenceAddress; +} + + /* * TextSearchConfigurationCommentObjectAddress resolves the ObjectAddress for the TEXT * SEARCH CONFIGURATION on which the comment is placed. Optionally errors if the @@ -862,6 +1506,25 @@ TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok) } + +/* + * TextSearchDictCommentObjectAddress resolves the ObjectAddress for the TEXT SEARCH + * DICTIONARY on which the comment is placed. Optionally errors if the dictionary does not + * exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +TextSearchDictCommentObjectAddress(Node *node, bool missing_ok) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSDICTIONARY); + + Oid objid = get_ts_dict_oid(castNode(List, stmt->object), missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSDictionaryRelationId, objid); + return address; +} + + /* * AlterTextSearchConfigurationOwnerObjectAddress resolves the ObjectAddress for the TEXT * SEARCH CONFIGURATION for which the owner is changed. Optionally errors if the @@ -880,6 +1543,24 @@ AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok) } + +/* + * AlterTextSearchDictOwnerObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH DICTIONARY for which the owner is changed. Optionally errors if the + * dictionary does not exist based on the missing_ok flag passed in by the caller. 
+ */ +ObjectAddress +AlterTextSearchDictOwnerObjectAddress(Node *node, bool missing_ok) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Relation relation = NULL; + + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + return get_object_address(stmt->objectType, stmt->object, &relation, AccessShareLock, + missing_ok); +} + + /* * GenerateBackupNameForTextSearchConfiguration generates a safe name that is not in use * already that can be used to rename an existing TEXT SEARCH CONFIGURATION to allow the diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index 48d1dd10e..e8ea461b4 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -277,7 +277,7 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("truncating foreign tables that are added to metadata " - "can only be excuted on the coordinator"))); + "can only be executed on the coordinator"))); } } } diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index c124388d4..74718ea59 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -57,6 +57,7 @@ #include "distributed/commands/utility_hook.h" #include "distributed/deparser.h" #include "distributed/listutils.h" +#include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" @@ -93,6 +94,9 @@ static List * FilterNameListForDistributedTypes(List *objects, bool missing_ok); static List * TypeNameListToObjectAddresses(List *objects); static TypeName * MakeTypeNameFromRangeVar(const RangeVar *relation); static Oid GetTypeOwner(Oid typeOid); +static Oid LookupNonAssociatedArrayTypeNameOid(ParseState *pstate, + const TypeName *typeName, + bool missing_ok); /* recreate functions */ static CompositeTypeStmt * RecreateCompositeTypeStmt(Oid typeOid); @@ -132,28 +136,7 @@ PreprocessCompositeTypeStmt(Node *node, const char *queryString, /* fully qualify before lookup and later deparsing */ QualifyTreeNode(node); - /* - * reconstruct creation statement in a portable fashion. The create_or_replace helper - * function will be used to create the type in an idempotent manner on the workers. - * - * Types could exist on the worker prior to being created on the coordinator when the - * type previously has been attempted to be created in a transaction which did not - * commit on the coordinator. 
- */ - const char *compositeTypeStmtSql = DeparseCompositeTypeStmt(node); - compositeTypeStmtSql = WrapCreateOrReplace(compositeTypeStmtSql); - - /* - * when we allow propagation within a transaction block we should make sure to only - * allow this in sequential mode - */ - EnsureSequentialMode(OBJECT_TYPE); - - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) compositeTypeStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NIL; } @@ -176,9 +159,39 @@ PostprocessCompositeTypeStmt(Node *node, const char *queryString) * locally it can't be missing */ ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false); + + /* If the type has any unsupported dependency, create it locally */ + DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&typeAddress); + if (errMsg != NULL) + { + RaiseDeferredError(errMsg, WARNING); + return NIL; + } + + /* + * when we allow propagation within a transaction block we should make sure to only + * allow this in sequential mode + */ + EnsureSequentialMode(OBJECT_TYPE); + EnsureDependenciesExistOnAllNodes(&typeAddress); - return NIL; + /* + * reconstruct creation statement in a portable fashion. The create_or_replace helper + * function will be used to create the type in an idempotent manner on the workers. + * + * Types could exist on the worker prior to being created on the coordinator when the + * type previously has been attempted to be created in a transaction which did not + * commit on the coordinator. + */ + const char *compositeTypeStmtSql = DeparseCompositeTypeStmt(node); + compositeTypeStmtSql = WrapCreateOrReplace(compositeTypeStmtSql); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) compositeTypeStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -247,22 +260,7 @@ PreprocessCreateEnumStmt(Node *node, const char *queryString, /* enforce fully qualified typeName for correct deparsing and lookup */ QualifyTreeNode(node); - /* reconstruct creation statement in a portable fashion */ - const char *createEnumStmtSql = DeparseCreateEnumStmt(node); - createEnumStmtSql = WrapCreateOrReplace(createEnumStmtSql); - - /* - * when we allow propagation within a transaction block we should make sure to only - * allow this in sequential mode - */ - EnsureSequentialMode(OBJECT_TYPE); - - /* to prevent recursion with mx we disable ddl propagation */ - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) createEnumStmtSql, - ENABLE_DDL_PROPAGATION); - - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NIL; } @@ -284,9 +282,32 @@ PostprocessCreateEnumStmt(Node *node, const char *queryString) /* lookup type address of just created type */ ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false); + + DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&typeAddress); + if (errMsg != NULL) + { + RaiseDeferredError(errMsg, WARNING); + return NIL; + } + + /* + * when we allow propagation within a transaction block we should make sure to only + * allow this in sequential mode + */ + EnsureSequentialMode(OBJECT_TYPE); + EnsureDependenciesExistOnAllNodes(&typeAddress); - return NIL; + /* reconstruct creation statement in a portable fashion */ + const char *createEnumStmtSql = DeparseCreateEnumStmt(node); + createEnumStmtSql = WrapCreateOrReplace(createEnumStmtSql); + + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = 
list_make3(DISABLE_DDL_PROPAGATION, + (void *) createEnumStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } @@ -732,7 +753,7 @@ CompositeTypeStmtObjectAddress(Node *node, bool missing_ok) { CompositeTypeStmt *stmt = castNode(CompositeTypeStmt, node); TypeName *typeName = MakeTypeNameFromRangeVar(stmt->typevar); - Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); + Oid typeOid = LookupNonAssociatedArrayTypeNameOid(NULL, typeName, missing_ok); ObjectAddress address = { 0 }; ObjectAddressSet(address, TypeRelationId, typeOid); @@ -753,7 +774,7 @@ CreateEnumStmtObjectAddress(Node *node, bool missing_ok) { CreateEnumStmt *stmt = castNode(CreateEnumStmt, node); TypeName *typeName = makeTypeNameFromNameList(stmt->typeName); - Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok); + Oid typeOid = LookupNonAssociatedArrayTypeNameOid(NULL, typeName, missing_ok); ObjectAddress address = { 0 }; ObjectAddressSet(address, TypeRelationId, typeOid); @@ -1158,3 +1179,32 @@ ShouldPropagateTypeCreate() return true; } + + +/* + * LookupNonAssociatedArrayTypeNameOid returns the oid of the type with the given type name + * that is not an array type that is associated to another user defined type. + */ +static Oid +LookupNonAssociatedArrayTypeNameOid(ParseState *pstate, const TypeName *typeName, + bool missing_ok) +{ + Type tup = LookupTypeName(NULL, typeName, NULL, missing_ok); + Oid typeOid = InvalidOid; + if (tup != NULL) + { + if (((Form_pg_type) GETSTRUCT(tup))->typelem == 0) + { + typeOid = ((Form_pg_type) GETSTRUCT(tup))->oid; + } + ReleaseSysCache(tup); + } + + if (!missing_ok && typeOid == InvalidOid) + { + elog(ERROR, "type \"%s\" that is not an array type associated with " + "another type does not exist", TypeNameToString(typeName)); + } + + return typeOid; +} diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 91e02a8ff..c45765bac 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -78,7 +78,7 @@ #include "utils/syscache.h" bool EnableDDLPropagation = true; /* ddl propagation is enabled */ -int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_DEFERRED; +int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE; PropSetCmdBehavior PropagateSetCommands = PROPSETCMD_NONE; /* SET prop off */ static bool shouldInvalidateForeignKeyGraph = false; static int activeAlterTables = 0; diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index 13b52790a..7b89b3e96 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -870,7 +870,19 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount) int eventMask = MultiConnectionStateEventMask(connectionState); - AddWaitEventToSet(waitEventSet, eventMask, sock, NULL, connectionState); + int waitEventSetIndex = + CitusAddWaitEventSetToSet(waitEventSet, eventMask, sock, + NULL, (void *) connectionState); + if (waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED) + { + ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), + errmsg("connection establishment for node %s:%d failed", + connectionState->connection->hostname, + connectionState->connection->port), + errhint("Check both the local and remote server logs for the " + "connection establishment errors."))); + } + numEventsAdded++; if 
(waitCount) @@ -1020,7 +1032,19 @@ FinishConnectionListEstablishment(List *multiConnectionList) { /* connection state changed, reset the event mask */ uint32 eventMask = MultiConnectionStateEventMask(connectionState); - ModifyWaitEvent(waitEventSet, event->pos, eventMask, NULL); + bool success = + CitusModifyWaitEvent(waitEventSet, event->pos, + eventMask, NULL); + if (!success) + { + ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), + errmsg("connection establishment for node %s:%d " + "failed", connection->hostname, + connection->port), + errhint("Check both the local and remote server " + "logs for the connection establishment " + "errors."))); + } } /* @@ -1521,3 +1545,95 @@ MarkConnectionConnected(MultiConnection *connection) INSTR_TIME_SET_CURRENT(connection->connectionEstablishmentEnd); } } + + +/* + * CitusAddWaitEventSetToSet is a wrapper around Postgres' AddWaitEventToSet(). + * + * AddWaitEventToSet() may throw hard errors. For example, when the + * underlying socket for a connection is closed by the remote server + * and already reflected by the OS, however Citus hasn't had a chance + * to get this information. In that case, if replication factor is >1, + * Citus can failover to other nodes for executing the query. Even if + * replication factor = 1, Citus can give much nicer errors. + * + * So CitusAddWaitEventSetToSet simply puts ModifyWaitEvent into a + * PG_TRY/PG_CATCH block in order to catch any hard errors, and + * returns this information to the caller. + */ +int +CitusAddWaitEventSetToSet(WaitEventSet *set, uint32 events, pgsocket fd, + Latch *latch, void *user_data) +{ + volatile int waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED; + MemoryContext savedContext = CurrentMemoryContext; + + PG_TRY(); + { + waitEventSetIndex = + AddWaitEventToSet(set, events, fd, latch, (void *) user_data); + } + PG_CATCH(); + { + /* + * We might be in an arbitrary memory context when the + * error is thrown and we should get back to one we had + * at PG_TRY() time, especially because we are not + * re-throwing the error. + */ + MemoryContextSwitchTo(savedContext); + + FlushErrorState(); + + /* let the callers know about the failure */ + waitEventSetIndex = WAIT_EVENT_SET_INDEX_FAILED; + } + PG_END_TRY(); + + return waitEventSetIndex; +} + + +/* + * CitusModifyWaitEvent is a wrapper around Postgres' ModifyWaitEvent(). + * + * ModifyWaitEvent may throw hard errors. For example, when the underlying + * socket for a connection is closed by the remote server and already + * reflected by the OS, however Citus hasn't had a chance to get this + * information. In that case, if replication factor is >1, Citus can + * failover to other nodes for executing the query. Even if replication + * factor = 1, Citus can give much nicer errors. + * + * So CitusModifyWaitEvent simply puts ModifyWaitEvent into a PG_TRY/PG_CATCH + * block in order to catch any hard errors, and returns this information to the + * caller. + */ +bool +CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch) +{ + volatile bool success = true; + MemoryContext savedContext = CurrentMemoryContext; + + PG_TRY(); + { + ModifyWaitEvent(set, pos, events, latch); + } + PG_CATCH(); + { + /* + * We might be in an arbitrary memory context when the + * error is thrown and we should get back to one we had + * at PG_TRY() time, especially because we are not + * re-throwing the error. 
+ */ + MemoryContextSwitchTo(savedContext); + + FlushErrorState(); + + /* let the callers know about the failure */ + success = false; + } + PG_END_TRY(); + + return success; +} diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index 6511a675c..4c1aae6bf 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -906,8 +906,20 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) else if (sendStatus == 0) { /* done writing, only wait for read events */ - ModifyWaitEvent(waitEventSet, event->pos, WL_SOCKET_READABLE, - NULL); + bool success = + CitusModifyWaitEvent(waitEventSet, event->pos, + WL_SOCKET_READABLE, NULL); + if (!success) + { + ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), + errmsg("connection establishment for " + "node %s:%d failed", + connection->hostname, + connection->port), + errhint("Check both the local and remote " + "server logs for the connection " + "establishment errors."))); + } } } @@ -1052,8 +1064,17 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount, * and writeability (server is ready to receive bytes). */ int eventMask = WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE; - - AddWaitEventToSet(waitEventSet, eventMask, sock, NULL, (void *) connection); + int waitEventSetIndex = + CitusAddWaitEventSetToSet(waitEventSet, eventMask, sock, + NULL, (void *) connection); + if (waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED) + { + ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), + errmsg("connection establishment for node %s:%d failed", + connection->hostname, connection->port), + errhint("Check both the local and remote server logs for the " + "connection establishment errors."))); + } } /* diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index e128e19f3..1828362cb 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -124,6 +124,48 @@ pg_get_extensiondef_string(Oid tableRelationId) } +/* + * get_extension_version - given an extension OID, fetch its extversion + * or NULL if not found. 
+ */ +char * +get_extension_version(Oid extensionId) +{ + char *versionName = NULL; + + Relation relation = table_open(ExtensionRelationId, AccessShareLock); + + ScanKeyData entry[1]; + ScanKeyInit(&entry[0], + Anum_pg_extension_oid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(extensionId)); + + SysScanDesc scanDesc = systable_beginscan(relation, ExtensionOidIndexId, true, + NULL, 1, entry); + + HeapTuple tuple = systable_getnext(scanDesc); + + /* We assume that there can be at most one matching tuple */ + if (HeapTupleIsValid(tuple)) + { + bool isNull = false; + Datum versionDatum = heap_getattr(tuple, Anum_pg_extension_extversion, + RelationGetDescr(relation), &isNull); + if (!isNull) + { + versionName = text_to_cstring(DatumGetTextPP(versionDatum)); + } + } + + systable_endscan(scanDesc); + + table_close(relation, AccessShareLock); + + return versionName; +} + + /* * get_extension_schema - given an extension OID, fetch its extnamespace * diff --git a/src/backend/distributed/deparser/deparse_function_stmts.c b/src/backend/distributed/deparser/deparse_function_stmts.c index f0b61db18..d58faabfb 100644 --- a/src/backend/distributed/deparser/deparse_function_stmts.c +++ b/src/backend/distributed/deparser/deparse_function_stmts.c @@ -59,6 +59,7 @@ static void AppendDefElemParallel(StringInfo buf, DefElem *def); static void AppendDefElemCost(StringInfo buf, DefElem *def); static void AppendDefElemRows(StringInfo buf, DefElem *def); static void AppendDefElemSet(StringInfo buf, DefElem *def); +static void AppendDefElemSupport(StringInfo buf, DefElem *def); static void AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt); static void AppendRenameFunctionStmt(StringInfo buf, RenameStmt *stmt); @@ -179,6 +180,10 @@ AppendDefElem(StringInfo buf, DefElem *def) { AppendDefElemSet(buf, def); } + else if (strcmp(def->defname, "support") == 0) + { + AppendDefElemSupport(buf, def); + } } @@ -282,6 +287,16 @@ AppendDefElemSet(StringInfo buf, DefElem *def) } +/* + * AppendDefElemSupport appends a string representing the DefElem to a buffer + */ +static void +AppendDefElemSupport(StringInfo buf, DefElem *def) +{ + appendStringInfo(buf, " SUPPORT %s", defGetString(def)); +} + + /* * AppendVariableSet appends a string representing the VariableSetStmt to a buffer */ diff --git a/src/backend/distributed/deparser/deparse_schema_stmts.c b/src/backend/distributed/deparser/deparse_schema_stmts.c index f8c3d35a1..ebc76d5e8 100644 --- a/src/backend/distributed/deparser/deparse_schema_stmts.c +++ b/src/backend/distributed/deparser/deparse_schema_stmts.c @@ -87,16 +87,6 @@ DeparseAlterSchemaRenameStmt(Node *node) static void AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt) { - if (stmt->schemaElts != NIL) - { - elog(ERROR, "schema creating is not supported with other create commands"); - } - - if (stmt->schemaname == NULL) - { - elog(ERROR, "schema name should be specified"); - } - appendStringInfoString(buf, "CREATE SCHEMA "); if (stmt->if_not_exists) @@ -104,7 +94,18 @@ AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt) appendStringInfoString(buf, "IF NOT EXISTS "); } - appendStringInfo(buf, "%s ", quote_identifier(stmt->schemaname)); + if (stmt->schemaname != NULL) + { + appendStringInfo(buf, "%s ", quote_identifier(stmt->schemaname)); + } + else + { + /* + * If the schema name is not provided, the schema will be created + * with the name of the authorizated user. 
+ */ + Assert(stmt->authrole != NULL); + } if (stmt->authrole != NULL) { diff --git a/src/backend/distributed/deparser/deparse_text_search.c b/src/backend/distributed/deparser/deparse_text_search.c index e1ac44f5a..43d162678 100644 --- a/src/backend/distributed/deparser/deparse_text_search.c +++ b/src/backend/distributed/deparser/deparse_text_search.c @@ -12,27 +12,28 @@ #include "postgres.h" #include "catalog/namespace.h" +#include "commands/defrem.h" #include "utils/builtins.h" #include "distributed/citus_ruleutils.h" #include "distributed/deparser.h" #include "distributed/listutils.h" -static void AppendDefElemList(StringInfo buf, List *defelms); +static void AppendDefElemList(StringInfo buf, List *defelems, char *objectName); static void AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes); static void AppendStringInfoDictnames(StringInfo buf, List *dicts); /* - * DeparseCreateTextSearchStmt returns the sql for a DefineStmt defining a TEXT SEARCH - * CONFIGURATION + * DeparseCreateTextSearchConfigurationStmt returns the sql for a DefineStmt defining a + * TEXT SEARCH CONFIGURATION * * Although the syntax is mutually exclusive on the two arguments that can be passed in * the deparser will syntactically correct multiple definitions if provided. * */ char * -DeparseCreateTextSearchStmt(Node *node) +DeparseCreateTextSearchConfigurationStmt(Node *node) { DefineStmt *stmt = castNode(DefineStmt, node); @@ -42,7 +43,7 @@ DeparseCreateTextSearchStmt(Node *node) const char *identifier = NameListToQuotedString(stmt->defnames); appendStringInfo(&buf, "CREATE TEXT SEARCH CONFIGURATION %s ", identifier); appendStringInfoString(&buf, "("); - AppendDefElemList(&buf, stmt->definition); + AppendDefElemList(&buf, stmt->definition, "CONFIGURATION"); appendStringInfoString(&buf, ");"); return buf.data; @@ -50,13 +51,38 @@ DeparseCreateTextSearchStmt(Node *node) /* - * AppendDefElemList specialization to append a comma separated list of definitions to a + * DeparseCreateTextSearchDictionaryStmt returns the sql for a DefineStmt defining a + * TEXT SEARCH DICTIONARY + * + * Although the syntax is mutually exclusive on the two arguments that can be passed in + * the deparser will syntactically correct multiple definitions if provided. * + */ +char * +DeparseCreateTextSearchDictionaryStmt(Node *node) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + const char *identifier = NameListToQuotedString(stmt->defnames); + appendStringInfo(&buf, "CREATE TEXT SEARCH DICTIONARY %s ", identifier); + appendStringInfoString(&buf, "("); + AppendDefElemList(&buf, stmt->definition, "DICTIONARY"); + appendStringInfoString(&buf, ");"); + + return buf.data; +} + + +/* + * AppendDefElemList is a helper to append a comma separated list of definitions to a * define statement. * - * Currently only supports String and TypeName entries. Will error on others. + * The extra objectName parameter is used to create meaningful error messages. */ static void -AppendDefElemList(StringInfo buf, List *defelems) +AppendDefElemList(StringInfo buf, List *defelems, char *objectName) { DefElem *defelem = NULL; bool first = true; @@ -68,32 +94,25 @@ AppendDefElemList(StringInfo buf, List *defelems) } first = false; - /* extract identifier from defelem */ - const char *identifier = NULL; - switch (nodeTag(defelem->arg)) + /* + * There are some operations that can omit the argument. In that case, we only use + * the defname. 
+ * + * For example, omitting [ = value ] in the next query results in resetting the + * option to defaults: + * ALTER TEXT SEARCH DICTIONARY name ( option [ = value ] ); + */ + if (defelem->arg == NULL) { - case T_String: - { - identifier = quote_identifier(strVal(defelem->arg)); - break; - } - - case T_TypeName: - { - TypeName *typeName = castNode(TypeName, defelem->arg); - identifier = NameListToQuotedString(typeName->names); - break; - } - - default: - { - ereport(ERROR, (errmsg("unexpected argument during deparsing of " - "TEXT SEARCH CONFIGURATION definition"))); - } + appendStringInfo(buf, "%s", defelem->defname); + continue; } + /* extract value from defelem */ + const char *value = defGetString(defelem); + /* stringify */ - appendStringInfo(buf, "%s = %s", defelem->defname, identifier); + appendStringInfo(buf, "%s = %s", defelem->defname, value); } } @@ -136,6 +155,44 @@ DeparseDropTextSearchConfigurationStmt(Node *node) } + +/* + * DeparseDropTextSearchDictionaryStmt returns the sql representation for a DROP TEXT SEARCH + * DICTIONARY ... statement. Supports dropping multiple dictionaries at once. + */ +char * +DeparseDropTextSearchDictionaryStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_TSDICTIONARY); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfoString(&buf, "DROP TEXT SEARCH DICTIONARY "); + List *nameList = NIL; + bool first = true; + foreach_ptr(nameList, stmt->objects) + { + if (!first) + { + appendStringInfoString(&buf, ", "); + } + first = false; + + appendStringInfoString(&buf, NameListToQuotedString(nameList)); + } + + if (stmt->behavior == DROP_CASCADE) + { + appendStringInfoString(&buf, " CASCADE"); + } + + appendStringInfoString(&buf, ";"); + + return buf.data; +} + + /* * DeparseRenameTextSearchConfigurationStmt returns the sql representation of a ALTER TEXT * SEARCH CONFIGURATION ... RENAME TO ... statement. @@ -158,7 +215,28 @@ DeparseRenameTextSearchConfigurationStmt(Node *node) /* - * DeparseAlterTextSearchConfigurationStmt returns the ql representation of any generic + * DeparseRenameTextSearchDictionaryStmt returns the sql representation of an ALTER TEXT SEARCH + * DICTIONARY ... RENAME TO ... statement. + */ +char * +DeparseRenameTextSearchDictionaryStmt(Node *node) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSDICTIONARY); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + char *identifier = NameListToQuotedString(castNode(List, stmt->object)); + appendStringInfo(&buf, "ALTER TEXT SEARCH DICTIONARY %s RENAME TO %s;", + identifier, quote_identifier(stmt->newname)); + + return buf.data; +} + + +/* + * DeparseAlterTextSearchConfigurationStmt returns the sql representation of any generic * ALTER TEXT SEARCH CONFIGURATION .... statement. The statements supported include: * - ALTER TEXT SEARCH CONFIGURATIONS ... ADD MAPPING FOR [, ...] WITH [, ...] * - ALTER TEXT SEARCH CONFIGURATIONS ... ALTER MAPPING FOR [, ...] WITH [, ...] @@ -253,6 +331,28 @@ DeparseAlterTextSearchConfigurationStmt(Node *node) } + +/* + * DeparseAlterTextSearchDictionaryStmt returns the sql representation of any generic + * ALTER TEXT SEARCH DICTIONARY .... statement. The statements supported include + * - ALTER TEXT SEARCH DICTIONARY name ( option [ = value ] [, ... 
] ) + */ +char * +DeparseAlterTextSearchDictionaryStmt(Node *node) +{ + AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + char *identifier = NameListToQuotedString(castNode(List, stmt->dictname)); + appendStringInfo(&buf, "ALTER TEXT SEARCH DICTIONARY %s ( ", identifier); + + AppendDefElemList(&buf, stmt->options, "DICTIONARY"); + appendStringInfoString(&buf, " );"); + return buf.data; +} + + /* * DeparseAlterTextSearchConfigurationSchemaStmt returns the sql statement representing * ALTER TEXT SEARCH CONFIGURATION ... SET SCHEMA ... statements. @@ -274,6 +374,27 @@ DeparseAlterTextSearchConfigurationSchemaStmt(Node *node) } +/* + * DeparseAlterTextSearchDictionarySchemaStmt returns the sql statement representing ALTER TEXT + * SEARCH DICTIONARY ... SET SCHEMA ... statements. + */ +char * +DeparseAlterTextSearchDictionarySchemaStmt(Node *node) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfo(&buf, "ALTER TEXT SEARCH DICTIONARY %s SET SCHEMA %s;", + NameListToQuotedString(castNode(List, stmt->object)), + quote_identifier(stmt->newschema)); + + return buf.data; +} + + /* * DeparseTextSearchConfigurationCommentStmt returns the sql statement representing * COMMENT ON TEXT SEARCH CONFIGURATION ... IS ... @@ -305,6 +426,37 @@ DeparseTextSearchConfigurationCommentStmt(Node *node) } +/* + * DeparseTextSearchDictionaryCommentStmt returns the sql statement representing + * COMMENT ON TEXT SEARCH DICTIONARY ... IS ... + */ +char * +DeparseTextSearchDictionaryCommentStmt(Node *node) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSDICTIONARY); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfo(&buf, "COMMENT ON TEXT SEARCH DICTIONARY %s IS ", + NameListToQuotedString(castNode(List, stmt->object))); + + if (stmt->comment == NULL) + { + appendStringInfoString(&buf, "NULL"); + } + else + { + appendStringInfoString(&buf, quote_literal_cstr(stmt->comment)); + } + + appendStringInfoString(&buf, ";"); + + return buf.data; +} + + /* * AppendStringInfoTokentypeList specializes in adding a comma separated list of * token_tyoe's to TEXT SEARCH CONFIGURATION commands @@ -375,3 +527,24 @@ DeparseAlterTextSearchConfigurationOwnerStmt(Node *node) return buf.data; } + + +/* + * DeparseAlterTextSearchDictionaryOwnerStmt returns the sql statement representing ALTER TEXT + * SEARCH DICTIONARY ... ONWER TO ... commands. 
+ */ +char * +DeparseAlterTextSearchDictionaryOwnerStmt(Node *node) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfo(&buf, "ALTER TEXT SEARCH DICTIONARY %s OWNER TO %s;", + NameListToQuotedString(castNode(List, stmt->object)), + RoleSpecString(stmt->newowner, true)); + + return buf.data; +} diff --git a/src/backend/distributed/deparser/qualify_collation_stmt.c b/src/backend/distributed/deparser/qualify_collation_stmt.c index e1fcc2b50..dad3b7a0e 100644 --- a/src/backend/distributed/deparser/qualify_collation_stmt.c +++ b/src/backend/distributed/deparser/qualify_collation_stmt.c @@ -124,7 +124,6 @@ QualifyCollationName(List *name) (Form_pg_collation) GETSTRUCT(colltup); schemaName = get_namespace_name(collationForm->collnamespace); - collationName = NameStr(collationForm->collname); name = list_make2(makeString(schemaName), makeString(collationName)); ReleaseSysCache(colltup); } diff --git a/src/backend/distributed/deparser/qualify_text_search_stmts.c b/src/backend/distributed/deparser/qualify_text_search_stmts.c index 42c98039a..5e66b06ff 100644 --- a/src/backend/distributed/deparser/qualify_text_search_stmts.c +++ b/src/backend/distributed/deparser/qualify_text_search_stmts.c @@ -69,6 +69,44 @@ QualifyDropTextSearchConfigurationStmt(Node *node) } +/* + * QualifyDropTextSearchDictionaryStmt adds any missing schema names to text search + * dictionaries being dropped. All dictionaries are expected to exists before fully + * qualifying the statement. Errors will be raised for objects not existing. Non-existing + * objects are expected to not be distributed. + */ +void +QualifyDropTextSearchDictionaryStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_TSDICTIONARY); + + List *qualifiedObjects = NIL; + List *objName = NIL; + + foreach_ptr(objName, stmt->objects) + { + char *schemaName = NULL; + char *tsdictName = NULL; + DeconstructQualifiedName(objName, &schemaName, &tsdictName); + + if (!schemaName) + { + Oid tsdictOid = get_ts_dict_oid(objName, false); + Oid namespaceOid = get_ts_dict_namespace(tsdictOid); + schemaName = get_namespace_name(namespaceOid); + + objName = list_make2(makeString(schemaName), + makeString(tsdictName)); + } + + qualifiedObjects = lappend(qualifiedObjects, objName); + } + + stmt->objects = qualifiedObjects; +} + + /* * QualifyAlterTextSearchConfigurationStmt adds the schema name (if missing) to the name * of the text search configurations, as well as the dictionaries referenced. @@ -128,6 +166,32 @@ QualifyAlterTextSearchConfigurationStmt(Node *node) } +/* + * QualifyAlterTextSearchDictionaryStmt adds the schema name (if missing) to the name + * of the text search dictionary. + */ +void +QualifyAlterTextSearchDictionaryStmt(Node *node) +{ + AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(stmt->dictname, &schemaName, &objName); + + /* fully qualify the dictname being altered */ + if (!schemaName) + { + Oid tsdictOid = get_ts_dict_oid(stmt->dictname, false); + Oid namespaceOid = get_ts_dict_namespace(tsdictOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->dictname = list_make2(makeString(schemaName), + makeString(objName)); + } +} + + /* * QualifyRenameTextSearchConfigurationStmt adds the schema name (if missing) to the * configuration being renamed. 
The new name will kept be without schema name since this @@ -156,9 +220,37 @@ QualifyRenameTextSearchConfigurationStmt(Node *node) } +/* + * QualifyRenameTextSearchDictionaryStmt adds the schema name (if missing) to the + * dictionary being renamed. The new name will kept be without schema name since this + * command cannot be used to change the schema of a dictionary. + */ +void +QualifyRenameTextSearchDictionaryStmt(Node *node) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSDICTIONARY); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + /* fully qualify the dictname being altered */ + if (!schemaName) + { + Oid tsdictOid = get_ts_dict_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_dict_namespace(tsdictOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + /* * QualifyAlterTextSearchConfigurationSchemaStmt adds the schema name (if missing) for the - * text search being moved to a new schema. + * text search config being moved to a new schema. */ void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node) @@ -182,6 +274,32 @@ QualifyAlterTextSearchConfigurationSchemaStmt(Node *node) } +/* + * QualifyAlterTextSearchDictionarySchemaStmt adds the schema name (if missing) for the + * text search dictionary being moved to a new schema. + */ +void +QualifyAlterTextSearchDictionarySchemaStmt(Node *node) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsdictOid = get_ts_dict_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_dict_namespace(tsdictOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + /* * QualifyTextSearchConfigurationCommentStmt adds the schema name (if missing) to the * configuration name on which the comment is created. @@ -208,6 +326,32 @@ QualifyTextSearchConfigurationCommentStmt(Node *node) } +/* + * QualifyTextSearchDictionaryCommentStmt adds the schema name (if missing) to the + * dictionary name on which the comment is created. + */ +void +QualifyTextSearchDictionaryCommentStmt(Node *node) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSDICTIONARY); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsdictOid = get_ts_dict_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_dict_namespace(tsdictOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + /* * QualifyAlterTextSearchConfigurationOwnerStmt adds the schema name (if missing) to the * configuration for which the owner is changing. @@ -234,6 +378,32 @@ QualifyAlterTextSearchConfigurationOwnerStmt(Node *node) } +/* + * QualifyAlterTextSearchDictionaryOwnerStmt adds the schema name (if missing) to the + * dictionary for which the owner is changing. 
+ */ +void +QualifyAlterTextSearchDictionaryOwnerStmt(Node *node) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSDICTIONARY); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsdictOid = get_ts_dict_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_dict_namespace(tsdictOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + /* * get_ts_config_namespace returns the oid of the namespace which is housing the text * search configuration identified by tsconfigOid. diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index 1f04751bb..2b32916ee 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -178,8 +178,6 @@ #include "utils/timestamp.h" #define SLOW_START_DISABLED 0 -#define WAIT_EVENT_SET_INDEX_NOT_INITIALIZED -1 -#define WAIT_EVENT_SET_INDEX_FAILED -2 /* @@ -678,10 +676,6 @@ static int UsableConnectionCount(WorkerPool *workerPool); static long NextEventTimeout(DistributedExecution *execution); static WaitEventSet * BuildWaitEventSet(List *sessionList); static void RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList); -static int CitusAddWaitEventSetToSet(WaitEventSet *set, uint32 events, pgsocket fd, - Latch *latch, void *user_data); -static bool CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, - Latch *latch); static TaskPlacementExecution * PopPlacementExecution(WorkerSession *session); static TaskPlacementExecution * PopAssignedPlacementExecution(WorkerSession *session); static TaskPlacementExecution * PopUnassignedPlacementExecution(WorkerPool *workerPool); @@ -1442,6 +1436,15 @@ DistributedExecutionRequiresRollback(List *taskList) return true; } + if (task->queryCount > 1) + { + /* + * When there are multiple sequential queries in a task + * we need to run those as a transaction. + */ + return true; + } + return false; } @@ -5367,6 +5370,19 @@ BuildWaitEventSet(List *sessionList) CitusAddWaitEventSetToSet(waitEventSet, connection->waitFlags, sock, NULL, (void *) session); session->waitEventSetIndex = waitEventSetIndex; + + /* + * Inform failed to add to wait event set with a debug message as this + * is too detailed information for users. + */ + if (session->waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED) + { + ereport(DEBUG1, (errcode(ERRCODE_CONNECTION_FAILURE), + errmsg("Adding wait event for node %s:%d failed. " + "The socket was: %d", + session->workerPool->nodeName, + session->workerPool->nodePort, sock))); + } } CitusAddWaitEventSetToSet(waitEventSet, WL_POSTMASTER_DEATH, PGINVALID_SOCKET, NULL, @@ -5378,64 +5394,6 @@ BuildWaitEventSet(List *sessionList) } -/* - * CitusAddWaitEventSetToSet is a wrapper around Postgres' AddWaitEventToSet(). - * - * AddWaitEventToSet() may throw hard errors. For example, when the - * underlying socket for a connection is closed by the remote server - * and already reflected by the OS, however Citus hasn't had a chance - * to get this information. In that case, if replication factor is >1, - * Citus can failover to other nodes for executing the query. Even if - * replication factor = 1, Citus can give much nicer errors. 
- * - * So CitusAddWaitEventSetToSet simply puts ModifyWaitEvent into a - * PG_TRY/PG_CATCH block in order to catch any hard errors, and - * returns this information to the caller. - */ -static int -CitusAddWaitEventSetToSet(WaitEventSet *set, uint32 events, pgsocket fd, - Latch *latch, void *user_data) -{ - volatile int waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED; - MemoryContext savedContext = CurrentMemoryContext; - - PG_TRY(); - { - waitEventSetIndex = - AddWaitEventToSet(set, events, fd, latch, (void *) user_data); - } - PG_CATCH(); - { - /* - * We might be in an arbitrary memory context when the - * error is thrown and we should get back to one we had - * at PG_TRY() time, especially because we are not - * re-throwing the error. - */ - MemoryContextSwitchTo(savedContext); - - FlushErrorState(); - - if (user_data != NULL) - { - WorkerSession *workerSession = (WorkerSession *) user_data; - - ereport(DEBUG1, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("Adding wait event for node %s:%d failed. " - "The socket was: %d", - workerSession->workerPool->nodeName, - workerSession->workerPool->nodePort, fd))); - } - - /* let the callers know about the failure */ - waitEventSetIndex = WAIT_EVENT_SET_INDEX_FAILED; - } - PG_END_TRY(); - - return waitEventSetIndex; -} - - /* * GetEventSetSize returns the event set size for a list of sessions. */ @@ -5485,7 +5443,7 @@ RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList) if (!success) { ereport(DEBUG1, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("Modifying wait event for node %s:%d failed. " + errmsg("modifying wait event for node %s:%d failed. " "The wait event index was: %d", connection->hostname, connection->port, waitEventSetIndex))); @@ -5496,51 +5454,6 @@ RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList) } -/* - * CitusModifyWaitEvent is a wrapper around Postgres' ModifyWaitEvent(). - * - * ModifyWaitEvent may throw hard errors. For example, when the underlying - * socket for a connection is closed by the remote server and already - * reflected by the OS, however Citus hasn't had a chance to get this - * information. In that case, if replication factor is >1, Citus can - * failover to other nodes for executing the query. Even if replication - * factor = 1, Citus can give much nicer errors. - * - * So CitusModifyWaitEvent simply puts ModifyWaitEvent into a PG_TRY/PG_CATCH - * block in order to catch any hard errors, and returns this information to the - * caller. - */ -static bool -CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch) -{ - volatile bool success = true; - MemoryContext savedContext = CurrentMemoryContext; - - PG_TRY(); - { - ModifyWaitEvent(set, pos, events, latch); - } - PG_CATCH(); - { - /* - * We might be in an arbitrary memory context when the - * error is thrown and we should get back to one we had - * at PG_TRY() time, especially because we are not - * re-throwing the error. 
- */ - MemoryContextSwitchTo(savedContext); - - FlushErrorState(); - - /* let the callers know about the failure */ - success = false; - } - PG_END_TRY(); - - return success; -} - - /* * SetLocalForceMaxQueryParallelization is simply a C interface for setting * the following: diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index a47dc6a48..f3ee37c23 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -788,6 +788,11 @@ GetObjectTypeString(ObjectType objType) return "text search configuration"; } + case OBJECT_TSDICTIONARY: + { + return "text search dictionary"; + } + case OBJECT_TYPE: { return "type"; diff --git a/src/backend/distributed/executor/partitioned_intermediate_results.c b/src/backend/distributed/executor/partitioned_intermediate_results.c index 129a7d130..bae4d2ef5 100644 --- a/src/backend/distributed/executor/partitioned_intermediate_results.c +++ b/src/backend/distributed/executor/partitioned_intermediate_results.c @@ -16,6 +16,7 @@ #include "miscadmin.h" #include "port.h" +#include "access/hash.h" #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_type.h" @@ -349,6 +350,12 @@ QueryTupleShardSearchInfo(ArrayType *minValuesArray, ArrayType *maxValuesArray, hashFunction = palloc0(sizeof(FmgrInfo)); fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), CurrentMemoryContext); + + if (!OidIsValid(hashFunction->fn_oid)) + { + ereport(ERROR, (errmsg("no hash function defined for type %s", + format_type_be(partitionColumn->vartype)))); + } } ShardInterval **shardIntervalArray = palloc0(partitionCount * diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 73d41d811..a291b8702 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -600,7 +600,7 @@ SupportedDependencyByCitus(const ObjectAddress *address) { case OCLASS_SCHEMA: { - return true; + return !isTempNamespace(address->objectId); } default: @@ -631,11 +631,15 @@ SupportedDependencyByCitus(const ObjectAddress *address) } case OCLASS_COLLATION: - case OCLASS_SCHEMA: { return true; } + case OCLASS_SCHEMA: + { + return !isTempNamespace(address->objectId); + } + case OCLASS_PROC: { return true; @@ -676,6 +680,11 @@ SupportedDependencyByCitus(const ObjectAddress *address) return true; } + case OCLASS_TSDICT: + { + return true; + } + case OCLASS_TYPE: { switch (get_typtype(address->objectId)) @@ -771,14 +780,16 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) #endif /* - * If the given object is a procedure, we want to create it locally, - * so provide that information in the error detail. + * We expect callers to interpret the error returned from this function + * as a warning if the object itself is just being created. In that case, + * we expect them to report below error detail as well to indicate that + * object itself will not be propagated but will still be created locally. + * + * Otherwise, callers are expected to throw the error returned from this + * function as a hard one by ignoring the detail part. 
*/ - if (getObjectClass(objectAddress) == OCLASS_PROC) - { - appendStringInfo(detailInfo, "\"%s\" will be created only locally", - objectDescription); - } + appendStringInfo(detailInfo, "\"%s\" will be created only locally", + objectDescription); if (SupportedDependencyByCitus(undistributableDependency)) { @@ -794,9 +805,7 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) objectDescription); return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, - errorInfo->data, - strlen(detailInfo->data) == 0 ? NULL : detailInfo->data, - hintInfo->data); + errorInfo->data, detailInfo->data, hintInfo->data); } appendStringInfo(errorInfo, "\"%s\" has dependency on unsupported " @@ -804,9 +813,7 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress) dependencyDescription); return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, - errorInfo->data, - strlen(detailInfo->data) == 0 ? NULL : detailInfo->data, - NULL); + errorInfo->data, detailInfo->data, NULL); } @@ -857,9 +864,13 @@ GetUndistributableDependency(const ObjectAddress *objectAddress) if (!SupportedDependencyByCitus(dependency)) { /* - * Since roles should be handled manually with Citus community, skip them. + * Skip roles and text search templates. + * + * Roles should be handled manually with Citus community whereas text search + * templates should be handled manually in both community and enterprise */ - if (getObjectClass(dependency) != OCLASS_ROLE) + if (getObjectClass(dependency) != OCLASS_ROLE && + getObjectClass(dependency) != OCLASS_TSTEMPLATE) { return dependency; } @@ -1259,7 +1270,7 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe /* * Tables could have indexes. Indexes themself could have dependencies that - * need to be propagated. eg. TEXT SEARCH CONFIGRUATIONS. Here we add the + * need to be propagated. eg. TEXT SEARCH CONFIGURATIONS. Here we add the * addresses of all indices to the list of objects to vist, as to make sure we * create all objects required by the indices before we create the table * including indices. diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 6cce0688b..8f5ebae97 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -169,6 +169,7 @@ typedef struct MetadataCacheData Oid citusTableIsVisibleFuncId; Oid relationIsAKnownShardFuncId; Oid jsonbExtractPathFuncId; + Oid jsonbExtractPathTextFuncId; bool databaseNameValid; char databaseName[NAMEDATALEN]; } MetadataCacheData; @@ -2726,6 +2727,24 @@ JsonbExtractPathFuncId(void) } +/* + * JsonbExtractPathTextFuncId returns oid of the jsonb_extract_path_text function. + */ +Oid +JsonbExtractPathTextFuncId(void) +{ + if (MetadataCache.jsonbExtractPathTextFuncId == InvalidOid) + { + const int argCount = 2; + + MetadataCache.jsonbExtractPathTextFuncId = + FunctionOid("pg_catalog", "jsonb_extract_path_text", argCount); + } + + return MetadataCache.jsonbExtractPathTextFuncId; +} + + /* * CurrentDatabaseName gets the name of the current database and caches * the result. 
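JsonbExtractPathTextFuncId above follows the metadata cache's usual lazy-lookup pattern: resolve the function OID once with FunctionOid() and reuse it for the rest of the session. A minimal sketch of that pattern with a stand-in function; the cache slot and helper names are hypothetical, and jsonb_typeof is used only as an example target:

/* hypothetical session-local cache slot; InvalidOid means "not resolved yet" */
static Oid CachedJsonbTypeofFuncId = InvalidOid;

static Oid
JsonbTypeofFuncId(void)
{
    if (CachedJsonbTypeofFuncId == InvalidOid)
    {
        const int argCount = 1;

        CachedJsonbTypeofFuncId =
            FunctionOid("pg_catalog", "jsonb_typeof", argCount);
    }

    return CachedJsonbTypeofFuncId;
}

static char *
JsonbTypeName(Datum jsonbDatum)
{
    /* jsonb_typeof is strict and never returns NULL for a non-NULL input */
    Datum typeNameDatum = OidFunctionCall1(JsonbTypeofFuncId(), jsonbDatum);

    return text_to_cstring(DatumGetTextPP(typeNameDatum));
}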
diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index dc501923e..370ce1b0e 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -1487,13 +1487,10 @@ GetDependentSequencesWithRelation(Oid relationId, List **attnumList, table_close(depRel, AccessShareLock); - ListCell *attrdefOidCell = NULL; - ListCell *attrdefAttnumCell = NULL; - forboth(attrdefOidCell, attrdefResult, attrdefAttnumCell, attrdefAttnumResult) + AttrNumber attrdefAttnum = InvalidAttrNumber; + Oid attrdefOid = InvalidOid; + forboth_int_oid(attrdefAttnum, attrdefAttnumResult, attrdefOid, attrdefResult) { - Oid attrdefOid = lfirst_oid(attrdefOidCell); - AttrNumber attrdefAttnum = lfirst_int(attrdefAttnumCell); - List *sequencesFromAttrDef = GetSequencesFromAttrDef(attrdefOid); /* to simplify and eliminate cases like "DEFAULT nextval('..') - nextval('..')" */ @@ -1689,14 +1686,10 @@ SequenceDependencyCommandList(Oid relationId) ExtractDefaultColumnsAndOwnedSequences(relationId, &columnNameList, &sequenceIdList); - ListCell *columnNameCell = NULL; - ListCell *sequenceIdCell = NULL; - - forboth(columnNameCell, columnNameList, sequenceIdCell, sequenceIdList) + char *columnName = NULL; + Oid sequenceId = InvalidOid; + forboth_ptr_oid(columnName, columnNameList, sequenceId, sequenceIdList) { - char *columnName = lfirst(columnNameCell); - Oid sequenceId = lfirst_oid(sequenceIdCell); - if (!OidIsValid(sequenceId)) { /* diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index f9f070166..e857e1dda 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -126,6 +126,7 @@ static void ErrorIfCoordinatorMetadataSetFalse(WorkerNode *workerNode, Datum val char *field); static WorkerNode * SetShouldHaveShards(WorkerNode *workerNode, bool shouldHaveShards); static void RemoveOldShardPlacementForNodeGroup(int groupId); +static int FindCoordinatorNodeId(void); /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(citus_set_coordinator_host); @@ -148,6 +149,7 @@ PG_FUNCTION_INFO_V1(master_update_node); PG_FUNCTION_INFO_V1(get_shard_id_for_distribution_column); PG_FUNCTION_INFO_V1(citus_nodename_for_nodeid); PG_FUNCTION_INFO_V1(citus_nodeport_for_nodeid); +PG_FUNCTION_INFO_V1(citus_coordinator_nodeid); /* @@ -275,6 +277,24 @@ citus_add_node(PG_FUNCTION_ARGS) */ if (!nodeAlreadyExists) { + WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeNameString, nodePort); + + /* + * If the worker is not marked as a coordinator, check that + * the node is not trying to add itself + */ + if (workerNode != NULL && + workerNode->groupId != COORDINATOR_GROUP_ID && + workerNode->nodeRole != SecondaryNodeRoleId() && + IsWorkerTheCurrentNode(workerNode)) + { + ereport(ERROR, (errmsg("Node cannot add itself as a worker."), + errhint( + "Add the node as a coordinator by using: " + "SELECT citus_set_coordinator_host('%s', %d);", + nodeNameString, nodePort))); + } + ActivateNode(nodeNameString, nodePort); } @@ -1519,6 +1539,25 @@ citus_nodeport_for_nodeid(PG_FUNCTION_ARGS) } +/* + * citus_coordinator_nodeid returns the node id of the coordinator node + */ +Datum +citus_coordinator_nodeid(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + int coordinatorNodeId = FindCoordinatorNodeId(); + + if (coordinatorNodeId == -1) + { + PG_RETURN_INT32(0); + } + + PG_RETURN_INT32(coordinatorNodeId); +} + + /* * 
FindWorkerNode searches over the worker nodes and returns the workerNode * if it already exists. Else, the function returns NULL. @@ -1617,6 +1656,28 @@ FindNodeWithNodeId(int nodeId, bool missingOk) } +/* + * FindCoordinatorNodeId returns the node id of the coordinator node + */ +static int +FindCoordinatorNodeId() +{ + bool includeNodesFromOtherClusters = false; + List *nodeList = ReadDistNode(includeNodesFromOtherClusters); + WorkerNode *node = NULL; + + foreach_ptr(node, nodeList) + { + if (NodeIsCoordinator(node)) + { + return node->nodeId; + } + } + + return -1; +} + + /* * ReadDistNode iterates over pg_dist_node table, converts each row * into it's memory representation (i.e., WorkerNode) and adds them into diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c index c2ec4db3a..26248f025 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c @@ -411,6 +411,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, case OBJECT_PROCEDURE: case OBJECT_AGGREGATE: case OBJECT_TSCONFIGURATION: + case OBJECT_TSDICTIONARY: case OBJECT_TYPE: case OBJECT_FOREIGN_SERVER: case OBJECT_SEQUENCE: diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index d18ef749c..bd1a53ad4 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -48,6 +48,7 @@ #include "distributed/metadata_sync.h" #include "distributed/namespace_utils.h" #include "distributed/pg_dist_shard.h" +#include "distributed/shared_library_init.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" #include "foreign/foreign.h" @@ -613,7 +614,7 @@ GetPreLoadTableCreationCommands(Oid relationId, /* add columnar options for cstore tables */ - if (accessMethod == NULL && IsColumnarTableAmTable(relationId)) + if (accessMethod == NULL && extern_IsColumnarTableAmTable(relationId)) { TableDDLCommand *cstoreOptionsDDL = ColumnarGetTableOptionsDDL(relationId); if (cstoreOptionsDDL != NULL) @@ -1047,7 +1048,8 @@ CitusCreateAlterColumnarTableSet(char *qualifiedRelationName, options->chunkRowCount, options->stripeRowCount, options->compressionLevel, - quote_literal_cstr(CompressionTypeStr(options->compressionType))); + quote_literal_cstr(extern_CompressionTypeStr( + options->compressionType))); return buf.data; } @@ -1136,7 +1138,7 @@ ColumnarGetTableOptionsDDL(Oid relationId) char *relationName = get_rel_name(relationId); ColumnarOptions options = { 0 }; - ReadColumnarOptions(relationId, &options); + extern_ReadColumnarOptions(relationId, &options); return ColumnarGetCustomTableOptionsDDL(schemaName, relationName, options); } diff --git a/src/backend/distributed/planner/cte_inline.c b/src/backend/distributed/planner/cte_inline.c index 4dfcfef0a..4a3ba156f 100644 --- a/src/backend/distributed/planner/cte_inline.c +++ b/src/backend/distributed/planner/cte_inline.c @@ -45,9 +45,6 @@ static void InlineCTEsInQueryTree(Query *query); static bool QueryTreeContainsInlinableCteWalker(Node *node); -/* controlled via GUC */ -bool EnableCTEInlining = true; - /* * RecursivelyInlineCtesInQueryTree gets a query and recursively traverses the * tree from top to bottom. 
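Several hunks in this patch (metadata_sync.c above; insert_select_planner.c, multi_explain.c, colocation_utils.c and others below) swap open-coded forboth()/lfirst() loops for the typed forboth_ptr/forboth_ptr_oid/forboth_int_oid iterators from listutils.h. Their definitions are not part of this diff; the macro below is only a sketch of how such an iterator can be written (a real definition would mangle the cell variable names to avoid collisions), not the actual listutils.h code.

/*
 * Sketch of a typed pair iterator in the spirit of forboth_ptr: it walks two
 * lists in lockstep and assigns the current elements to caller-declared typed
 * variables, so the loop body needs no ListCell/lfirst() boilerplate.
 * Assumes the PostgreSQL 13+ two-argument lnext().
 */
#define forboth_ptr_sketch(var1, list1, var2, list2) \
    for (ListCell *cell1 = list_head(list1), *cell2 = list_head(list2); \
         cell1 != NULL && cell2 != NULL && \
         (((var1) = lfirst(cell1)), ((var2) = lfirst(cell2)), true); \
         cell1 = lnext(list1, cell1), cell2 = lnext(list2, cell2))

/* inside RelabelTargetEntryList, the converted loop below then reads: */
TargetEntry *selectTargetEntry = NULL;
TargetEntry *insertTargetEntry = NULL;
forboth_ptr_sketch(selectTargetEntry, selectTargetList,
                   insertTargetEntry, insertTargetList)
{
    selectTargetEntry->resname = insertTargetEntry->resname;
}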
On each level, the CTEs that are eligable for diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index d5b71e505..6e053cecd 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -752,19 +752,6 @@ static PlannedStmt * InlineCtesAndCreateDistributedPlannedStmt(uint64 planId, DistributedPlanningContext *planContext) { - if (!EnableCTEInlining) - { - /* - * In Postgres 12+, users can adjust whether to inline/not inline CTEs - * by [NOT] MATERIALIZED keywords. However, in PG 11, that's not possible. - * So, with this we provide a way to prevent CTE inlining on Postgres 11. - * - * The main use-case for this is not to have divergent test outputs between - * PG 11 vs PG 12, so not very much intended for users. - */ - return NULL; - } - /* * We'll inline the CTEs and try distributed planning, preserve the original * query in case the planning fails and we fallback to recursive planning of diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 55559ce58..746e2846c 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -1513,14 +1513,10 @@ InsertSelectResultIdPrefix(uint64 planId) static void RelabelTargetEntryList(List *selectTargetList, List *insertTargetList) { - ListCell *selectTargetCell = NULL; - ListCell *insertTargetCell = NULL; - - forboth(selectTargetCell, selectTargetList, insertTargetCell, insertTargetList) + TargetEntry *selectTargetEntry = NULL; + TargetEntry *insertTargetEntry = NULL; + forboth_ptr(selectTargetEntry, selectTargetList, insertTargetEntry, insertTargetList) { - TargetEntry *selectTargetEntry = lfirst(selectTargetCell); - TargetEntry *insertTargetEntry = lfirst(insertTargetCell); - selectTargetEntry->resname = insertTargetEntry->resname; } } @@ -1537,8 +1533,6 @@ static List * AddInsertSelectCasts(List *insertTargetList, List *selectTargetList, Oid targetRelationId) { - ListCell *insertEntryCell = NULL; - ListCell *selectEntryCell = NULL; List *projectedEntries = NIL; List *nonProjectedEntries = NIL; @@ -1553,10 +1547,10 @@ AddInsertSelectCasts(List *insertTargetList, List *selectTargetList, TupleDesc destTupleDescriptor = RelationGetDescr(distributedRelation); int targetEntryIndex = 0; - forboth(insertEntryCell, insertTargetList, selectEntryCell, selectTargetList) + TargetEntry *insertEntry = NULL; + TargetEntry *selectEntry = NULL; + forboth_ptr(insertEntry, insertTargetList, selectEntry, selectTargetList) { - TargetEntry *insertEntry = (TargetEntry *) lfirst(insertEntryCell); - TargetEntry *selectEntry = (TargetEntry *) lfirst(selectEntryCell); Var *insertColumn = (Var *) insertEntry->expr; Form_pg_attribute attr = TupleDescAttr(destTupleDescriptor, insertEntry->resno - 1); diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index bca9bbaa1..1a6c708c0 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -46,9 +46,9 @@ #include "distributed/placement_connection.h" #include "distributed/tuple_destination.h" #include "distributed/tuplestore.h" -#include "distributed/listutils.h" #include "distributed/worker_protocol.h" #include "distributed/version_compat.h" +#include "distributed/jsonbutils.h" #include "executor/tstoreReceiver.h" #include "fmgr.h" 
#include "lib/stringinfo.h" @@ -143,10 +143,8 @@ static void ExplainWorkerPlan(PlannedStmt *plannedStmt, DestReceiver *dest, QueryEnvironment *queryEnv, const instr_time *planduration, double *executionDurationMillisec); -static bool ExtractFieldBoolean(Datum jsonbDoc, const char *fieldName, bool defaultValue); static ExplainFormat ExtractFieldExplainFormat(Datum jsonbDoc, const char *fieldName, ExplainFormat defaultValue); -static bool ExtractFieldJsonbDatum(Datum jsonbDoc, const char *fieldName, Datum *result); static TupleDestination * CreateExplainAnlyzeDestination(Task *task, TupleDestination *taskDest); static void ExplainAnalyzeDestPutTuple(TupleDestination *self, Task *task, @@ -577,8 +575,6 @@ static void ExplainTaskList(CitusScanState *scanState, List *taskList, ExplainState *es, ParamListInfo params) { - ListCell *taskCell = NULL; - ListCell *remoteExplainCell = NULL; List *remoteExplainList = NIL; /* if tasks are executed, we sort them by time; unless we are on a test env */ @@ -593,10 +589,9 @@ ExplainTaskList(CitusScanState *scanState, List *taskList, ExplainState *es, taskList = SortList(taskList, CompareTasksByTaskId); } - foreach(taskCell, taskList) + Task *task = NULL; + foreach_ptr(task, taskList) { - Task *task = (Task *) lfirst(taskCell); - RemoteExplainPlan *remoteExplain = RemoteExplain(task, es, params); remoteExplainList = lappend(remoteExplainList, remoteExplain); @@ -606,12 +601,9 @@ ExplainTaskList(CitusScanState *scanState, List *taskList, ExplainState *es, } } - forboth(taskCell, taskList, remoteExplainCell, remoteExplainList) + RemoteExplainPlan *remoteExplain = NULL; + forboth_ptr(task, taskList, remoteExplain, remoteExplainList) { - Task *task = (Task *) lfirst(taskCell); - RemoteExplainPlan *remoteExplain = - (RemoteExplainPlan *) lfirst(remoteExplainCell); - ExplainTask(scanState, task, remoteExplain->placementIndex, remoteExplain->explainOutputList, es); } @@ -1112,25 +1104,6 @@ FreeSavedExplainPlan(void) } -/* - * ExtractFieldBoolean gets value of fieldName from jsonbDoc, or returns - * defaultValue if it doesn't exist. - */ -static bool -ExtractFieldBoolean(Datum jsonbDoc, const char *fieldName, bool defaultValue) -{ - Datum jsonbDatum = 0; - bool found = ExtractFieldJsonbDatum(jsonbDoc, fieldName, &jsonbDatum); - if (!found) - { - return defaultValue; - } - - Datum boolDatum = DirectFunctionCall1(jsonb_bool, jsonbDatum); - return DatumGetBool(boolDatum); -} - - /* * ExtractFieldExplainFormat gets value of fieldName from jsonbDoc, or returns * defaultValue if it doesn't exist. @@ -1169,50 +1142,6 @@ ExtractFieldExplainFormat(Datum jsonbDoc, const char *fieldName, ExplainFormat } -/* - * ExtractFieldJsonbDatum gets value of fieldName from jsonbDoc and puts it - * into result. If not found, returns false. Otherwise, returns true. 
- */ -static bool -ExtractFieldJsonbDatum(Datum jsonbDoc, const char *fieldName, Datum *result) -{ - Datum pathArray[1] = { CStringGetTextDatum(fieldName) }; - bool pathNulls[1] = { false }; - bool typeByValue = false; - char typeAlignment = 0; - int16 typeLength = 0; - int dimensions[1] = { 1 }; - int lowerbounds[1] = { 1 }; - - get_typlenbyvalalign(TEXTOID, &typeLength, &typeByValue, &typeAlignment); - - ArrayType *pathArrayObject = construct_md_array(pathArray, pathNulls, 1, dimensions, - lowerbounds, TEXTOID, typeLength, - typeByValue, typeAlignment); - Datum pathDatum = PointerGetDatum(pathArrayObject); - - /* - * We need to check whether the result of jsonb_extract_path is NULL or not, so use - * FunctionCallInvoke() instead of other function call api. - * - * We cannot use jsonb_path_exists to ensure not-null since it is not available in - * postgres 11. - */ - FmgrInfo fmgrInfo; - fmgr_info(JsonbExtractPathFuncId(), &fmgrInfo); - - LOCAL_FCINFO(functionCallInfo, 2); - InitFunctionCallInfoData(*functionCallInfo, &fmgrInfo, 2, DEFAULT_COLLATION_OID, NULL, - NULL); - - fcSetArg(functionCallInfo, 0, jsonbDoc); - fcSetArg(functionCallInfo, 1, pathDatum); - - *result = FunctionCallInvoke(functionCallInfo); - return !functionCallInfo->isnull; -} - - /* * CitusExplainOneQuery is the executor hook that is called when * postgres wants to explain a query. @@ -1483,7 +1412,9 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc) } Form_pg_attribute attr = &tupleDesc->attrs[columnIndex]; - char *attrType = format_type_with_typemod(attr->atttypid, attr->atttypmod); + char *attrType = format_type_extended(attr->atttypid, attr->atttypmod, + FORMAT_TYPE_TYPEMOD_GIVEN | + FORMAT_TYPE_FORCE_QUALIFY); appendStringInfo(columnDef, "field_%d %s", columnIndex, attrType); } diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index 9f578eac9..3d9c78bf8 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -2194,11 +2194,9 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId, DeferredErrorMessage **planningError) { List *sqlTaskList = NIL; - ListCell *restrictionCell = NULL; uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */ int shardCount = 0; bool *taskRequiredForShardIndex = NULL; - ListCell *prunedRelationShardCell = NULL; /* error if shards are not co-partitioned */ ErrorIfUnsupportedShardDistribution(query); @@ -2216,14 +2214,13 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId, int minShardOffset = 0; int maxShardOffset = 0; - forboth(prunedRelationShardCell, prunedRelationShardList, - restrictionCell, relationRestrictionContext->relationRestrictionList) + RelationRestriction *relationRestriction = NULL; + List *prunedShardList = NULL; + + forboth_ptr(prunedShardList, prunedRelationShardList, + relationRestriction, relationRestrictionContext->relationRestrictionList) { - RelationRestriction *relationRestriction = - (RelationRestriction *) lfirst(restrictionCell); Oid relationId = relationRestriction->relationId; - List *prunedShardList = (List *) lfirst(prunedRelationShardCell); - ListCell *shardIntervalCell = NULL; CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); if (IsCitusTableTypeCacheEntry(cacheEntry, CITUS_TABLE_WITH_NO_DIST_KEY)) @@ -2266,9 +2263,9 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId, continue; } - foreach(shardIntervalCell, prunedShardList) + ShardInterval 
*shardInterval = NULL; + foreach_ptr(shardInterval, prunedShardList) { - ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); int shardIndex = shardInterval->shardIndex; taskRequiredForShardIndex[shardIndex] = true; diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c index f78cec339..5ad7887e9 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -45,6 +45,8 @@ #include "parser/parsetree.h" +#define INVALID_RELID -1 + /* * RecurringTuplesType is used to distinguish different types of expressions * that always produce the same set of tuples when a shard is queried. We make @@ -61,6 +63,17 @@ typedef enum RecurringTuplesType RECURRING_TUPLES_VALUES } RecurringTuplesType; +/* + * RelidsReferenceWalkerContext is used to find Vars in a (sub)query that + * refer to certain relids from the upper query. + */ +typedef struct RelidsReferenceWalkerContext +{ + int level; + Relids relids; + int foundRelid; +} RelidsReferenceWalkerContext; + /* Config variable managed via guc.c */ bool SubqueryPushdown = false; /* is subquery pushdown enabled */ @@ -76,7 +89,9 @@ static RecurringTuplesType FromClauseRecurringTupleType(Query *queryTree); static DeferredErrorMessage * DeferredErrorIfUnsupportedRecurringTuplesJoin( PlannerRestrictionContext *plannerRestrictionContext); static DeferredErrorMessage * DeferErrorIfUnsupportedTableCombination(Query *queryTree); -static DeferredErrorMessage * DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree); +static DeferredErrorMessage * DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree, bool + lateral, + char *referencedThing); static bool ExtractSetOperationStatementWalker(Node *node, List **setOperationList); static RecurringTuplesType FetchFirstRecurType(PlannerInfo *plannerInfo, Relids relids); @@ -90,7 +105,12 @@ static List * CreateSubqueryTargetListAndAdjustVars(List *columnList); static AttrNumber FindResnoForVarInTargetList(List *targetList, int varno, int varattno); static bool RelationInfoContainsOnlyRecurringTuples(PlannerInfo *plannerInfo, Relids relids); +static DeferredErrorMessage * DeferredErrorIfUnsupportedLateralSubquery( + PlannerInfo *plannerInfo, Relids recurringRelIds, Relids nonRecurringRelIds); static Var * PartitionColumnForPushedDownSubquery(Query *query); +static bool ContainsReferencesToRelids(Query *query, Relids relids, int *foundRelid); +static bool ContainsReferencesToRelidsWalker(Node *node, + RelidsReferenceWalkerContext *context); /* @@ -844,6 +864,49 @@ DeferredErrorIfUnsupportedRecurringTuplesJoin( break; } } + else if (joinType == JOIN_INNER && plannerInfo->hasLateralRTEs) + { + /* + * Sometimes we cannot push down INNER JOINS when they have only + * recurring tuples on one side and a lateral on the other side. + * See comment on DeferredErrorIfUnsupportedLateralSubquery for + * details. + * + * When planning inner joins postgres can move RTEs from left to + * right and from right to left. So we don't know on which side the + * lateral join wil appear. Thus we try to find a side of the join + * that only contains recurring tuples. And then we check the other + * side to see if it contains an unsupported lateral join. 
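A concrete query shape helps here. Assuming a hypothetical reference table ref_categories and a hash-distributed table orders, the following query has only recurring tuples on one side of the inner join and, on the other side, a LATERAL subquery that both reads the distributed table and needs a merge step (the LIMIT, with the default subquery pushdown setting), so it is expected to hit the new deferred error rather than being pushed down:

/* illustration only; table and column names are made up */
const char *unsupportedLateralJoinExample =
    "SELECT * "
    "FROM ref_categories ref "
    "JOIN LATERAL ("
    "    SELECT * FROM orders o "
    "    WHERE o.category_id = ref.category_id "
    "    ORDER BY o.created_at DESC LIMIT 3"
    ") recent ON true;";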
+ * + */ + if (RelationInfoContainsOnlyRecurringTuples(plannerInfo, innerrelRelids)) + { + DeferredErrorMessage *deferredError = + DeferredErrorIfUnsupportedLateralSubquery(plannerInfo, + innerrelRelids, + outerrelRelids); + if (deferredError) + { + return deferredError; + } + } + else if (RelationInfoContainsOnlyRecurringTuples(plannerInfo, outerrelRelids)) + { + /* + * This branch uses "else if" instead of "if", because if both + * sides contain only recurring tuples there will never be an + * unsupported lateral subquery. + */ + DeferredErrorMessage *deferredError = + DeferredErrorIfUnsupportedLateralSubquery(plannerInfo, + outerrelRelids, + innerrelRelids); + if (deferredError) + { + return deferredError; + } + } + } } if (recurType == RECURRING_TUPLES_REFERENCE_TABLE) @@ -950,7 +1013,8 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi */ if (!ContainsReferencesToOuterQuery(subqueryTree)) { - deferredError = DeferErrorIfSubqueryRequiresMerge(subqueryTree); + deferredError = DeferErrorIfSubqueryRequiresMerge(subqueryTree, false, + "another query"); if (deferredError) { return deferredError; @@ -1028,24 +1092,29 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi * column, etc.). */ static DeferredErrorMessage * -DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree) +DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree, bool lateral, + char *referencedThing) { bool preconditionsSatisfied = true; char *errorDetail = NULL; + char *lateralString = lateral ? "lateral " : ""; + if (subqueryTree->limitOffset) { preconditionsSatisfied = false; - errorDetail = "Offset clause is currently unsupported when a subquery " - "references a column from another query"; + errorDetail = psprintf("Offset clause is currently unsupported when a %ssubquery " + "references a column from %s", lateralString, + referencedThing); } /* limit is not supported when SubqueryPushdown is not set */ if (subqueryTree->limitCount && !SubqueryPushdown) { preconditionsSatisfied = false; - errorDetail = "Limit in subquery is currently unsupported when a " - "subquery references a column from another query"; + errorDetail = psprintf("Limit clause is currently unsupported when a " + "%ssubquery references a column from %s", lateralString, + referencedThing); } /* group clause list must include partition column */ @@ -1060,9 +1129,9 @@ DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree) if (!groupOnPartitionColumn) { preconditionsSatisfied = false; - errorDetail = "Group by list without partition column is currently " - "unsupported when a subquery references a column " - "from another query"; + errorDetail = psprintf("Group by list without partition column is currently " + "unsupported when a %ssubquery references a column " + "from %s", lateralString, referencedThing); } } @@ -1070,17 +1139,18 @@ DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree) if (subqueryTree->hasAggs && (subqueryTree->groupClause == NULL)) { preconditionsSatisfied = false; - errorDetail = "Aggregates without group by are currently unsupported " - "when a subquery references a column from another query"; + errorDetail = psprintf("Aggregates without group by are currently unsupported " + "when a %ssubquery references a column from %s", + lateralString, referencedThing); } /* having clause without group by on partition column is not supported */ if (subqueryTree->havingQual && (subqueryTree->groupClause == NULL)) { preconditionsSatisfied = false; - errorDetail = "Having qual without 
group by on partition column is " - "currently unsupported when a subquery references " - "a column from another query"; + errorDetail = psprintf("Having qual without group by on partition column is " + "currently unsupported when a %ssubquery references " + "a column from %s", lateralString, referencedThing); } /* @@ -1397,6 +1467,259 @@ RelationInfoContainsOnlyRecurringTuples(PlannerInfo *plannerInfo, Relids relids) } +/* + * RecurringTypeDescription returns a discriptive string for the given + * recurType. This string can be used in error messages to help the users + * understand why a query cannot be planned. + */ +static char * +RecurringTypeDescription(RecurringTuplesType recurType) +{ + switch (recurType) + { + case RECURRING_TUPLES_REFERENCE_TABLE: + { + return "a reference table"; + } + + case RECURRING_TUPLES_FUNCTION: + { + return "a table function"; + } + + case RECURRING_TUPLES_EMPTY_JOIN_TREE: + { + return "a subquery without FROM"; + } + + case RECURRING_TUPLES_RESULT_FUNCTION: + { + return "complex subqueries, CTEs or local tables"; + } + + case RECURRING_TUPLES_VALUES: + { + return "a VALUES clause"; + } + + case RECURRING_TUPLES_INVALID: + { + /* + * This branch should never be hit, but it's here just in case it + * happens. + */ + return "an unknown recurring tuple"; + } + } + + /* + * This should never be hit, but is needed to fix compiler warnings. + */ + return "an unknown recurring tuple"; +} + + +/* + * ContainsReferencesToRelids determines whether the given query contains + * any references that point to columns of the given relids. The given relids + * should be from exactly one query level above the given query. + * + * If the function returns true, then foundRelid is set to the first relid that + * was referenced. + * + * There are some queries where it cannot easily be determined if the relids + * are used, e.g because the query contains placeholder vars. In those cases + * this function returns true, because it's better to error out than to return + * wrong results. But in these cases foundRelid is set to INVALID_RELID. + */ +static bool +ContainsReferencesToRelids(Query *query, Relids relids, int *foundRelid) +{ + RelidsReferenceWalkerContext context = { 0 }; + context.level = 1; + context.relids = relids; + context.foundRelid = INVALID_RELID; + int flags = 0; + + if (query_tree_walker(query, ContainsReferencesToRelidsWalker, + &context, flags)) + { + *foundRelid = context.foundRelid; + return true; + } + return false; +} + + +/* + * ContainsReferencesToRelidsWalker determines whether the given query + * contains any Vars that reference the relids in the context. + * + * ContainsReferencesToRelidsWalker recursively descends into subqueries + * and increases the level by 1 before recursing. + */ +static bool +ContainsReferencesToRelidsWalker(Node *node, RelidsReferenceWalkerContext *context) +{ + if (node == NULL) + { + return false; + } + + if (IsA(node, Var)) + { + Var *var = (Var *) node; + if (var->varlevelsup == context->level && bms_is_member(var->varno, + context->relids)) + { + context->foundRelid = var->varno; + return true; + } + + return false; + } + else if (IsA(node, Aggref)) + { + if (((Aggref *) node)->agglevelsup > context->level) + { + /* + * TODO: Only return true when aggref points to an aggregate that + * uses vars from a recurring tuple. 
+ */ + return true; + } + } + else if (IsA(node, GroupingFunc)) + { + if (((GroupingFunc *) node)->agglevelsup > context->level) + { + /* + * TODO: Only return true when groupingfunc points to a grouping + * func that uses vars from a recurring tuple. + */ + return true; + } + + return false; + } + else if (IsA(node, PlaceHolderVar)) + { + if (((PlaceHolderVar *) node)->phlevelsup > context->level) + { + /* + * TODO: Only return true when aggref points to a placeholdervar + * that uses vars from a recurring tuple. + */ + return true; + } + } + else if (IsA(node, Query)) + { + Query *query = (Query *) node; + int flags = 0; + + context->level += 1; + bool found = query_tree_walker(query, ContainsReferencesToRelidsWalker, + context, flags); + context->level -= 1; + + return found; + } + + return expression_tree_walker(node, ContainsReferencesToRelidsWalker, + context); +} + + +/* + * DeferredErrorIfUnsupportedLateralSubquery returns true if + * notFullyRecurringRelids contains a lateral subquery that we do not support. + * + * If there is an inner join with a lateral subquery we cannot + * push it down when the following properties all hold: + * 1. The lateral subquery contains some non recurring tuples + * 2. The lateral subquery references a recurring tuple from + * outside of the subquery (recurringRelids) + * 3. The lateral subquery requires a merge step (e.g. a LIMIT) + * 4. The reference to the recurring tuple should be something else than an + * equality check on the distribution column, e.g. equality on a non + * distribution column. + * + * Property number four is considered both hard to detect and + * probably not used very often, so we only check for 1, 2 and 3. + */ +static DeferredErrorMessage * +DeferredErrorIfUnsupportedLateralSubquery(PlannerInfo *plannerInfo, + Relids recurringRelids, + Relids notFullyRecurringRelids) +{ + int relationId = -1; + while ((relationId = bms_next_member(notFullyRecurringRelids, relationId)) >= 0) + { + RangeTblEntry *rangeTableEntry = plannerInfo->simple_rte_array[relationId]; + + if (!rangeTableEntry->lateral) + { + continue; + } + + /* TODO: What about others kinds? */ + if (rangeTableEntry->rtekind == RTE_SUBQUERY) + { + /* property number 1, contains non-recurring tuples */ + if (!FindNodeMatchingCheckFunctionInRangeTableList( + list_make1(rangeTableEntry), IsDistributedTableRTE)) + { + continue; + } + + /* property number 2, references recurring tuple */ + int recurringRelid = INVALID_RELID; + if (!ContainsReferencesToRelids(rangeTableEntry->subquery, recurringRelids, + &recurringRelid)) + { + continue; + } + + char *recurTypeDescription = + "an aggregate, grouping func or placeholder var coming from the outer query"; + if (recurringRelid != INVALID_RELID) + { + RangeTblEntry *recurringRangeTableEntry = + plannerInfo->simple_rte_array[recurringRelid]; + RecurringTuplesType recurType = RECURRING_TUPLES_INVALID; + ContainsRecurringRTE(recurringRangeTableEntry, &recurType); + recurTypeDescription = RecurringTypeDescription(recurType); + + /* + * Add the alias for all recuring tuples where it is useful to + * see them. We don't add it for VALUES and intermediate + * results, because there the aliases are currently hardcoded + * strings anyway. 
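Putting the pieces together: for a query like the earlier LATERAL example, DeferErrorIfSubqueryRequiresMerge() is now called with lateral = true and a referencedThing built from RecurringTypeDescription() plus the recurring range table entry's alias, so the resulting detail text composes roughly as sketched below (the wording follows the templates above; the alias "ref" comes from the hypothetical example):

/* fragment showing how the detail text is composed for that example */
char *referencedThing = psprintf("%s (%s)", "a reference table", "ref");

char *detail = psprintf("Limit clause is currently unsupported when a "
                        "%ssubquery references a column from %s",
                        "lateral ", referencedThing);

/*
 * detail now reads:
 *   Limit clause is currently unsupported when a lateral subquery
 *   references a column from a reference table (ref)
 */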
+ */ + if (recurType != RECURRING_TUPLES_VALUES && + recurType != RECURRING_TUPLES_RESULT_FUNCTION) + { + recurTypeDescription = psprintf("%s (%s)", recurTypeDescription, + recurringRangeTableEntry->eref-> + aliasname); + } + } + + /* property number 3, has a merge step */ + DeferredErrorMessage *deferredError = DeferErrorIfSubqueryRequiresMerge( + rangeTableEntry->subquery, true, recurTypeDescription); + if (deferredError) + { + return deferredError; + } + } + } + + return NULL; +} + + /* * FetchFirstRecurType checks whether the relationInfo * contains any recurring table expression, namely a reference table, diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 5c319da53..0c48dc8a4 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -95,11 +95,42 @@ #include "utils/syscache.h" #include "utils/varlena.h" -#include "columnar/mod.h" +#include "columnar/columnar.h" /* marks shared object as one loadable by the postgres version compiled against */ PG_MODULE_MAGIC; +ColumnarSupportsIndexAM_type extern_ColumnarSupportsIndexAM = NULL; +CompressionTypeStr_type extern_CompressionTypeStr = NULL; +IsColumnarTableAmTable_type extern_IsColumnarTableAmTable = NULL; +ReadColumnarOptions_type extern_ReadColumnarOptions = NULL; + +/* + * Define "pass-through" functions so that a SQL function defined as one of + * these symbols in the citus module can use the definition in the columnar + * module. + */ +#define DEFINE_COLUMNAR_PASSTHROUGH_FUNC(funcname) \ + static PGFunction CppConcat(extern_, funcname); \ + PG_FUNCTION_INFO_V1(funcname); \ + Datum funcname(PG_FUNCTION_ARGS) \ + { \ + return CppConcat(extern_, funcname)(fcinfo); \ + } +#define INIT_COLUMNAR_SYMBOL(typename, funcname) \ + CppConcat(extern_, funcname) = \ + (typename) (void *) lookup_external_function(handle, # funcname) + +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_handler) +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(alter_columnar_table_set) +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(alter_columnar_table_reset) +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(upgrade_columnar_storage) +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(downgrade_columnar_storage) +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_relation_storageid) +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_storage_info) +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_store_memory_stats) +DEFINE_COLUMNAR_PASSTHROUGH_FUNC(test_columnar_storage_write_new_page) + #define DUMMY_REAL_TIME_EXECUTOR_ENUM_VALUE 9999999 static char *CitusVersion = CITUS_VERSION; @@ -323,12 +354,6 @@ _PG_init(void) original_client_auth_hook = ClientAuthentication_hook; ClientAuthentication_hook = CitusAuthHook; - /* - * When the options change on a columnar table, we may need to propagate - * the changes to shards. - */ - ColumnarTableSetOptions_hook = ColumnarTableSetOptionsHook; - InitializeMaintenanceDaemon(); /* initialize coordinated transaction management */ @@ -357,7 +382,50 @@ _PG_init(void) { DoInitialCleanup(); } - columnar_init(); + + /* ensure columnar module is loaded at the right time */ + load_file(COLUMNAR_MODULE_NAME, false); + + /* + * Now, acquire symbols from columnar module. First, acquire + * the address of the set options hook, and set it so that we + * can propagate options changes. 
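The DEFINE_COLUMNAR_PASSTHROUGH_FUNC/INIT_COLUMNAR_SYMBOL pair above decouples the citus module from the columnar module: the SQL-callable symbol lives in citus but forwards to a function pointer resolved when columnar is loaded. Expanding the macros for one function makes the indirection easier to follow; the expansion below is written out by hand from the macro definitions above, so treat it as illustrative rather than authoritative:

/* DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_handler) expands to roughly: */
static PGFunction extern_columnar_handler;

PG_FUNCTION_INFO_V1(columnar_handler);
Datum
columnar_handler(PG_FUNCTION_ARGS)
{
    /* forward the call to the symbol looked up from the columnar module */
    return extern_columnar_handler(fcinfo);
}

/* and, inside _PG_init(), INIT_COLUMNAR_SYMBOL(PGFunction, columnar_handler)
 * becomes: */
extern_columnar_handler =
    (PGFunction) (void *) lookup_external_function(handle, "columnar_handler");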
+ */ + ColumnarTableSetOptions_hook_type **ColumnarTableSetOptions_hook_ptr = + (ColumnarTableSetOptions_hook_type **) find_rendezvous_variable( + COLUMNAR_SETOPTIONS_HOOK_SYM); + + /* rendezvous variable registered during columnar initialization */ + Assert(ColumnarTableSetOptions_hook_ptr != NULL); + Assert(*ColumnarTableSetOptions_hook_ptr != NULL); + + **ColumnarTableSetOptions_hook_ptr = ColumnarTableSetOptionsHook; + + /* + * Acquire symbols for columnar functions that citus calls. + */ + void *handle = NULL; + + /* use load_external_function() the first time to initialize the handle */ + extern_ColumnarSupportsIndexAM = (ColumnarSupportsIndexAM_type) (void *) + load_external_function(COLUMNAR_MODULE_NAME, + "ColumnarSupportsIndexAM", + true, &handle); + + INIT_COLUMNAR_SYMBOL(CompressionTypeStr_type, CompressionTypeStr); + INIT_COLUMNAR_SYMBOL(IsColumnarTableAmTable_type, IsColumnarTableAmTable); + INIT_COLUMNAR_SYMBOL(ReadColumnarOptions_type, ReadColumnarOptions); + + /* initialize symbols for "pass-through" functions */ + INIT_COLUMNAR_SYMBOL(PGFunction, columnar_handler); + INIT_COLUMNAR_SYMBOL(PGFunction, alter_columnar_table_set); + INIT_COLUMNAR_SYMBOL(PGFunction, alter_columnar_table_reset); + INIT_COLUMNAR_SYMBOL(PGFunction, upgrade_columnar_storage); + INIT_COLUMNAR_SYMBOL(PGFunction, downgrade_columnar_storage); + INIT_COLUMNAR_SYMBOL(PGFunction, columnar_relation_storageid); + INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info); + INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats); + INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page); } @@ -693,7 +761,7 @@ RegisterCitusConfigVariables(void) "off performance for full transactional consistency on the creation " "of new objects."), &CreateObjectPropagationMode, - CREATE_OBJECT_PROPAGATION_DEFERRED, create_object_propagation_options, + CREATE_OBJECT_PROPAGATION_IMMEDIATE, create_object_propagation_options, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); @@ -828,25 +896,6 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL, NULL, NULL, NULL); - /* - * We shouldn't need this variable after we drop support to PostgreSQL 11 and - * below. So, noting it here with PG_VERSION_NUM < PG_VERSION_12 - */ - DefineCustomBoolVariable( - "citus.enable_cte_inlining", - gettext_noop("When set to false, CTE inlining feature is disabled."), - gettext_noop( - "This feature is not intended for users and it is deprecated. It is developed " - "to get consistent regression test outputs between Postgres 11" - "and Postgres 12. In Postgres 12+, the user can control the behaviour" - "by [NOT] MATERIALIZED keyword on CTEs. 
However, in PG 11, we cannot do " - "that."), - &EnableCTEInlining, - true, - PGC_SUSET, - GUC_NO_SHOW_ALL, - NULL, NULL, NULL); - DefineCustomBoolVariable( "citus.enable_ddl_propagation", gettext_noop("Enables propagating DDL statements to worker shards"), diff --git a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql index f341976c9..688cfad8c 100644 --- a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql +++ b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql @@ -103,3 +103,5 @@ GRANT SELECT ON pg_catalog.pg_dist_object TO public; #include "udfs/citus_nodeid_for_gpid/11.0-1.sql" #include "udfs/citus_pid_for_gpid/11.0-1.sql" + +#include "udfs/citus_coordinator_nodeid/11.0-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql index 19d5bb22d..b7f4018d9 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql @@ -370,3 +370,5 @@ DROP FUNCTION pg_catalog.citus_nodeport_for_nodeid(integer); DROP FUNCTION pg_catalog.citus_nodeid_for_gpid(bigint); DROP FUNCTION pg_catalog.citus_pid_for_gpid(bigint); + +DROP FUNCTION pg_catalog.citus_coordinator_nodeid(); diff --git a/src/backend/distributed/sql/udfs/citus_coordinator_nodeid/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_coordinator_nodeid/11.0-1.sql new file mode 100644 index 000000000..2bf0dd250 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_coordinator_nodeid/11.0-1.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_coordinator_nodeid() + RETURNS integer + LANGUAGE C STABLE STRICT + AS 'MODULE_PATHNAME', $$citus_coordinator_nodeid$$; + +COMMENT ON FUNCTION pg_catalog.citus_coordinator_nodeid() + IS 'returns node id of the coordinator node'; diff --git a/src/backend/distributed/sql/udfs/citus_coordinator_nodeid/latest.sql b/src/backend/distributed/sql/udfs/citus_coordinator_nodeid/latest.sql new file mode 100644 index 000000000..2bf0dd250 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_coordinator_nodeid/latest.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_coordinator_nodeid() + RETURNS integer + LANGUAGE C STABLE STRICT + AS 'MODULE_PATHNAME', $$citus_coordinator_nodeid$$; + +COMMENT ON FUNCTION pg_catalog.citus_coordinator_nodeid() + IS 'returns node id of the coordinator node'; diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql index d93dd0f93..7b7d357ff 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql @@ -117,13 +117,12 @@ END; -- first, check if all nodes have the same versions SELECT - count(*) INTO worker_node_version_count + count(distinct result) INTO worker_node_version_count FROM - run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'';') - GROUP BY result; + run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'''); IF enforce_version_check AND worker_node_version_count != 1 THEN RAISE EXCEPTION 'All nodes should have the same Citus version installed. 
Currently ' - 'the some of the workers has version different versions'; + 'some of the workers have different versions.'; ELSE RAISE DEBUG 'All worker nodes have the same Citus version'; END IF; diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql index d93dd0f93..7b7d357ff 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql @@ -117,13 +117,12 @@ END; -- first, check if all nodes have the same versions SELECT - count(*) INTO worker_node_version_count + count(distinct result) INTO worker_node_version_count FROM - run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'';') - GROUP BY result; + run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'''); IF enforce_version_check AND worker_node_version_count != 1 THEN RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently ' - 'the some of the workers has version different versions'; + 'some of the workers have different versions.'; ELSE RAISE DEBUG 'All worker nodes have the same Citus version'; END IF; diff --git a/src/backend/distributed/test/hide_shards.c b/src/backend/distributed/test/hide_shards.c new file mode 100644 index 000000000..59e738c36 --- /dev/null +++ b/src/backend/distributed/test/hide_shards.c @@ -0,0 +1,40 @@ +/*------------------------------------------------------------------------- + * + * hide_shards.c + * + * This file contains functions to provide helper UDFs for hiding + * shards from the applications. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "funcapi.h" +#include "miscadmin.h" +#include "pgstat.h" + +#include "distributed/metadata_utility.h" +#include "distributed/worker_shard_visibility.h" + + +PG_FUNCTION_INFO_V1(set_backend_type); + +/* + * set_backend_type is an external API to set the MyBackendType and + * re-checks the shard visibility. + */ +Datum +set_backend_type(PG_FUNCTION_ARGS) +{ + EnsureSuperUser(); + + MyBackendType = PG_GETARG_INT32(0); + + elog(NOTICE, "backend type switched to: %s", + GetBackendTypeDesc(MyBackendType)); + + ResetHideShardsDecision(); + + PG_RETURN_VOID(); +} diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 78e14367a..ccd3a4e58 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -245,7 +245,7 @@ InitializeTransactionManagement(void) * transaction independent connection management. * * NB: There should only ever be a single transaction callback in citus, the - * ordering between the callbacks and thee actions within those callbacks + * ordering between the callbacks and the actions within those callbacks * otherwise becomes too undeterministic / hard to reason about. 
*/ static void diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index e94abed53..69a318023 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -30,8 +30,9 @@ #include "distributed/transaction_recovery.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" +#include "distributed/jsonbutils.h" #include "utils/memutils.h" - +#include "utils/builtins.h" static void SendCommandToMetadataWorkersParams(const char *command, const char *user, int parameterCount, @@ -71,7 +72,7 @@ void SendCommandToWorkersAsUser(TargetWorkerSet targetWorkerSet, const char *nodeUser, const char *command) { - List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock); + List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock); /* run commands serially */ WorkerNode *workerNode = NULL; @@ -184,7 +185,7 @@ void SendBareCommandListToMetadataWorkers(List *commandList) { TargetWorkerSet targetWorkerSet = NON_COORDINATOR_METADATA_NODES; - List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock); + List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock); char *nodeUser = CurrentUserName(); ErrorIfAnyMetadataNodeOutOfSync(workerNodeList); @@ -225,7 +226,7 @@ SendCommandToMetadataWorkersParams(const char *command, const char *const *parameterValues) { List *workerNodeList = TargetWorkerSetNodeList(NON_COORDINATOR_METADATA_NODES, - ShareLock); + RowShareLock); ErrorIfAnyMetadataNodeOutOfSync(workerNodeList); @@ -304,7 +305,7 @@ OpenConnectionsToWorkersInParallel(TargetWorkerSet targetWorkerSet, const char * { List *connectionList = NIL; - List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock); + List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock); WorkerNode *workerNode = NULL; foreach_ptr(workerNode, workerNodeList) @@ -373,7 +374,7 @@ SendCommandToWorkersParamsInternal(TargetWorkerSet targetWorkerSet, const char * const char *const *parameterValues) { List *connectionList = NIL; - List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock); + List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock); UseCoordinatedTransaction(); Use2PCForCoordinatedTransaction(); @@ -639,3 +640,65 @@ ErrorIfAnyMetadataNodeOutOfSync(List *metadataNodeList) } } } + + +/* + * IsWorkerTheCurrentNode checks if the given worker refers to the + * the current node by comparing the server id of the worker and of the + * current nodefrom pg_dist_node_metadata + */ +bool +IsWorkerTheCurrentNode(WorkerNode *workerNode) +{ + int connectionFlags = REQUIRE_METADATA_CONNECTION; + + MultiConnection *workerConnection = + GetNodeUserDatabaseConnection(connectionFlags, + workerNode->workerName, + workerNode->workerPort, + CurrentUserName(), + NULL); + + const char *command = + "SELECT metadata ->> 'server_id' AS server_id FROM pg_dist_node_metadata"; + + int resultCode = SendRemoteCommand(workerConnection, command); + + if (resultCode == 0) + { + CloseConnection(workerConnection); + return false; + } + + PGresult *result = GetRemoteCommandResult(workerConnection, true); + + if (result == NULL) + { + return false; + } + + List *commandResult = ReadFirstColumnAsText(result); + + PQclear(result); + ForgetResults(workerConnection); + + if ((list_length(commandResult) != 1)) + { + return false; + } + + StringInfo 
resultInfo = (StringInfo) linitial(commandResult); + char *workerServerId = resultInfo->data; + + Datum metadata = DistNodeMetadata(); + text *currentServerIdTextP = ExtractFieldTextP(metadata, "server_id"); + + if (currentServerIdTextP == NULL) + { + return false; + } + + char *currentServerId = text_to_cstring(currentServerIdTextP); + + return strcmp(workerServerId, currentServerId) == 0; +} diff --git a/src/backend/distributed/utils/colocation_utils.c b/src/backend/distributed/utils/colocation_utils.c index 9fb616ad8..5596912a8 100644 --- a/src/backend/distributed/utils/colocation_utils.c +++ b/src/backend/distributed/utils/colocation_utils.c @@ -303,9 +303,6 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId) void ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) { - ListCell *leftShardIntervalCell = NULL; - ListCell *rightShardIntervalCell = NULL; - /* get sorted shard interval lists for both tables */ List *leftShardIntervalList = LoadShardIntervalList(leftRelationId); List *rightShardIntervalList = LoadShardIntervalList(rightRelationId); @@ -329,15 +326,11 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) } /* compare shard intervals one by one */ - forboth(leftShardIntervalCell, leftShardIntervalList, - rightShardIntervalCell, rightShardIntervalList) + ShardInterval *leftInterval = NULL; + ShardInterval *rightInterval = NULL; + forboth_ptr(leftInterval, leftShardIntervalList, + rightInterval, rightShardIntervalList) { - ShardInterval *leftInterval = (ShardInterval *) lfirst(leftShardIntervalCell); - ShardInterval *rightInterval = (ShardInterval *) lfirst(rightShardIntervalCell); - - ListCell *leftPlacementCell = NULL; - ListCell *rightPlacementCell = NULL; - uint64 leftShardId = leftInterval->shardId; uint64 rightShardId = rightInterval->shardId; @@ -373,14 +366,11 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) CompareShardPlacementsByNode); /* compare shard placements one by one */ - forboth(leftPlacementCell, sortedLeftPlacementList, - rightPlacementCell, sortedRightPlacementList) + ShardPlacement *leftPlacement = NULL; + ShardPlacement *rightPlacement = NULL; + forboth_ptr(leftPlacement, sortedLeftPlacementList, + rightPlacement, sortedRightPlacementList) { - ShardPlacement *leftPlacement = - (ShardPlacement *) lfirst(leftPlacementCell); - ShardPlacement *rightPlacement = - (ShardPlacement *) lfirst(rightPlacementCell); - /* * If shard placements are on different nodes, these shard * placements are not colocated. diff --git a/src/backend/distributed/utils/jsonbutils.c b/src/backend/distributed/utils/jsonbutils.c new file mode 100644 index 000000000..22fa4f568 --- /dev/null +++ b/src/backend/distributed/utils/jsonbutils.c @@ -0,0 +1,113 @@ +#include "postgres.h" + +#include "pg_version_compat.h" + +#include "catalog/namespace.h" +#include "catalog/pg_class.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_type.h" + +#include "utils/array.h" +#include "utils/json.h" +#include "distributed/jsonbutils.h" +#include "distributed/metadata_cache.h" + +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "fmgr.h" + + +/* + * ExtractFieldJsonb gets value of fieldName from jsonbDoc and puts it + * into result. If not found, returns false. Otherwise, returns true. + * The field is returned as a Text* Datum if as_text is true, or a Jsonb* + * Datum if as_text is false. 
+ */ +static bool +ExtractFieldJsonb(Datum jsonbDoc, const char *fieldName, Datum *result, bool as_text) +{ + Datum pathArray[1] = { CStringGetTextDatum(fieldName) }; + bool pathNulls[1] = { false }; + bool typeByValue = false; + char typeAlignment = 0; + int16 typeLength = 0; + int dimensions[1] = { 1 }; + int lowerbounds[1] = { 1 }; + + get_typlenbyvalalign(TEXTOID, &typeLength, &typeByValue, &typeAlignment); + + ArrayType *pathArrayObject = construct_md_array(pathArray, pathNulls, 1, dimensions, + lowerbounds, TEXTOID, typeLength, + typeByValue, typeAlignment); + Datum pathDatum = PointerGetDatum(pathArrayObject); + + FmgrInfo fmgrInfo; + + if (as_text) + { + fmgr_info(JsonbExtractPathTextFuncId(), &fmgrInfo); + } + else + { + fmgr_info(JsonbExtractPathFuncId(), &fmgrInfo); + } + + LOCAL_FCINFO(functionCallInfo, 2); + InitFunctionCallInfoData(*functionCallInfo, &fmgrInfo, 2, DEFAULT_COLLATION_OID, NULL, + NULL); + + fcSetArg(functionCallInfo, 0, jsonbDoc); + fcSetArg(functionCallInfo, 1, pathDatum); + + *result = FunctionCallInvoke(functionCallInfo); + return !functionCallInfo->isnull; +} + + +/* + * ExtractFieldBoolean gets value of fieldName from jsonbDoc, or returns + * defaultValue if it doesn't exist. + */ +bool +ExtractFieldBoolean(Datum jsonbDoc, const char *fieldName, bool defaultValue) +{ + Datum jsonbDatum = 0; + bool found = ExtractFieldJsonb(jsonbDoc, fieldName, &jsonbDatum, false); + if (!found) + { + return defaultValue; + } + + Datum boolDatum = DirectFunctionCall1(jsonb_bool, jsonbDatum); + return DatumGetBool(boolDatum); +} + + +/* + * ExtractFieldTextP gets value of fieldName as text* from jsonbDoc, or + * returns NULL if it doesn't exist. + */ +text * +ExtractFieldTextP(Datum jsonbDoc, const char *fieldName) +{ + Datum jsonbDatum = 0; + + bool found = ExtractFieldJsonb(jsonbDoc, fieldName, &jsonbDatum, true); + if (!found) + { + return NULL; + } + + return DatumGetTextP(jsonbDatum); +} + + +/* + * ExtractFieldJsonbDatum gets value of fieldName from jsonbDoc and puts it + * into result. If not found, returns false. Otherwise, returns true. + */ +bool +ExtractFieldJsonbDatum(Datum jsonbDoc, const char *fieldName, Datum *result) +{ + return ExtractFieldJsonb(jsonbDoc, fieldName, result, false); +} diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 6ce96bd9f..2ba4797bf 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -244,13 +244,10 @@ CompareStringList(List *list1, List *list2) return false; } - ListCell *cell1 = NULL; - ListCell *cell2 = NULL; - forboth(cell1, list1, cell2, list2) + const char *str1 = NULL; + const char *str2 = NULL; + forboth_ptr(str1, list1, str2, list2) { - const char *str1 = lfirst(cell1); - const char *str2 = lfirst(cell2); - if (strcmp(str1, str2) != 0) { return false; @@ -286,18 +283,16 @@ CreateStmtListByObjectAddress(const ObjectAddress *address) case OCLASS_TSCONFIG: { - /* - * We do support TEXT SEARCH CONFIGURATION, however, we can't recreate the - * object in 1 command. Since the returned text is compared to the create - * statement sql we always want the sql to be different compared to the - * canonical creation sql we return here, hence we return an empty string, as - * that should never match the sql we have passed in for the creation. 
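The helpers above centralize the jsonb field extraction that multi_explain.c previously open-coded, and IsWorkerTheCurrentNode elsewhere in this patch uses ExtractFieldTextP for the same purpose. A minimal usage sketch, assuming a jsonb Datum such as the one returned by DistNodeMetadata(); the "some_flag" field is hypothetical and only shows the default-value behaviour:

/* read an optional text field and an optional boolean field from a jsonb Datum */
Datum metadata = DistNodeMetadata();

text *serverIdText = ExtractFieldTextP(metadata, "server_id");
if (serverIdText != NULL)
{
    char *serverId = text_to_cstring(serverIdText);
    elog(DEBUG1, "local server_id: %s", serverId);
}

/* hypothetical field name, illustrating the fallback to the default value */
bool someFlag = ExtractFieldBoolean(metadata, "some_flag", false);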
- */ - List *stmts = GetCreateTextSearchConfigStatements(address); return DeparseTreeNodes(stmts); } + case OCLASS_TSDICT: + { + List *stmts = GetCreateTextSearchDictionaryStatements(address); + return DeparseTreeNodes(stmts); + } + case OCLASS_TYPE: { return list_make1(DeparseTreeNode(CreateTypeStmtByObjectAddress(address))); diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c index 0f425583b..452de62e3 100644 --- a/src/backend/distributed/worker/worker_drop_protocol.c +++ b/src/backend/distributed/worker/worker_drop_protocol.c @@ -27,12 +27,16 @@ #include "distributed/listutils.h" #include "distributed/metadata_utility.h" #include "distributed/coordinator_protocol.h" +#include "distributed/commands/utility_hook.h" #include "distributed/metadata_cache.h" #include "distributed/metadata/distobject.h" #include "distributed/multi_partitioning_utils.h" +#include "distributed/worker_protocol.h" #include "foreign/foreign.h" +#include "tcop/utility.h" #include "utils/builtins.h" #include "utils/fmgroids.h" +#include "utils/lsyscache.h" PG_FUNCTION_INFO_V1(worker_drop_distributed_table); PG_FUNCTION_INFO_V1(worker_drop_shell_table); @@ -142,20 +146,11 @@ WorkerDropDistributedTable(Oid relationId) UnmarkObjectDistributed(&distributedTableObject); - if (!IsObjectAddressOwnedByExtension(&distributedTableObject, NULL)) - { - /* - * If the table is owned by an extension, we cannot drop it, nor should we - * until the user runs DROP EXTENSION. Therefore, we skip dropping the - * table and only delete the metadata. - * - * We drop the table with cascade since other tables may be referring to it. - */ - performDeletion(&distributedTableObject, DROP_CASCADE, - PERFORM_DELETION_INTERNAL); - } - - /* iterate over shardList to delete the corresponding rows */ + /* + * Remove metadata before object's itself to make functions no-op within + * drop event trigger for undistributed objects on worker nodes except + * removing pg_dist_object entries. + */ List *shardList = LoadShardList(relationId); uint64 *shardIdPointer = NULL; foreach_ptr(shardIdPointer, shardList) @@ -176,6 +171,33 @@ WorkerDropDistributedTable(Oid relationId) /* delete the row from pg_dist_partition */ DeletePartitionRow(relationId); + + /* + * If the table is owned by an extension, we cannot drop it, nor should we + * until the user runs DROP EXTENSION. Therefore, we skip dropping the + * table. + */ + if (!IsObjectAddressOwnedByExtension(&distributedTableObject, NULL)) + { + char *relName = get_rel_name(relationId); + Oid schemaId = get_rel_namespace(relationId); + char *schemaName = get_namespace_name(schemaId); + + StringInfo dropCommand = makeStringInfo(); + appendStringInfo(dropCommand, "DROP%sTABLE %s CASCADE", + IsForeignTable(relationId) ? " FOREIGN " : " ", + quote_qualified_identifier(schemaName, relName)); + + Node *dropCommandNode = ParseTreeNode(dropCommand->data); + + /* + * We use ProcessUtilityParseTree (instead of performDeletion) to make sure that + * we also drop objects that depend on the table and call the drop event trigger + * which removes them from pg_dist_object. 
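The worker_drop_protocol.c hunk above replaces performDeletion with a build-the-SQL-and-run-it pattern so the DROP goes through the utility hook and fires the drop event trigger. Condensed into a hypothetical helper (the function name is not in the diff; it reuses only calls and headers that hunk itself adds), the pattern looks like this:

```c
/*
 * Hypothetical helper distilled from the hunk above. Dropping through
 * ProcessUtilityParseTree, instead of performDeletion, also drops objects
 * that depend on the table and lets the drop event trigger clean up
 * pg_dist_object on the worker.
 */
static void
DropTableViaUtilityHook(Oid relationId)
{
	char *relName = get_rel_name(relationId);
	char *schemaName = get_namespace_name(get_rel_namespace(relationId));

	StringInfo dropCommand = makeStringInfo();
	appendStringInfo(dropCommand, "DROP%sTABLE %s CASCADE",
					 IsForeignTable(relationId) ? " FOREIGN " : " ",
					 quote_qualified_identifier(schemaName, relName));

	Node *dropCommandNode = ParseTreeNode(dropCommand->data);
	ProcessUtilityParseTree(dropCommandNode, dropCommand->data,
							PROCESS_UTILITY_QUERY, NULL, None_Receiver, NULL);
}
```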
+ */ + ProcessUtilityParseTree(dropCommandNode, dropCommand->data, + PROCESS_UTILITY_QUERY, NULL, None_Receiver, NULL); + } } diff --git a/src/backend/distributed/worker/worker_merge_protocol.c b/src/backend/distributed/worker/worker_merge_protocol.c index 577a42a9b..7c65af90f 100644 --- a/src/backend/distributed/worker/worker_merge_protocol.c +++ b/src/backend/distributed/worker/worker_merge_protocol.c @@ -29,6 +29,7 @@ #include "commands/copy.h" #include "commands/tablecmds.h" #include "common/string.h" +#include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/worker_protocol.h" #include "distributed/version_compat.h" @@ -436,14 +437,11 @@ List * ColumnDefinitionList(List *columnNameList, List *columnTypeList) { List *columnDefinitionList = NIL; - ListCell *columnNameCell = NULL; - ListCell *columnTypeCell = NULL; - forboth(columnNameCell, columnNameList, columnTypeCell, columnTypeList) + const char *columnName = NULL; + const char *columnType = NULL; + forboth_ptr(columnName, columnNameList, columnType, columnTypeList) { - const char *columnName = (const char *) lfirst(columnNameCell); - const char *columnType = (const char *) lfirst(columnTypeCell); - /* * We should have a SQL compatible column type declaration; we first * convert this type to PostgreSQL's type identifiers and modifiers. diff --git a/src/backend/distributed/worker/worker_shard_visibility.c b/src/backend/distributed/worker/worker_shard_visibility.c index ca05e8cee..da9c87a22 100644 --- a/src/backend/distributed/worker/worker_shard_visibility.c +++ b/src/backend/distributed/worker/worker_shard_visibility.c @@ -8,6 +8,7 @@ */ #include "postgres.h" +#include "miscadmin.h" #include "catalog/index.h" #include "catalog/namespace.h" @@ -47,6 +48,7 @@ static HideShardsMode HideShards = CHECK_APPLICATION_NAME; static bool ShouldHideShards(void); static bool ShouldHideShardsInternal(void); +static bool IsPgBgWorker(void); static bool FilterShardsFromPgclass(Node *node, void *context); static Node * CreateRelationIsAKnownShardFilter(int pgClassVarno); @@ -202,12 +204,15 @@ RelationIsAKnownShard(Oid shardRelationId) } } - Relation relation = try_relation_open(shardRelationId, AccessShareLock); - if (relation == NULL) + /* + * We do not take locks here, because that might block a query on pg_class. + */ + + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(shardRelationId))) { + /* relation does not exist */ return false; } - relation_close(relation, NoLock); /* * If the input relation is an index we simply replace the @@ -331,6 +336,28 @@ ResetHideShardsDecision(void) static bool ShouldHideShardsInternal(void) { + if (MyBackendType == B_BG_WORKER) + { + if (IsPgBgWorker()) + { + /* + * If a background worker belongs to Postgres, we should + * never hide shards. For other background workers, enforce + * the application_name check below. + */ + return false; + } + } + else if (MyBackendType != B_BACKEND) + { + /* + * We are aiming only to hide shards from client + * backends or certain background workers(see above), + * not backends like walsender or checkpointer. + */ + return false; + } + if (IsCitusInternalBackend() || IsRebalancerInternalBackend()) { /* we never hide shards from Citus */ @@ -369,6 +396,24 @@ ShouldHideShardsInternal(void) } +/* + * IsPgBgWorker returns true if the current background worker + * belongs to Postgres. 
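For context on the bgw_library_name comparison documented above: background workers started by core PostgreSQL (parallel workers, the logical replication launcher, and so on) register with the library name "postgres", while a worker registered by an extension carries that extension's library name. A generic registration sketch, not taken from Citus, illustrates where that name comes from:

```c
/*
 * Generic example only: an extension's worker registers with the
 * extension's own library name, so the strcmp against "postgres" in
 * IsPgBgWorker() matches only workers that core PostgreSQL starts.
 */
#include "postgres.h"
#include "postmaster/bgworker.h"

static void
RegisterExampleWorker(void)
{
	BackgroundWorker worker;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	snprintf(worker.bgw_name, BGW_MAXLEN, "example_extension worker");
	snprintf(worker.bgw_library_name, BGW_MAXLEN, "example_extension");
	snprintf(worker.bgw_function_name, BGW_MAXLEN, "ExampleWorkerMain");

	RegisterBackgroundWorker(&worker);
}
```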
+ */ +static bool +IsPgBgWorker(void) +{ + Assert(MyBackendType == B_BG_WORKER); + + if (MyBgworkerEntry) + { + return strcmp(MyBgworkerEntry->bgw_library_name, "postgres") == 0; + } + + return false; +} + + /* * FilterShardsFromPgclass adds a NOT relation_is_a_known_shard(oid) filter * to the security quals of pg_class RTEs. diff --git a/src/include/columnar/columnar.h b/src/include/columnar/columnar.h index 5195cbfee..4d31a45ed 100644 --- a/src/include/columnar/columnar.h +++ b/src/include/columnar/columnar.h @@ -25,6 +25,10 @@ #include "columnar/columnar_compression.h" #include "columnar/columnar_metadata.h" +#define COLUMNAR_MODULE_NAME "citus_columnar" + +#define COLUMNAR_SETOPTIONS_HOOK_SYM "ColumnarTableSetOptions_hook" + /* Defines for valid option names */ #define OPTION_NAME_COMPRESSION_TYPE "compression" #define OPTION_NAME_STRIPE_ROW_COUNT "stripe_row_limit" @@ -187,6 +191,10 @@ typedef enum StripeWriteStateEnum STRIPE_WRITE_IN_PROGRESS } StripeWriteStateEnum; +typedef bool (*ColumnarSupportsIndexAM_type)(char *); +typedef const char *(*CompressionTypeStr_type)(CompressionType); +typedef bool (*IsColumnarTableAmTable_type)(Oid); +typedef bool (*ReadColumnarOptions_type)(Oid, ColumnarOptions *); /* ColumnarReadState represents state of a columnar scan. */ struct ColumnarReadState; @@ -205,8 +213,8 @@ extern int columnar_compression_level; /* called when the user changes options on the given relation */ typedef void (*ColumnarTableSetOptions_hook_type)(Oid relid, ColumnarOptions options); -extern ColumnarTableSetOptions_hook_type ColumnarTableSetOptions_hook; +extern void columnar_init(void); extern void columnar_init_gucs(void); extern CompressionType ParseCompressionType(const char *compressionTypeString); @@ -315,5 +323,4 @@ extern bool PendingWritesInUpperTransactions(Oid relfilenode, SubTransactionId currentSubXid); extern MemoryContext GetWriteContextForDebug(void); - #endif /* COLUMNAR_H */ diff --git a/src/include/columnar/mod.h b/src/include/columnar/mod.h deleted file mode 100644 index 7440a9fd7..000000000 --- a/src/include/columnar/mod.h +++ /dev/null @@ -1,19 +0,0 @@ -/*------------------------------------------------------------------------- - * - * mod.h - * - * Type and function declarations for columnar - * - * Copyright (c) Citus Data, Inc. 
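The new function-pointer typedefs and the COLUMNAR_MODULE_NAME / COLUMNAR_SETOPTIONS_HOOK_SYM defines in the columnar.h hunk above, together with the extern_* pointers declared later in shared_library_init.h, suggest that citus resolves columnar entry points at runtime rather than linking against them. The wiring below is only a guess at how that could be done with PostgreSQL's load_external_function; the exported symbol name is assumed, and this is not the implementation in this commit.

```c
/*
 * Speculative sketch: resolve one columnar entry point from the
 * citus_columnar library at load time and stash it behind a function
 * pointer, so citus proper needs no link-time dependency on the
 * columnar module.
 */
#include "postgres.h"
#include "fmgr.h"

#include "columnar/columnar.h"

IsColumnarTableAmTable_type extern_IsColumnarTableAmTable = NULL;

static void
ResolveColumnarSymbols(void)
{
	/* COLUMNAR_MODULE_NAME expands to "citus_columnar" (see columnar.h above) */
	extern_IsColumnarTableAmTable = (IsColumnarTableAmTable_type)
		load_external_function(COLUMNAR_MODULE_NAME, "IsColumnarTableAmTable",
							   /* signalNotFound */ false, NULL);
}
```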
- * - *------------------------------------------------------------------------- - */ - -#ifndef MOD_H -#define MOD_H - -/* Function declarations for extension loading and unloading */ -extern void columnar_init(void); -extern void columnar_fini(void); - -#endif /* MOD_H */ diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h index e8b712d67..83ee046fe 100644 --- a/src/include/distributed/citus_ruleutils.h +++ b/src/include/distributed/citus_ruleutils.h @@ -29,6 +29,7 @@ /* Function declarations for version independent Citus ruleutils wrapper functions */ extern char * pg_get_extensiondef_string(Oid tableRelationId); extern Oid get_extension_schema(Oid ext_oid); +extern char * get_extension_version(Oid extensionId); extern char * pg_get_serverdef_string(Oid tableRelationId); extern char * pg_get_sequencedef_string(Oid sequenceRelid); extern Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId); diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 660220324..ef22bbb63 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -123,6 +123,8 @@ typedef enum SearchForeignKeyColumnFlags /* aggregate.c - forward declarations */ +extern List * PreprocessDefineAggregateStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); extern List * PostprocessDefineAggregateStmt(Node *node, const char *queryString); /* cluster.c - forward declarations */ @@ -146,6 +148,7 @@ extern List * PreprocessDropCollationStmt(Node *stmt, const char *queryString, extern List * PreprocessAlterCollationOwnerStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PostprocessAlterCollationOwnerStmt(Node *node, const char *queryString); extern List * PreprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); @@ -281,6 +284,7 @@ extern ObjectAddress RenameFunctionStmtObjectAddress(Node *stmt, bool missing_ok); extern List * PreprocessAlterFunctionOwnerStmt(Node *stmt, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PostprocessAlterFunctionOwnerStmt(Node *stmt, const char *queryString); extern ObjectAddress AlterFunctionOwnerObjectAddress(Node *stmt, bool missing_ok); extern List * PreprocessAlterFunctionSchemaStmt(Node *stmt, const char *queryString, @@ -432,6 +436,7 @@ extern List * PreprocessAlterStatisticsStmt(Node *node, const char *queryString, extern List * PreprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PostprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString); extern List * GetExplicitStatisticsCommandList(Oid relationId); extern List * GetExplicitStatisticsSchemaIdList(Oid relationId); extern List * GetAlterIndexStatisticsCommands(Oid indexOid); @@ -476,49 +481,94 @@ extern bool ConstrTypeUsesIndex(ConstrType constrType); /* text_search.c - forward declarations */ extern List * PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString); +extern List * PostprocessCreateTextSearchDictionaryStmt(Node *node, + const char *queryString); extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address); +extern List * GetCreateTextSearchDictionaryStatements(const ObjectAddress *address); extern List * CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address); +extern List * 
CreateTextSearchDictDDLCommandsIdempotent(const ObjectAddress *address); extern List * PreprocessDropTextSearchConfigurationStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessDropTextSearchDictionaryStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); extern List * PreprocessAlterTextSearchConfigurationStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessAlterTextSearchDictionaryStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); extern List * PreprocessRenameTextSearchConfigurationStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessRenameTextSearchDictionaryStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); extern List * PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessAlterTextSearchDictionarySchemaStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); extern List * PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString); +extern List * PostprocessAlterTextSearchDictionarySchemaStmt(Node *node, + const char *queryString); extern List * PreprocessTextSearchConfigurationCommentStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessTextSearchDictionaryCommentStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); extern List * PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessAlterTextSearchDictionaryOwnerStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); extern List * PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString); +extern List * PostprocessAlterTextSearchDictionaryOwnerStmt(Node *node, + const char *queryString); extern ObjectAddress CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress CreateTextSearchDictObjectAddress(Node *node, + bool missing_ok); extern ObjectAddress RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress RenameTextSearchDictionaryStmtObjectAddress(Node *node, + bool missing_ok); extern ObjectAddress AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress AlterTextSearchDictionaryStmtObjectAddress(Node *node, + bool missing_ok); extern ObjectAddress AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, + bool missing_ok); extern ObjectAddress TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress TextSearchDictCommentObjectAddress(Node *node, + bool missing_ok); extern ObjectAddress AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok); +extern ObjectAddress AlterTextSearchDictOwnerObjectAddress(Node *node, + bool missing_ok); extern char * GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address); +extern char * GenerateBackupNameForTextSearchDict(const ObjectAddress *address); extern List * 
get_ts_config_namelist(Oid tsconfigOid); /* truncate.c - forward declarations */ diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index ad575cfe5..2e31bc9da 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -18,6 +18,7 @@ #include "lib/ilist.h" #include "pg_config.h" #include "portability/instr_time.h" +#include "storage/latch.h" #include "utils/guc.h" #include "utils/hsearch.h" #include "utils/timestamp.h" @@ -34,6 +35,10 @@ /* application name used for internal connections in rebalancer */ #define CITUS_REBALANCER_NAME "citus_rebalancer" +/* deal with waiteventset errors */ +#define WAIT_EVENT_SET_INDEX_NOT_INITIALIZED -1 +#define WAIT_EVENT_SET_INDEX_FAILED -2 + /* forward declare, to avoid forcing large headers on everyone */ struct pg_conn; /* target of the PGconn typedef */ struct MemoryContextData; @@ -284,6 +289,13 @@ extern bool IsCitusInternalBackend(void); extern bool IsRebalancerInternalBackend(void); extern void MarkConnectionConnected(MultiConnection *connection); +/* waiteventset utilities */ +extern int CitusAddWaitEventSetToSet(WaitEventSet *set, uint32 events, pgsocket fd, + Latch *latch, void *user_data); + +extern bool CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, + Latch *latch); + /* time utilities */ extern double MillisecondsPassedSince(instr_time moment); extern long MillisecondsToTimeout(instr_time start, long msAfterStart); diff --git a/src/include/distributed/cte_inline.h b/src/include/distributed/cte_inline.h index 09cac7bdb..f9fd8fa9d 100644 --- a/src/include/distributed/cte_inline.h +++ b/src/include/distributed/cte_inline.h @@ -13,8 +13,6 @@ #include "nodes/parsenodes.h" -extern bool EnableCTEInlining; - extern void RecursivelyInlineCtesInQueryTree(Query *query); extern bool QueryTreeContainsInlinableCTE(Query *queryTree); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index e3b02cdfc..2c74f060c 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -63,14 +63,21 @@ extern char * DeparseAlterTableStmt(Node *node); extern void QualifyAlterTableSchemaStmt(Node *stmt); -/* foward declarations fro deparse_text_search.c */ -extern char * DeparseCreateTextSearchStmt(Node *node); -extern char * DeparseDropTextSearchConfigurationStmt(Node *node); -extern char * DeparseRenameTextSearchConfigurationStmt(Node *node); -extern char * DeparseAlterTextSearchConfigurationStmt(Node *node); -extern char * DeparseAlterTextSearchConfigurationSchemaStmt(Node *node); -extern char * DeparseTextSearchConfigurationCommentStmt(Node *node); +/* forward declarations for deparse_text_search.c */ extern char * DeparseAlterTextSearchConfigurationOwnerStmt(Node *node); +extern char * DeparseAlterTextSearchConfigurationSchemaStmt(Node *node); +extern char * DeparseAlterTextSearchConfigurationStmt(Node *node); +extern char * DeparseAlterTextSearchDictionaryOwnerStmt(Node *node); +extern char * DeparseAlterTextSearchDictionarySchemaStmt(Node *node); +extern char * DeparseAlterTextSearchDictionaryStmt(Node *node); +extern char * DeparseCreateTextSearchConfigurationStmt(Node *node); +extern char * DeparseCreateTextSearchDictionaryStmt(Node *node); +extern char * DeparseDropTextSearchConfigurationStmt(Node *node); +extern char * DeparseDropTextSearchDictionaryStmt(Node *node); +extern char * DeparseRenameTextSearchConfigurationStmt(Node *node); +extern char * 
DeparseRenameTextSearchDictionaryStmt(Node *node); +extern char * DeparseTextSearchConfigurationCommentStmt(Node *node); +extern char * DeparseTextSearchDictionaryCommentStmt(Node *node); /* forward declarations for deparse_schema_stmts.c */ extern char * DeparseCreateSchemaStmt(Node *node); @@ -153,13 +160,19 @@ extern char * DeparseAlterExtensionStmt(Node *stmt); /* forward declarations for deparse_database_stmts.c */ extern char * DeparseAlterDatabaseOwnerStmt(Node *node); -/* forward declatations for depatse_text_search_stmts.c */ -extern void QualifyDropTextSearchConfigurationStmt(Node *node); -extern void QualifyAlterTextSearchConfigurationStmt(Node *node); -extern void QualifyRenameTextSearchConfigurationStmt(Node *node); -extern void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node); -extern void QualifyTextSearchConfigurationCommentStmt(Node *node); +/* forward declatations for deparse_text_search_stmts.c */ extern void QualifyAlterTextSearchConfigurationOwnerStmt(Node *node); +extern void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node); +extern void QualifyAlterTextSearchConfigurationStmt(Node *node); +extern void QualifyAlterTextSearchDictionaryOwnerStmt(Node *node); +extern void QualifyAlterTextSearchDictionarySchemaStmt(Node *node); +extern void QualifyAlterTextSearchDictionaryStmt(Node *node); +extern void QualifyDropTextSearchConfigurationStmt(Node *node); +extern void QualifyDropTextSearchDictionaryStmt(Node *node); +extern void QualifyRenameTextSearchConfigurationStmt(Node *node); +extern void QualifyRenameTextSearchDictionaryStmt(Node *node); +extern void QualifyTextSearchConfigurationCommentStmt(Node *node); +extern void QualifyTextSearchDictionaryCommentStmt(Node *node); /* forward declarations for deparse_sequence_stmts.c */ extern char * DeparseDropSequenceStmt(Node *node); diff --git a/src/include/distributed/jsonbutils.h b/src/include/distributed/jsonbutils.h new file mode 100644 index 000000000..3e37fa38e --- /dev/null +++ b/src/include/distributed/jsonbutils.h @@ -0,0 +1,20 @@ +/*------------------------------------------------------------------------- + * + * jsonbutils.h + * + * Declarations for public utility functions related to jsonb. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ +#ifndef CITUS_JSONBUTILS_H +#define CITUS_JSONBUTILS_H + +#include "postgres.h" + +bool ExtractFieldJsonbDatum(Datum jsonbDoc, const char *fieldName, Datum *result); +text * ExtractFieldTextP(Datum jsonbDoc, const char *fieldName); +bool ExtractFieldBoolean(Datum jsonbDoc, const char *fieldName, bool defaultValue); + +#endif /* CITUS_JSONBUTILS_H */ diff --git a/src/include/distributed/listutils.h b/src/include/distributed/listutils.h index e257b7692..ee0cb6e23 100644 --- a/src/include/distributed/listutils.h +++ b/src/include/distributed/listutils.h @@ -80,6 +80,59 @@ typedef struct ListCellAndListWrapper (((var) = lfirst_oid(var ## CellDoNotUse)) || true); \ var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse)) +/* + * forboth_ptr - + * a convenience macro which loops through two lists of pointers at the same + * time, without needing a ListCell. It only needs two declared pointer + * variables to store the pointer of each of the two cells in. 
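forboth_ptr itself is exercised by the colocation_utils.c, worker_create_or_replace.c and worker_merge_protocol.c hunks earlier in this diff; the Oid-carrying variants defined just below are not, so here is a made-up loop showing their intended shape (the parallel role lists and names are purely illustrative):

```c
/*
 * Made-up example for forboth_ptr_oid: walk a list of role names and a
 * parallel list of role Oids without declaring ListCells by hand.
 */
#include "postgres.h"

#include "distributed/listutils.h"

static void
LogRoleOids(void)
{
	List *roleNameList = list_make2((char *) "alice", (char *) "bob");
	List *roleOidList = list_make2_oid(10001, 10002);

	const char *roleName = NULL;
	Oid roleOid = InvalidOid;
	forboth_ptr_oid(roleName, roleNameList, roleOid, roleOidList)
	{
		elog(DEBUG1, "role %s has oid %u", roleName, roleOid);
	}
}
```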
+ */ +#define forboth_ptr(var1, l1, var2, l2) \ + for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \ + *(var2 ## CellDoNotUse) = list_head(l2); \ + (var1 ## CellDoNotUse) != NULL && \ + (var2 ## CellDoNotUse) != NULL && \ + (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \ + (((var2) = lfirst(var2 ## CellDoNotUse)) || true); \ + var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \ + var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \ + ) + +/* + * forboth_ptr_oid - + * a convenience macro which loops through two lists at the same time. The + * first list should contain pointers and the second list should contain + * Oids. It does not need a ListCell to do this. It only needs two declared + * variables to store the pointer and the Oid of each of the two cells in. + */ +#define forboth_ptr_oid(var1, l1, var2, l2) \ + for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \ + *(var2 ## CellDoNotUse) = list_head(l2); \ + (var1 ## CellDoNotUse) != NULL && \ + (var2 ## CellDoNotUse) != NULL && \ + (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \ + (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \ + var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \ + var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \ + ) + +/* + * forboth_int_oid - + * a convenience macro which loops through two lists at the same time. The + * first list should contain integers and the second list should contain + * Oids. It does not need a ListCell to do this. It only needs two declared + * variables to store the int and the Oid of each of the two cells in. + */ +#define forboth_int_oid(var1, l1, var2, l2) \ + for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \ + *(var2 ## CellDoNotUse) = list_head(l2); \ + (var1 ## CellDoNotUse) != NULL && \ + (var2 ## CellDoNotUse) != NULL && \ + (((var1) = lfirst_int(var1 ## CellDoNotUse)) || true) && \ + (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \ + var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \ + var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \ + ) + /* * foreach_ptr_append - * a convenience macro which loops through a pointer List and can append list diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h index 46ba72a49..e190aef6f 100644 --- a/src/include/distributed/metadata_cache.h +++ b/src/include/distributed/metadata_cache.h @@ -256,6 +256,7 @@ extern Oid PgTableVisibleFuncId(void); extern Oid CitusTableVisibleFuncId(void); extern Oid RelationIsAKnownShardFuncId(void); extern Oid JsonbExtractPathFuncId(void); +extern Oid JsonbExtractPathTextFuncId(void); /* enum oids */ extern Oid PrimaryNodeRoleId(void); diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index c03b3abe7..0d9f125d8 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -290,7 +290,8 @@ extern bool GetNodeDiskSpaceStatsForConnection(MultiConnection *connection, uint64 *availableBytes, uint64 *totalBytes); extern void ExecuteQueryViaSPI(char *query, int SPIOK); -extern void EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId, Oid ownerRelationId); +extern void EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid + ownerRelationId); extern void AlterSequenceType(Oid seqOid, Oid typeOid); extern void EnsureRelationHasCompatibleSequenceTypes(Oid relationId); #endif /* METADATA_UTILITY_H */ diff --git 
a/src/include/distributed/shared_library_init.h b/src/include/distributed/shared_library_init.h index b5bc09da3..485ab553f 100644 --- a/src/include/distributed/shared_library_init.h +++ b/src/include/distributed/shared_library_init.h @@ -11,10 +11,17 @@ #ifndef SHARED_LIBRARY_INIT_H #define SHARED_LIBRARY_INIT_H +#include "columnar/columnar.h" + #define GUC_STANDARD 0 #define MAX_SHARD_COUNT 64000 #define MAX_SHARD_REPLICATION_FACTOR 100 +extern ColumnarSupportsIndexAM_type extern_ColumnarSupportsIndexAM; +extern CompressionTypeStr_type extern_CompressionTypeStr; +extern IsColumnarTableAmTable_type extern_IsColumnarTableAmTable; +extern ReadColumnarOptions_type extern_ReadColumnarOptions; + extern void StartupCitusBackend(void); #endif /* SHARED_LIBRARY_INIT_H */ diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index c3748ee5b..6cb7d8bce 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -70,4 +70,6 @@ extern void RemoveWorkerTransaction(const char *nodeName, int32 nodePort); /* helper functions for worker transactions */ extern bool IsWorkerTransactionActive(void); +extern bool IsWorkerTheCurrentNode(WorkerNode *workerNode); + #endif /* WORKER_TRANSACTION_H */ diff --git a/src/test/regress/base_schedule b/src/test/regress/base_schedule index 7eca2ab85..0a2ada96f 100644 --- a/src/test/regress/base_schedule +++ b/src/test/regress/base_schedule @@ -7,3 +7,4 @@ test: multi_test_catalog_views test: multi_create_table multi_behavioral_analytics_create_table test: multi_create_table_superuser multi_behavioral_analytics_create_table_superuser test: multi_load_data multi_load_data_superuser tablespace +test: check_mx diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index d7ffe31c3..329d63722 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -263,3 +263,23 @@ s/issuing SELECT pg_cancel_backend\([0-9]+::integer\)/issuing SELECT pg_cancel_b # node id in run_command_on_all_nodes warning s/Error on node with node id [0-9]+/Error on node with node id xxxxx/g + +# Temp schema names in error messages regarding dependencies that we cannot distribute +# +# 1) Schema of the depending object in the error message: +# +# e.g.: +# WARNING: "function pg_temp_3.f(bigint)" has dependency on unsupported object "" +# will be replaced with +# WARNING: "function pg_temp_xxx.f(bigint)" has dependency on unsupported object "" +s/^(WARNING|ERROR)(: "[a-z\ ]+ )pg_temp_[0-9]+(\..*" has dependency on unsupported object ".*")$/\1\2pg_temp_xxx\3/g + +# 2) Schema of the depending object in the error detail: +s/^(DETAIL: "[a-z\ ]+ )pg_temp_[0-9]+(\..*" will be created only locally)$/\1pg_temp_xxx\2/g + +# 3) Schema that the object depends in the error message: +# e.g.: +# WARNING: "function func(bigint)" has dependency on unsupported object "schema pg_temp_3" +# will be replaced with +# WARNING: "function func(bigint)" has dependency on unsupported object "schema pg_temp_xxx" +s/^(WARNING|ERROR)(: "[a-z\ ]+ .*" has dependency on unsupported object) "schema pg_temp_[0-9]+"$/\1\2 "schema pg_temp_xxx"/g diff --git a/src/test/regress/citus_tests/arbitrary_configs/README.md b/src/test/regress/citus_tests/arbitrary_configs/README.md index 5a3806eee..3829a72c3 100644 --- a/src/test/regress/citus_tests/arbitrary_configs/README.md +++ b/src/test/regress/citus_tests/arbitrary_configs/README.md @@ -69,6 +69,10 @@ So the infrastructure tests: When 
you want to add a new test, you can add the create statements to `create_schedule` and add the sql queries to `sql_schedule`. If you are adding Citus UDFs that should be a NO-OP for Postgres, make sure to override the UDFs in `postgres.sql`. +If the test needs to be skipped in some configs, you can do that by adding the test names in the `skip_tests` array for +each config. The test files associated with the skipped test will be set to empty so the test will pass without the actual test +being run. + ## Adding a new config You can add your new config to `config.py`. Make sure to extend either `CitusDefaultClusterConfig` or `CitusMXBaseClusterConfig`. diff --git a/src/test/regress/citus_tests/arbitrary_configs/citus_arbitrary_configs.py b/src/test/regress/citus_tests/arbitrary_configs/citus_arbitrary_configs.py index b5f71f945..5f023d819 100755 --- a/src/test/regress/citus_tests/arbitrary_configs/citus_arbitrary_configs.py +++ b/src/test/regress/citus_tests/arbitrary_configs/citus_arbitrary_configs.py @@ -55,7 +55,6 @@ def run_for_config(config, lock, sql_schedule_name): if config.user == cfg.REGULAR_USER_NAME: common.create_role( config.bindir, - config.coordinator_port(), config.node_name_to_ports.values(), config.user, ) @@ -129,13 +128,24 @@ def copy_test_files(config): colon_index = line.index(":") line = line[colon_index + 1 :].strip() test_names = line.split(" ") - copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path) + copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path, config) -def copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path): +def copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path, config): for test_name in test_names: + # make empty files for the skipped tests + if test_name in config.skip_tests: + expected_sql_file = os.path.join(sql_dir_path, test_name + ".sql") + open(expected_sql_file, 'x').close() + + expected_out_file = os.path.join(expected_dir_path, test_name + ".out") + open(expected_out_file, 'x').close() + + continue + sql_name = os.path.join("./sql", test_name + ".sql") output_name = os.path.join("./expected", test_name + ".out") + shutil.copy(sql_name, sql_dir_path) if os.path.isfile(output_name): # it might be the first time we run this test and the expected file diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index 49cb1bfae..c2e770d79 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -3,6 +3,7 @@ import shutil import sys import subprocess import atexit +import concurrent.futures import utils from utils import USER, cd @@ -24,9 +25,19 @@ def initialize_temp_dir_if_not_exists(temp_dir): os.chmod(temp_dir, 0o777) +def parallel_run(function, items, *args, **kwargs): + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(function, item, *args, **kwargs) + for item in items + ] + for future in futures: + future.result() + def initialize_db_for_cluster(pg_path, rel_data_path, settings, node_names): subprocess.run(["mkdir", rel_data_path], check=True) - for node_name in node_names: + + def initialize(node_name): abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name)) command = [ os.path.join(pg_path, "initdb"), @@ -34,10 +45,18 @@ def initialize_db_for_cluster(pg_path, rel_data_path, settings, node_names): abs_data_path, "--username", USER, + "--no-sync", + # --allow-group-access is used to ensure we set permissions on + # private keys 
correctly + "--allow-group-access", + "--encoding", + "UTF8" ] subprocess.run(command, check=True) add_settings(abs_data_path, settings) + parallel_run(initialize, node_names) + def add_settings(abs_data_path, settings): conf_path = os.path.join(abs_data_path, "postgresql.conf") @@ -49,15 +68,17 @@ def add_settings(abs_data_path, settings): conf_file.write(setting) -def create_role(pg_path, port, node_ports, user_name): - for port in node_ports: - command = "SELECT worker_create_or_alter_role('{}', 'CREATE ROLE {} WITH LOGIN CREATEROLE CREATEDB;', NULL)".format( +def create_role(pg_path, node_ports, user_name): + def create(port): + command = "SET citus.enable_ddl_propagation TO OFF; SELECT worker_create_or_alter_role('{}', 'CREATE ROLE {} WITH LOGIN CREATEROLE CREATEDB;', NULL)".format( user_name, user_name ) utils.psql(pg_path, port, command) - command = "GRANT CREATE ON DATABASE postgres to {}".format(user_name) + command = "SET citus.enable_ddl_propagation TO OFF; GRANT CREATE ON DATABASE postgres to {}".format(user_name) utils.psql(pg_path, port, command) + parallel_run(create, node_ports) + def coordinator_should_haveshards(pg_path, port): command = "SELECT citus_set_node_property('localhost', {}, 'shouldhaveshards', true)".format( @@ -67,7 +88,7 @@ def coordinator_should_haveshards(pg_path, port): def start_databases(pg_path, rel_data_path, node_name_to_ports, logfile_prefix, env_variables): - for node_name in node_name_to_ports.keys(): + def start(node_name): abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name)) node_port = node_name_to_ports[node_name] command = [ @@ -89,6 +110,11 @@ def start_databases(pg_path, rel_data_path, node_name_to_ports, logfile_prefix, subprocess.run(command, check=True) + parallel_run(start, node_name_to_ports.keys()) + + # We don't want parallel shutdown here because that will fail when it's + # tried in this atexit call with an error like: + # cannot schedule new futures after interpreter shutdown atexit.register( stop_databases, pg_path, @@ -96,13 +122,16 @@ def start_databases(pg_path, rel_data_path, node_name_to_ports, logfile_prefix, node_name_to_ports, logfile_prefix, no_output=True, + parallel=False, ) def create_citus_extension(pg_path, node_ports): - for port in node_ports: + def create(port): utils.psql(pg_path, port, "CREATE EXTENSION citus;") + parallel_run(create, node_ports) + def run_pg_regress(pg_path, pg_srcdir, port, schedule): should_exit = True @@ -215,9 +244,9 @@ def logfile_name(logfile_prefix, node_name): def stop_databases( - pg_path, rel_data_path, node_name_to_ports, logfile_prefix, no_output=False + pg_path, rel_data_path, node_name_to_ports, logfile_prefix, no_output=False, parallel=True ): - for node_name in node_name_to_ports.keys(): + def stop(node_name): abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name)) node_port = node_name_to_ports[node_name] command = [ @@ -239,6 +268,12 @@ def stop_databases( else: subprocess.call(command) + if parallel: + parallel_run(stop, node_name_to_ports.keys()) + else: + for node_name in node_name_to_ports.keys(): + stop(node_name) + def initialize_citus_cluster(bindir, datadir, settings, config): # In case there was a leftover from previous runs, stop the databases diff --git a/src/test/regress/citus_tests/config.py b/src/test/regress/citus_tests/config.py index 205be5aed..0e804ea6d 100644 --- a/src/test/regress/citus_tests/config.py +++ b/src/test/regress/citus_tests/config.py @@ -58,10 +58,7 @@ port_lock = threading.Lock() def 
should_include_config(class_name): - if inspect.isclass(class_name) and ( - issubclass(class_name, CitusMXBaseClusterConfig) - or issubclass(class_name, CitusDefaultClusterConfig) - ): + if inspect.isclass(class_name) and issubclass(class_name, CitusDefaultClusterConfig): return True return False @@ -97,12 +94,13 @@ class CitusBaseClusterConfig(object, metaclass=NewInitCaller): self.temp_dir = CITUS_ARBITRARY_TEST_DIR self.worker_amount = 2 self.user = REGULAR_USER_NAME - self.is_mx = False + self.is_mx = True self.is_citus = True self.name = type(self).__name__ self.settings = { "shared_preload_libraries": "citus", "log_error_verbosity": "terse", + "fsync": False, "citus.node_conninfo": "sslmode=prefer", "citus.enable_repartition_joins": True, "citus.repartition_join_bucket_count_per_node": 2, @@ -111,6 +109,7 @@ class CitusBaseClusterConfig(object, metaclass=NewInitCaller): self.new_settings = {} self.add_coordinator_to_metadata = False self.env_variables = {} + self.skip_tests = [] def post_init(self): self._init_node_name_ports() @@ -167,12 +166,6 @@ class CitusDefaultClusterConfig(CitusBaseClusterConfig): self.add_coordinator_to_metadata = True -class CitusMXBaseClusterConfig(CitusDefaultClusterConfig): - def __init__(self, arguments): - super().__init__(arguments) - self.is_mx = True - - class CitusUpgradeConfig(CitusBaseClusterConfig): def __init__(self, arguments): super().__init__(arguments) @@ -183,6 +176,7 @@ class CitusUpgradeConfig(CitusBaseClusterConfig): self.user = SUPER_USER_NAME self.mixed_mode = arguments["--mixed"] self.fixed_port = 57635 + self.is_mx = False class PostgresConfig(CitusDefaultClusterConfig): @@ -204,19 +198,19 @@ class CitusSingleNodeClusterConfig(CitusDefaultClusterConfig): common.coordinator_should_haveshards(self.bindir, self.coordinator_port()) -class CitusSingleWorkerClusterConfig(CitusMXBaseClusterConfig): +class CitusSingleWorkerClusterConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.worker_amount = 1 -class CitusSuperUserDefaultClusterConfig(CitusMXBaseClusterConfig): +class CitusSuperUserDefaultClusterConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.user = SUPER_USER_NAME -class CitusThreeWorkersManyShardsClusterConfig(CitusMXBaseClusterConfig): +class CitusThreeWorkersManyShardsClusterConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = {"citus.shard_count": 191} @@ -226,7 +220,7 @@ class CitusThreeWorkersManyShardsClusterConfig(CitusMXBaseClusterConfig): common.coordinator_should_haveshards(self.bindir, self.coordinator_port()) -class CitusSmallSharedPoolSizeConfig(CitusMXBaseClusterConfig): +class CitusSmallSharedPoolSizeConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = { @@ -235,7 +229,7 @@ class CitusSmallSharedPoolSizeConfig(CitusMXBaseClusterConfig): } -class CitusSmallExecutorPoolSizeConfig(CitusMXBaseClusterConfig): +class CitusSmallExecutorPoolSizeConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = { @@ -243,7 +237,7 @@ class CitusSmallExecutorPoolSizeConfig(CitusMXBaseClusterConfig): } -class CitusSequentialExecutionConfig(CitusMXBaseClusterConfig): +class CitusSequentialExecutionConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = { @@ -251,7 +245,7 @@ class 
CitusSequentialExecutionConfig(CitusMXBaseClusterConfig): } -class CitusCacheManyConnectionsConfig(CitusMXBaseClusterConfig): +class CitusCacheManyConnectionsConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = { @@ -259,7 +253,7 @@ class CitusCacheManyConnectionsConfig(CitusMXBaseClusterConfig): } -class CitusUnusualExecutorConfig(CitusMXBaseClusterConfig): +class CitusUnusualExecutorConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = { @@ -280,7 +274,7 @@ class CitusUnusualExecutorConfig(CitusMXBaseClusterConfig): self.env_variables = {'PGAPPNAME' : 'test_app'} -class CitusSmallCopyBuffersConfig(CitusMXBaseClusterConfig): +class CitusSmallCopyBuffersConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = { @@ -290,7 +284,7 @@ class CitusSmallCopyBuffersConfig(CitusMXBaseClusterConfig): } -class CitusUnusualQuerySettingsConfig(CitusMXBaseClusterConfig): +class CitusUnusualQuerySettingsConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = { @@ -304,6 +298,13 @@ class CitusUnusualQuerySettingsConfig(CitusMXBaseClusterConfig): "citus.values_materialization_threshold": "0", } + self.skip_tests = [ + # Creating a reference table from a table referred to by a fk + # requires the table with the fk to be converted to a citus_local_table. + # As of c11, there is no way to do that through remote execution so this test + # will fail + "arbitrary_configs_truncate_cascade_create", "arbitrary_configs_truncate_cascade"] + class CitusSingleNodeSingleShardClusterConfig(CitusDefaultClusterConfig): def __init__(self, arguments): @@ -315,19 +316,26 @@ class CitusSingleNodeSingleShardClusterConfig(CitusDefaultClusterConfig): common.coordinator_should_haveshards(self.bindir, self.coordinator_port()) -class CitusShardReplicationFactorClusterConfig(CitusMXBaseClusterConfig): +class CitusShardReplicationFactorClusterConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = {"citus.shard_replication_factor": 2} + self.skip_tests = [ + # citus does not support foreign keys in distributed tables + # when citus.shard_replication_factor > 2 + "arbitrary_configs_truncate_partition_create", "arbitrary_configs_truncate_partition", + # citus does not support modifying a partition when + # citus.shard_replication_factor > 2 + "arbitrary_configs_truncate_cascade_create", "arbitrary_configs_truncate_cascade"] -class CitusSingleShardClusterConfig(CitusMXBaseClusterConfig): +class CitusSingleShardClusterConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.new_settings = {"citus.shard_count": 1} -class CitusNonMxClusterConfig(CitusMXBaseClusterConfig): +class CitusNonMxClusterConfig(CitusDefaultClusterConfig): def __init__(self, arguments): super().__init__(arguments) self.is_mx = False diff --git a/src/test/regress/create_schedule b/src/test/regress/create_schedule index 17fc6559b..3da31fde9 100644 --- a/src/test/regress/create_schedule +++ b/src/test/regress/create_schedule @@ -4,3 +4,9 @@ test: dropped_columns_create_load distributed_planning_create_load test: local_dist_join_load test: partitioned_indexes_create test: connectivity_checks +test: views_create +test: sequences_create +test: index_create +test: arbitrary_configs_truncate_create +test: 
arbitrary_configs_truncate_cascade_create +test: arbitrary_configs_truncate_partition_create diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out index 51808515b..01f3a682d 100644 --- a/src/test/regress/expected/add_coordinator.out +++ b/src/test/regress/expected/add_coordinator.out @@ -1,6 +1,10 @@ -- -- ADD_COORDINATOR -- +-- node trying to add itself without specifying groupid => 0 should error out +SELECT master_add_node('localhost', :master_port); +ERROR: Node cannot add itself as a worker. +HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636); SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata -- adding the same node again should return the existing nodeid diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index 1459c139b..2bf44bd1b 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -1114,6 +1114,7 @@ create function dummy_fnc(a dummy_tbl, d double precision) RETURNS dummy_tbl -- test in tx block -- shouldn't distribute, as citus.create_object_propagation is set to deferred BEGIN; +SET LOCAL citus.create_object_propagation TO deferred; create aggregate dependent_agg (float8) (stype=dummy_tbl, sfunc=dummy_fnc); COMMIT; -- verify not distributed @@ -1188,6 +1189,39 @@ DROP TABLE dummy_tbl CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to function dummy_fnc(dummy_tbl,double precision) drop cascades to function dependent_agg(double precision) +-- Show that polymorphic aggregates with zero-argument works +CREATE FUNCTION stfnp_zero_arg(int[]) RETURNS int[] AS +'select $1' LANGUAGE SQL; +CREATE FUNCTION ffp_zero_arg(anyarray) RETURNS anyarray AS +'select $1' LANGUAGE SQL; +CREATE AGGREGATE zero_arg_agg(*) (SFUNC = stfnp_zero_arg, STYPE = int4[], + FINALFUNC = ffp_zero_arg, INITCOND = '{}'); +CREATE TABLE zero_arg_agg_table(f1 int, f2 int[]); +SELECT create_distributed_table('zero_arg_agg_table','f1'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO zero_arg_agg_table VALUES(1, array[1]); +INSERT INTO zero_arg_agg_table VALUES(1, array[11]); +SELECT zero_arg_agg(*) from zero_arg_agg_table; + zero_arg_agg +--------------------------------------------------------------------- + {} +(1 row) + +-- Show that after dropping a table on which functions and aggregates depending on +-- pg_dist_object is consistent on coordinator and worker node. 
+SELECT pg_identify_object_as_address(classid, objid, objsubid)::text +FROM pg_catalog.pg_dist_object + EXCEPT +SELECT unnest(result::text[]) AS unnested_result +FROM run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) from pg_catalog.pg_dist_object$$); + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + SET citus.create_object_propagation TO automatic; begin; create type typ1 as (a int); @@ -1202,5 +1236,17 @@ SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid (localhost,57638,t,aggregate_support.dependent_agg) (2 rows) +CREATE AGGREGATE newavg ( + sfunc = int4_avg_accum, basetype = int4, stype = _int8, + finalfunc = int8_avg, + initcond1 = '{0,0}' +); +SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid::text like '%newavg%';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,aggregate_support.newavg) + (localhost,57638,t,aggregate_support.newavg) +(2 rows) + set client_min_messages to error; drop schema aggregate_support cascade; diff --git a/src/test/regress/expected/arbitrary_configs_truncate.out b/src/test/regress/expected/arbitrary_configs_truncate.out new file mode 100644 index 000000000..78d6442e8 --- /dev/null +++ b/src/test/regress/expected/arbitrary_configs_truncate.out @@ -0,0 +1,85 @@ +SET search_path TO truncate_tests_schema; +-- Test truncate rollback on a basic table +SELECT COUNT(*) FROM basic_table; + count +--------------------------------------------------------------------- + 10 +(1 row) + +BEGIN; +TRUNCATE basic_table; +SELECT COUNT(*) FROM basic_table; + count +--------------------------------------------------------------------- + 0 +(1 row) + +ROLLBACK; +SELECT COUNT(*) FROM basic_table; + count +--------------------------------------------------------------------- + 10 +(1 row) + +-- Test truncate on a basic table +SELECT COUNT(*) FROM basic_table; + count +--------------------------------------------------------------------- + 10 +(1 row) + +TRUNCATE basic_table; +SELECT COUNT(*) FROM basic_table; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- Test trucate rollback on partitioned table +SELECT COUNT(*) FROM partitioned_table_0; + count +--------------------------------------------------------------------- + 5 +(1 row) + +BEGIN; +TRUNCATE partitioned_table; +SELECT COUNT(*) FROM partitioned_table_0; + count +--------------------------------------------------------------------- + 0 +(1 row) + +ROLLBACK; +SELECT COUNT(*) FROM partitioned_table_0; + count +--------------------------------------------------------------------- + 5 +(1 row) + +-- Test truncate a partioned table +SELECT COUNT(*) FROM partitioned_table; + count +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT COUNT(*) FROM partitioned_table_1; + count +--------------------------------------------------------------------- + 5 +(1 row) + +TRUNCATE partitioned_table; +SELECT COUNT(*) FROM partitioned_table; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM partitioned_table_1; + count +--------------------------------------------------------------------- + 0 +(1 row) + diff --git a/src/test/regress/expected/arbitrary_configs_truncate_cascade.out b/src/test/regress/expected/arbitrary_configs_truncate_cascade.out 
new file mode 100644 index 000000000..adf8a3cfc --- /dev/null +++ b/src/test/regress/expected/arbitrary_configs_truncate_cascade.out @@ -0,0 +1,84 @@ +SET search_path TO truncate_cascade_tests_schema; +-- Test truncate error on table with dependencies +TRUNCATE table_with_pk; +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "table_with_fk_1" references "table_with_pk". +HINT: Truncate table "table_with_fk_1" at the same time, or use TRUNCATE ... CASCADE. +-- Test truncate rollback on table with dependencies +SELECT COUNT(*) FROM table_with_fk_1; + count +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT COUNT(*) FROM table_with_fk_2; + count +--------------------------------------------------------------------- + 10 +(1 row) + +BEGIN; +TRUNCATE table_with_pk CASCADE; +SELECT COUNT(*) FROM table_with_fk_1; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM table_with_fk_2; + count +--------------------------------------------------------------------- + 0 +(1 row) + +ROLLBACK; +SELECT COUNT(*) FROM table_with_fk_1; + count +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT COUNT(*) FROM table_with_fk_2; + count +--------------------------------------------------------------------- + 10 +(1 row) + +-- Test truncate on table with dependencies +SELECT COUNT(*) FROM table_with_pk; + count +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT COUNT(*) FROM table_with_fk_1; + count +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT COUNT(*) FROM table_with_fk_2; + count +--------------------------------------------------------------------- + 10 +(1 row) + +TRUNCATE table_with_pk CASCADE; +SELECT COUNT(*) FROM table_with_pk; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM table_with_fk_1; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM table_with_fk_2; + count +--------------------------------------------------------------------- + 0 +(1 row) + diff --git a/src/test/regress/expected/arbitrary_configs_truncate_cascade_create.out b/src/test/regress/expected/arbitrary_configs_truncate_cascade_create.out new file mode 100644 index 000000000..00caa435e --- /dev/null +++ b/src/test/regress/expected/arbitrary_configs_truncate_cascade_create.out @@ -0,0 +1,29 @@ +CREATE SCHEMA truncate_cascade_tests_schema; +SET search_path TO truncate_cascade_tests_schema; +-- tables connected with foreign keys +CREATE TABLE table_with_pk(a bigint PRIMARY KEY); +CREATE TABLE table_with_fk_1(a bigint, b bigint, FOREIGN KEY (b) REFERENCES table_with_pk(a)); +CREATE TABLE table_with_fk_2(a bigint, b bigint, FOREIGN KEY (b) REFERENCES table_with_pk(a)); +-- distribute tables +SELECT create_reference_table('table_with_pk'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('table_with_fk_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_reference_table('table_with_fk_2'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- fill tables with data +INSERT INTO table_with_pk(a) SELECT n FROM 
generate_series(1, 10) n; +INSERT INTO table_with_fk_1(a, b) SELECT n, n FROM generate_series(1, 10) n; +INSERT INTO table_with_fk_2(a, b) SELECT n, n FROM generate_series(1, 10) n; diff --git a/src/test/regress/expected/arbitrary_configs_truncate_create.out b/src/test/regress/expected/arbitrary_configs_truncate_create.out new file mode 100644 index 000000000..ba19ab64f --- /dev/null +++ b/src/test/regress/expected/arbitrary_configs_truncate_create.out @@ -0,0 +1,26 @@ +CREATE SCHEMA truncate_tests_schema; +SET search_path TO truncate_tests_schema; +-- simple table +CREATE TABLE basic_table(a int); +-- partioned table +CREATE TABLE partitioned_table(a int) PARTITION BY RANGE(a); +CREATE TABLE partitioned_table_0 PARTITION OF partitioned_table +FOR VALUES FROM (1) TO (6); +CREATE TABLE partitioned_table_1 PARTITION OF partitioned_table +FOR VALUES FROM (6) TO (11); +-- distribute tables +SELECT create_distributed_table('basic_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('partitioned_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- fill tables with data +INSERT INTO basic_table(a) SELECT n FROM generate_series(1, 10) n; +INSERT INTO partitioned_table(a) SELECT n FROM generate_series(1, 10) n; diff --git a/src/test/regress/expected/arbitrary_configs_truncate_partition.out b/src/test/regress/expected/arbitrary_configs_truncate_partition.out new file mode 100644 index 000000000..2a8dde4bf --- /dev/null +++ b/src/test/regress/expected/arbitrary_configs_truncate_partition.out @@ -0,0 +1,39 @@ +SET search_path TO truncate_partition_tests_schema; +-- Test truncate on a partition +SELECT COUNT(*) FROM partitioned_table; + count +--------------------------------------------------------------------- + 10 +(1 row) + +SELECT COUNT(*) FROM partitioned_table_0; + count +--------------------------------------------------------------------- + 5 +(1 row) + +SELECT COUNT(*) FROM partitioned_table_1; + count +--------------------------------------------------------------------- + 5 +(1 row) + +TRUNCATE partitioned_table_0; +SELECT COUNT(*) FROM partitioned_table; + count +--------------------------------------------------------------------- + 5 +(1 row) + +SELECT COUNT(*) FROM partitioned_table_0; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM partitioned_table_1; + count +--------------------------------------------------------------------- + 5 +(1 row) + diff --git a/src/test/regress/expected/arbitrary_configs_truncate_partition_create.out b/src/test/regress/expected/arbitrary_configs_truncate_partition_create.out new file mode 100644 index 000000000..3ffbb036a --- /dev/null +++ b/src/test/regress/expected/arbitrary_configs_truncate_partition_create.out @@ -0,0 +1,17 @@ +CREATE SCHEMA truncate_partition_tests_schema; +SET search_path TO truncate_partition_tests_schema; +-- partioned table +CREATE TABLE partitioned_table(a int) PARTITION BY RANGE(a); +CREATE TABLE partitioned_table_0 PARTITION OF partitioned_table +FOR VALUES FROM (1) TO (6); +CREATE TABLE partitioned_table_1 PARTITION OF partitioned_table +FOR VALUES FROM (6) TO (11); +-- distribute tables +SELECT create_distributed_table('partitioned_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- fill tables with data 
+INSERT INTO partitioned_table(a) SELECT n FROM generate_series(1, 10) n; diff --git a/src/test/regress/expected/check_mx.out b/src/test/regress/expected/check_mx.out index 6a030bc31..b19445229 100644 --- a/src/test/regress/expected/check_mx.out +++ b/src/test/regress/expected/check_mx.out @@ -10,3 +10,13 @@ SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary'; t (1 row) +-- Show that pg_dist_object entities are same on all nodes +SELECT pg_identify_object_as_address(classid, objid, objsubid)::text +FROM pg_catalog.pg_dist_object + EXCEPT +SELECT unnest(result::text[]) AS unnested_result +FROM run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) from pg_catalog.pg_dist_object$$); + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + diff --git a/src/test/regress/expected/columnar_create.out b/src/test/regress/expected/columnar_create.out index 9cb025336..9b5fdace9 100644 --- a/src/test/regress/expected/columnar_create.out +++ b/src/test/regress/expected/columnar_create.out @@ -185,3 +185,5 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_sto f (1 row) +-- make sure citus_columnar can be loaded +LOAD 'citus_columnar'; diff --git a/src/test/regress/expected/data_types.out b/src/test/regress/expected/data_types.out index 4c8539758..28ef76bb1 100644 --- a/src/test/regress/expected/data_types.out +++ b/src/test/regress/expected/data_types.out @@ -183,5 +183,18 @@ INSERT INTO data_types_table SELECT * FROM data_types_table ON CONFLICT (dist_ke INSERT INTO data_types_table SELECT * FROM data_types_table LIMIT 100000 ON CONFLICT (dist_key) DO UPDATE SET useless_column = 10; INSERT INTO data_types_table (dist_key, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38) SELECT dist_key+1, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38 FROM data_types_table ON CONFLICT (dist_key) DO UPDATE SET useless_column = 10; +-- test type names that start with underscore +CREATE TYPE underscore_type_1 AS (a INT); +CREATE TYPE _underscore_type_1 AS (a INT); +CREATE TYPE underscore_type_2 AS ENUM ('a'); +CREATE TYPE _underscore_type_2 AS ENUM ('a'); +SELECT result FROM run_command_on_all_nodes('SELECT count(*) FROM pg_type WHERE typname LIKE ''%underscore\_type%'''); + result +--------------------------------------------------------------------- + 8 + 8 + 8 +(3 rows) + SET client_min_messages TO ERROR; DROP SCHEMA data_types CASCADE; diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out index bc6a5a859..21b3f7f8e 100644 --- a/src/test/regress/expected/distributed_collations.out +++ b/src/test/regress/expected/distributed_collations.out @@ -179,3 +179,41 @@ HINT: Connect to the coordinator and run it again. 
SET citus.enable_ddl_propagation TO off; DROP SCHEMA collation_creation_on_worker; SET citus.enable_ddl_propagation TO on; +\c - - - :master_port +-- will skip trying to propagate the collation due to temp schema +CREATE COLLATION pg_temp.temp_collation (provider = icu, locale = 'de-u-co-phonebk'); +WARNING: "collation pg_temp_xxx.temp_collation" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "collation pg_temp_xxx.temp_collation" will be created only locally +SET client_min_messages TO ERROR; +CREATE USER alter_collation_user; +SELECT 1 FROM run_command_on_workers('CREATE USER alter_collation_user'); + ?column? +--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +RESET client_min_messages; +CREATE COLLATION alter_collation FROM "C"; +ALTER COLLATION alter_collation OWNER TO alter_collation_user; +SELECT result FROM run_command_on_all_nodes(' + SELECT collowner::regrole FROM pg_collation WHERE collname = ''alter_collation''; +'); + result +--------------------------------------------------------------------- + alter_collation_user + alter_collation_user + alter_collation_user +(3 rows) + +DROP COLLATION alter_collation; +SET client_min_messages TO ERROR; +DROP USER alter_collation_user; +SELECT 1 FROM run_command_on_workers('DROP USER alter_collation_user'); + ?column? +--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +RESET client_min_messages; diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index 3534b12a2..bf92d680a 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -80,6 +80,33 @@ CREATE FUNCTION add_polygons(polygon, polygon) RETURNS int LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; +CREATE FUNCTION agg_dummy_func(state int, item int) +RETURNS int IMMUTABLE LANGUAGE plpgsql AS $$ +begin + return state + item; +end; +$$; +SET client_min_messages TO WARNING; +-- will skip trying to propagate the aggregate due to temp schema +CREATE AGGREGATE pg_temp.dummy_agg(int) ( + sfunc = agg_dummy_func, + stype = int, + sspace = 8, + finalfunc = agg_dummy_func, + finalfunc_extra, + initcond = '5', + msfunc = agg_dummy_func, + mstype = int, + msspace = 12, + minvfunc = agg_dummy_func, + mfinalfunc = agg_dummy_func, + mfinalfunc_extra, + minitcond = '1', + sortop = ">" +); +WARNING: "function pg_temp_xxx.dummy_agg(integer)" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "function pg_temp_xxx.dummy_agg(integer)" will be created only locally +RESET client_min_messages; -- Test some combination of functions without ddl propagation -- This will prevent the workers from having those types created. 
They are -- created just-in-time on function distribution diff --git a/src/test/regress/expected/distributed_planning.out b/src/test/regress/expected/distributed_planning.out index 42dac188d..f05e46d3a 100644 --- a/src/test/regress/expected/distributed_planning.out +++ b/src/test/regress/expected/distributed_planning.out @@ -831,3 +831,390 @@ SELECT count(*), t1.event FROM date_part_table t1 JOIN date_part_table t2 USING 4 | 1 (5 rows) +TRUNCATE test; +TRUNCATE ref; +insert into test(x, y) SELECT 1, i FROM generate_series(1, 10) i; +insert into test(x, y) SELECT 3, i FROM generate_series(11, 40) i; +insert into test(x, y) SELECT i, 1 FROM generate_series(1, 10) i; +insert into test(x, y) SELECT i, 3 FROM generate_series(11, 40) i; +insert into ref(a, b) SELECT i, 1 FROM generate_series(1, 10) i; +insert into ref(a, b) SELECT i, 3 FROM generate_series(11, 40) i; +insert into ref(a, b) SELECT 1, i FROM generate_series(1, 10) i; +insert into ref(a, b) SELECT 3, i FROM generate_series(11, 40) i; +SELECT count(*) +FROM test, + LATERAL ( + SELECT + ref.a + FROM ref + WHERE + ref.b = test.x + LIMIT 2 + ) q; + count +--------------------------------------------------------------------- + 122 +(1 row) + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + ref.a + FROM ref + WHERE + ref.b = test.y + LIMIT 2 + ) q; + count +--------------------------------------------------------------------- + 122 +(1 row) + +-- Since the subquery only correlates on the distribution column, this can be safely +-- pushed down. But this is currently considered too hard to detect, so we fail. +-- +-- SELECT count(*) +-- FROM ref, +-- LATERAL ( +-- SELECT +-- test.x +-- FROM test +-- WHERE +-- test.x = ref.a +-- LIMIT 2 +-- ) q; +-- This returns wrong results when pushed down. Instead of returning 2 rows +-- for each row in the reference table, it would return (2 * number of shards) +-- rows for each row in the reference table.
+-- See issue #5327 +-- +-- SELECT count(*) +-- FROM ref, +-- LATERAL ( +-- SELECT +-- test.y +-- FROM test +-- WHERE +-- test.y = ref.a +-- LIMIT 2 +-- ) q; +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + LIMIT 2 + ) q; + count +--------------------------------------------------------------------- + 122 +(1 row) + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + test_2.y + FROM test test_2 + WHERE + test_2.x = test.x + LIMIT 2 + ) q; + count +--------------------------------------------------------------------- + 122 +(1 row) + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + LIMIT 2 + ) q JOIN test ON test.x = q.y; + count +--------------------------------------------------------------------- + 2202 +(1 row) + +-- Would require repartitioning to work with subqueries +-- +-- SELECT count(*) +-- FROM test, +-- LATERAL ( +-- SELECT +-- test_2.x +-- FROM test test_2 +-- WHERE +-- test_2.x = test.y +-- LIMIT 2 +-- ) q ; +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + LIMIT 2 + ) q +; + count +--------------------------------------------------------------------- + 1222 +(1 row) + +SELECT count(*) +FROM ref JOIN test on ref.b = test.y, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + LIMIT 2 + ) q +; + count +--------------------------------------------------------------------- + 1222 +(1 row) + +-- Too complex joins for Citus to handle currently +-- +-- SELECT count(*) +-- FROM ref JOIN test on ref.b = test.x, +-- LATERAL ( +-- SELECT +-- test_2.x +-- FROM test test_2 +-- WHERE +-- test_2.x = ref.a +-- LIMIT 2 +-- ) q +-- ; +-- Would require repartitioning to work with subqueries +-- +-- SELECT count(*) +-- FROM ref JOIN test on ref.b = test.x, +-- LATERAL ( +-- SELECT +-- test_2.y +-- FROM test test_2 +-- WHERE +-- test_2.y = ref.a +-- LIMIT 2 +-- ) q +-- ; +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = test.x + LIMIT 2 + ) q +; + count +--------------------------------------------------------------------- + 2202 +(1 row) + +-- Without LIMIT clauses +SELECT count(*) +FROM test, + LATERAL ( + SELECT + ref.a + FROM ref + WHERE + ref.b = test.x + ) q; + count +--------------------------------------------------------------------- + 1120 +(1 row) + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + ref.a + FROM ref + WHERE + ref.b = test.y + ) q; + count +--------------------------------------------------------------------- + 1120 +(1 row) + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + test.x + FROM test + WHERE + test.x = ref.a + ) q; + count +--------------------------------------------------------------------- + 1120 +(1 row) + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref.a + ) q; + count +--------------------------------------------------------------------- + 1120 +(1 row) + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + ) q; + count +--------------------------------------------------------------------- + 1120 +(1 row) + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + test_2.y + FROM test test_2 + WHERE + test_2.x = test.x + ) q; + count +--------------------------------------------------------------------- + 1120 +(1 row) + +SELECT count(*) +FROM test, + LATERAL ( + 
SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = test.y + ) q ; + count +--------------------------------------------------------------------- + 1120 +(1 row) + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + ) q +; + count +--------------------------------------------------------------------- + 2960 +(1 row) + +SELECT count(*) +FROM ref JOIN test on ref.b = test.y, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + ) q +; + count +--------------------------------------------------------------------- + 2960 +(1 row) + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = ref.a + ) q +; + count +--------------------------------------------------------------------- + 2960 +(1 row) + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.y + FROM test test_2 + WHERE + test_2.y = ref.a + ) q +; + count +--------------------------------------------------------------------- + 2960 +(1 row) + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = test.x + ) q +; + count +--------------------------------------------------------------------- + 31160 +(1 row) + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + ) q JOIN test ON test.x = q.y; + count +--------------------------------------------------------------------- + 31160 +(1 row) + diff --git a/src/test/regress/expected/distributed_types.out b/src/test/regress/expected/distributed_types.out index 69684c702..02d419d9b 100644 --- a/src/test/regress/expected/distributed_types.out +++ b/src/test/regress/expected/distributed_types.out @@ -88,6 +88,7 @@ SELECT * FROM t3; 4 | (5,6) (1 row) +COMMIT; -- verify typmod was propagated SELECT run_command_on_workers($$SELECT atttypmod FROM pg_attribute WHERE attnum = 1 AND attrelid = (SELECT typrelid FROM pg_type WHERE typname = 'tc2');$$); run_command_on_workers @@ -96,7 +97,6 @@ SELECT run_command_on_workers($$SELECT atttypmod FROM pg_attribute WHERE attnum (localhost,57638,t,14) (2 rows) -COMMIT; -- transaction block with simple type BEGIN; CREATE TYPE te2 AS ENUM ('yes', 'no'); @@ -566,6 +566,44 @@ CREATE TYPE circ_type1 AS (a int); CREATE TYPE circ_type2 AS (a int, b circ_type1); ALTER TYPE circ_type1 ADD ATTRIBUTE b circ_type2; ERROR: composite type circ_type1 cannot be made a member of itself +-- Show that types can be created locally if has unsupported dependency +CREATE TYPE text_local_def; +CREATE FUNCTION text_local_def_in(cstring) + RETURNS text_local_def + AS 'textin' + LANGUAGE internal STRICT IMMUTABLE; +NOTICE: return type text_local_def is only a shell +WARNING: "function text_local_def_in(cstring)" has dependency on unsupported object "type text_local_def" +DETAIL: "function text_local_def_in(cstring)" will be created only locally +CREATE FUNCTION text_local_def_out(text_local_def) + RETURNS cstring + AS 'textout' + LANGUAGE internal STRICT IMMUTABLE; +NOTICE: argument type text_local_def is only a shell +WARNING: "function text_local_def_out(text_local_def)" has dependency on unsupported object "type text_local_def" +DETAIL: "function text_local_def_out(text_local_def)" will be created only locally +CREATE TYPE text_local_def ( + internallength = variable, + input = text_local_def_in, + output = text_local_def_out, + alignment = int4, + 
default = 'zippo' +); +-- It should be created locally as it has unsupported dependency +CREATE TYPE default_test_row AS (f1 text_local_def, f2 int4); +WARNING: "type default_test_row" has dependency on unsupported object "type text_local_def" +DETAIL: "type default_test_row" will be created only locally +-- Distributing table depending on that type should error out +CREATE TABLE table_text_local_def(id int, col_1 default_test_row); +SELECT create_distributed_table('table_text_local_def','id'); +ERROR: "table table_text_local_def" has dependency on unsupported object "type text_local_def" +-- will skip trying to propagate the type/enum due to temp schema +CREATE TYPE pg_temp.temp_type AS (int_field int); +WARNING: "type temp_type" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "type temp_type" will be created only locally +CREATE TYPE pg_temp.temp_enum AS ENUM ('one', 'two', 'three'); +WARNING: "type temp_enum" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "type temp_enum" will be created only locally -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; diff --git a/src/test/regress/expected/function_propagation.out b/src/test/regress/expected/function_propagation.out index dc6cf5d69..297199a98 100644 --- a/src/test/regress/expected/function_propagation.out +++ b/src/test/regress/expected/function_propagation.out @@ -92,6 +92,7 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas -- Have a separate check for type created in transaction BEGIN; + SET LOCAL citus.create_object_propagation TO deferred; CREATE TYPE function_prop_type_3 AS (a int, b int); COMMIT; -- Objects in the body part is not found as dependency @@ -209,16 +210,17 @@ BEGIN; return 1; END; $$; - -- Within transaction functions are not distributed SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; - pg_identify_object_as_address + pg_identify_object_as_address --------------------------------------------------------------------- -(0 rows) + (type,{function_propagation_schema.type_in_transaction},{}) +(1 row) SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; - pg_identify_object_as_address + pg_identify_object_as_address --------------------------------------------------------------------- -(0 rows) + (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction}) +(1 row) COMMIT; -- Show that recreating it outside transaction distributes the function and dependencies @@ -1277,6 +1279,25 @@ ALTER TABLE table_1_for_circ_dep_3 ADD COLUMN col_2 table_2_for_circ_dep_3; SELECT create_distributed_table('table_1_for_circ_dep_3','id'); ERROR: Citus can not handle circular dependencies between distributed objects DETAIL: "table table_1_for_circ_dep_3" circularly depends itself, resolve circular dependency first +-- will skip trying to propagate the function due to temp schema +CREATE FUNCTION pg_temp.temp_func(group_size BIGINT) RETURNS SETOF integer[] +AS $$ + SELECT array_agg(s) OVER w + FROM generate_series(1,5) s + WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING) +$$ LANGUAGE SQL STABLE; +WARNING: "function pg_temp_xxx.temp_func(bigint)" has dependency on unsupported object 
"schema pg_temp_xxx" +DETAIL: "function pg_temp_xxx.temp_func(bigint)" will be created only locally +SELECT create_distributed_function('pg_temp.temp_func(BIGINT)'); +ERROR: "function pg_temp_xxx.temp_func(bigint)" has dependency on unsupported object "schema pg_temp_xxx" +-- Show that support functions are supported +CREATE FUNCTION func_with_support(int, int) RETURNS bool + LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE + AS $$int4eq$$ SUPPORT generate_series_int8_support; +CREATE FUNCTION func_with_support_2(int, int) RETURNS bool + LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE + AS $$int4eq$$; +ALTER FUNCTION func_with_support_2(int, int) SUPPORT generate_series_int8_support; RESET search_path; SET client_min_messages TO WARNING; DROP SCHEMA function_propagation_schema CASCADE; diff --git a/src/test/regress/expected/global_cancel.out b/src/test/regress/expected/global_cancel.out index ede4bc7e3..6a206b0ad 100644 --- a/src/test/regress/expected/global_cancel.out +++ b/src/test/regress/expected/global_cancel.out @@ -62,10 +62,9 @@ CONTEXT: while executing command on localhost:xxxxx SELECT pg_cancel_backend(citus_backend_gpid()); ERROR: canceling statement due to user request \c - postgres - :master_port -SELECT nodeid AS coordinator_node_id FROM pg_dist_node WHERE nodeport = :master_port \gset SET client_min_messages TO DEBUG; -- 10000000000 is the node id multiplier for global pid -SELECT pg_cancel_backend(10000000000 * :coordinator_node_id + 0); +SELECT pg_cancel_backend(10000000000 * citus_coordinator_nodeid() + 0); DEBUG: PID 0 is not a PostgreSQL server process DETAIL: from localhost:xxxxx pg_cancel_backend @@ -73,7 +72,7 @@ DETAIL: from localhost:xxxxx f (1 row) -SELECT pg_terminate_backend(10000000000 * :coordinator_node_id + 0); +SELECT pg_terminate_backend(10000000000 * citus_coordinator_nodeid() + 0); DEBUG: PID 0 is not a PostgreSQL server process DETAIL: from localhost:xxxxx pg_terminate_backend @@ -82,7 +81,7 @@ DETAIL: from localhost:xxxxx (1 row) RESET client_min_messages; -SELECT citus_backend_gpid() = citus_calculate_gpid(:coordinator_node_id, pg_backend_pid()); +SELECT citus_backend_gpid() = citus_calculate_gpid(citus_coordinator_nodeid(), pg_backend_pid()); ?column? 
--------------------------------------------------------------------- t diff --git a/src/test/regress/expected/index_create.out b/src/test/regress/expected/index_create.out new file mode 100644 index 000000000..6e2876fa4 --- /dev/null +++ b/src/test/regress/expected/index_create.out @@ -0,0 +1,40 @@ +CREATE SCHEMA index_create; +SET search_path TO index_create; +CREATE TABLE test_tbl (a INT NOT NULL PRIMARY KEY, b text, c BIGINT); +CREATE UNIQUE INDEX CONCURRENTLY a_index ON test_tbl (a); +SELECT create_distributed_table('test_tbl','a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- suppress the WARNING message: not propagating CLUSTER command to worker nodes +SET client_min_messages TO ERROR; +CLUSTER test_tbl USING test_tbl_pkey; +RESET client_min_messages; +BEGIN; + CREATE INDEX idx1 ON test_tbl (a) INCLUDE (b, c); + DROP TABLE test_tbl; +ROLLBACK; +CREATE INDEX idx1 ON test_tbl (a) INCLUDE (b, c) WHERE a > 10; +CREATE INDEX idx2 ON test_tbl (lower(b)); +CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); +-- create its partitions +CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); +CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); +-- create indexes on the parent +CREATE INDEX IF NOT EXISTS partitioned_idx_1 ON ONLY partitioning_test (id); +CREATE INDEX IF NOT EXISTS partitioned_idx_2 ON partitioning_test (id, time NULLS FIRST); +SELECT create_distributed_table('partitioning_test', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- create hash index on distributed partitioned table +CREATE INDEX partition_idx_hash ON partitioning_test USING hash (id); +-- change statistics of index +ALTER INDEX idx2 ALTER COLUMN 1 SET STATISTICS 1000; +-- test reindex +REINDEX INDEX idx1; +ALTER TABLE test_tbl REPLICA IDENTITY USING INDEX a_index; diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out index fda0ead1d..ab76d8596 100644 --- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out +++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out @@ -14,7 +14,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -75,7 +75,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -86,10 +86,10 @@ step s2-public-schema: SET search_path TO public; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; @@ -107,7 +107,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -185,7 +185,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -246,10 +246,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -260,10 +260,10 @@ step s2-public-schema: SET search_path TO public; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; @@ -275,7 +275,7 @@ create_distributed_table (1 row) step s2-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects @@ -284,7 +284,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -362,7 +362,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -423,16 +423,16 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-public-schema: SET search_path TO public; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); create_distributed_table --------------------------------------------------------------------- @@ -440,10 +440,10 @@ create_distributed_table (1 row) step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-worker: <... completed> ?column? @@ -461,7 +461,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -525,7 +525,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s1-commit s2-create-table s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -539,7 +539,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -600,7 +600,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -610,17 +610,17 @@ step s1-add-worker: step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; - -step s2-create-table: - CREATE TABLE t1 (a int, b int); - -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; -step s2-create-table: <... completed> +step s2-create-schema: <... completed> +step s2-create-table: + CREATE TABLE t1 (a int, b int); + -- session needs to have replication factor set to 1, can't do in setup + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); + create_distributed_table --------------------------------------------------------------------- @@ -633,7 +633,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -698,7 +698,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s1-commit s2-create-table s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -712,7 +712,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -773,10 +773,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -786,24 +786,24 @@ step s1-add-worker: step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; - -step s2-create-table: - CREATE TABLE t1 (a int, b int); - -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; -step s2-create-table: <... completed> +step s2-create-schema: <... completed> +step s2-create-table: + CREATE TABLE t1 (a int, b int); + -- session needs to have replication factor set to 1, can't do in setup + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); + create_distributed_table --------------------------------------------------------------------- (1 row) step s2-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects @@ -812,7 +812,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -877,7 +877,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s1-add-worker s2-create-table s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -891,7 +891,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -952,28 +952,28 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; -step s2-create-table: - CREATE TABLE t1 (a int, b int); +step s1-add-worker: + SELECT 1 FROM master_add_node('localhost', 57638); + +step s2-create-table: + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); create_distributed_table --------------------------------------------------------------------- (1 row) -step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); - -step s2-commit: - COMMIT; +step s2-commit: + COMMIT; step s1-add-worker: <... completed> ?column? 
@@ -991,7 +991,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1070,7 +1070,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1135,10 +1135,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -1146,10 +1146,10 @@ step s1-add-worker: (1 row) step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); step s1-commit: COMMIT; @@ -1161,7 +1161,7 @@ create_distributed_table (1 row) step s2-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects @@ -1170,7 +1170,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1249,7 +1249,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1307,7 +1307,7 @@ master_remove_node (1 row) step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -1319,19 +1319,19 @@ step s2-create-schema: SET search_path TO myschema; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s3-use-schema: SET search_path TO myschema; step s2-create-table: - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); create_distributed_table --------------------------------------------------------------------- @@ -1339,22 +1339,21 @@ create_distributed_table (1 row) step s3-create-table: - CREATE TABLE t2 (a int, b int); + CREATE TABLE t2 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t2', 'a'); - -step s2-commit: - COMMIT; + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t2', 'a'); -step s3-create-table: <... completed> create_distributed_table --------------------------------------------------------------------- (1 row) +step s2-commit: + COMMIT; + step s3-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects @@ -1363,7 +1362,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1429,7 +1428,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s1-commit s2-create-table s2-commit s3-create-table s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -1443,7 +1442,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1504,13 +1503,13 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -1520,43 +1519,43 @@ step s1-add-worker: step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; - -step s3-create-schema2: + +step s3-create-schema2: CREATE SCHEMA myschema2; SET search_path TO myschema2; - -step s2-create-table: - CREATE TABLE t1 (a int, b int); - -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); - -step s3-create-table: - CREATE TABLE t2 (a int, b int); - -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t2', 'a'); step s1-commit: COMMIT; -step s2-create-table: <... completed> +step s2-create-schema: <... completed> +step s3-create-schema2: <... completed> +step s2-create-table: + CREATE TABLE t1 (a int, b int); + -- session needs to have replication factor set to 1, can't do in setup + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); + create_distributed_table --------------------------------------------------------------------- (1 row) -step s3-create-table: <... completed> +step s2-commit: + COMMIT; + +step s3-create-table: + CREATE TABLE t2 (a int, b int); + -- session needs to have replication factor set to 1, can't do in setup + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t2', 'a'); + create_distributed_table --------------------------------------------------------------------- (1 row) step s3-commit: - COMMIT; - -step s2-commit: - COMMIT; + COMMIT; step s2-print-distributed-objects: -- print an overview of all distributed objects @@ -1565,7 +1564,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1646,7 +1645,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1707,7 +1706,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -1718,7 +1717,7 @@ step s2-public-schema: SET search_path TO public; step s2-create-type: - CREATE TYPE tt1 AS (a int, b int); + CREATE TYPE tt1 AS (a int, b int); step s1-commit: COMMIT; @@ -1731,7 +1730,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1809,7 +1808,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1873,10 +1872,10 @@ step s2-public-schema: SET search_path TO public; step s2-create-type: - CREATE TYPE tt1 AS (a int, b int); + CREATE TYPE tt1 AS (a int, b int); step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -1893,7 +1892,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -1971,7 +1970,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -2032,20 +2031,20 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-create-schema: CREATE SCHEMA myschema; SET search_path TO myschema; step s2-create-type: - CREATE TYPE tt1 AS (a int, b int); + CREATE TYPE tt1 AS (a int, b int); step s2-create-table-with-type: - CREATE TABLE t1 (a int, b tt1); + CREATE TABLE t1 (a int, b tt1); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); create_distributed_table --------------------------------------------------------------------- @@ -2053,10 +2052,10 @@ create_distributed_table (1 
row) step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: - COMMIT; + COMMIT; step s1-add-worker: <... completed> ?column? @@ -2074,7 +2073,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -2154,7 +2153,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -2215,7 +2214,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -2239,10 +2238,10 @@ create_distributed_function (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-commit: - COMMIT; + COMMIT; step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); @@ -2259,7 +2258,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -2337,7 +2336,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -2410,10 +2409,10 @@ create_distributed_function (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-commit: - COMMIT; + COMMIT; step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); @@ -2424,7 +2423,7 @@ wait_until_metadata_sync (1 row) step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
--------------------------------------------------------------------- @@ -2449,7 +2448,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -2527,7 +2526,7 @@ step s1-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; @@ -2585,7 +2584,7 @@ master_remove_node (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-create-schema: CREATE SCHEMA myschema; @@ -2601,7 +2600,7 @@ create_distributed_function (1 row) step s2-commit: - COMMIT; + COMMIT; step s3-wait-for-metadata-sync: SELECT public.wait_until_metadata_sync(5000); @@ -2615,7 +2614,7 @@ step s1-begin: BEGIN; step s1-add-worker: - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); ?column? --------------------------------------------------------------------- @@ -2640,7 +2639,7 @@ step s2-print-distributed-objects: SELECT count(*) FROM pg_namespace where nspname = 'myschema'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created SELECT count(*) FROM pg_proc WHERE proname='add'; diff --git a/src/test/regress/expected/isolation_extension_commands.out b/src/test/regress/expected/isolation_extension_commands.out index b8ab7bc94..028ec21f0 100644 --- a/src/test/regress/expected/isolation_extension_commands.out +++ b/src/test/regress/expected/isolation_extension_commands.out @@ -471,15 +471,16 @@ step s2-create-extension-version-11: step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); + +step s2-commit: + COMMIT; +step s1-add-node-1: <... completed> ?column? 
--------------------------------------------------------------------- 1 (1 row) -step s2-commit: - COMMIT; - step s1-print: select count(*) from pg_catalog.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; @@ -489,7 +490,7 @@ step s1-print: count --------------------------------------------------------------------- - 6 + 7 (1 row) extname|extversion|nspname @@ -499,20 +500,20 @@ seg | 1.1|public run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,"") -(localhost,57638,t,"") +(localhost,57637,t,seg) +(localhost,57638,t,seg) (2 rows) run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,"") -(localhost,57638,t,"") +(localhost,57637,t,1.1) +(localhost,57638,t,1.1) (2 rows) run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,"") -(localhost,57638,t,"") +(localhost,57637,t,public) +(localhost,57638,t,public) (2 rows) master_remove_node @@ -678,15 +679,16 @@ step s2-create-extension-with-schema1: step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); + +step s2-commit: + COMMIT; +step s1-add-node-1: <... completed> ?column? --------------------------------------------------------------------- 1 (1 row) -step s2-commit: - COMMIT; - step s1-print: select count(*) from pg_catalog.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; @@ -696,7 +698,7 @@ step s1-print: count --------------------------------------------------------------------- - 6 + 7 (1 row) extname|extversion|nspname @@ -706,20 +708,20 @@ seg | 1.3|schema1 run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,"") -(localhost,57638,t,"") +(localhost,57637,t,seg) +(localhost,57638,t,seg) (2 rows) run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,"") -(localhost,57638,t,"") +(localhost,57637,t,1.3) +(localhost,57638,t,1.3) (2 rows) run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,"") -(localhost,57638,t,"") +(localhost,57637,t,schema1) +(localhost,57638,t,schema1) (2 rows) master_remove_node @@ -820,15 +822,16 @@ step s2-create-extension-version-11: step s1-remove-node-1: SELECT 1 FROM master_remove_node('localhost', 57637); + +step s2-commit: + COMMIT; +step s1-remove-node-1: <... completed> ?column? 
--------------------------------------------------------------------- 1 (1 row) -step s2-commit: - COMMIT; - step s1-print: select count(*) from pg_catalog.pg_dist_object ; select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg'; @@ -838,7 +841,7 @@ step s1-print: count --------------------------------------------------------------------- - 6 + 7 (1 row) extname|extversion|nspname @@ -848,17 +851,17 @@ seg | 1.1|public run_command_on_workers --------------------------------------------------------------------- -(localhost,57638,t,"") +(localhost,57638,t,seg) (1 row) run_command_on_workers --------------------------------------------------------------------- -(localhost,57638,t,"") +(localhost,57638,t,1.1) (1 row) run_command_on_workers --------------------------------------------------------------------- -(localhost,57638,t,"") +(localhost,57638,t,public) (1 row) master_remove_node @@ -938,7 +941,7 @@ run_command_on_workers run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,schema2) +(localhost,57637,t,schema1) (localhost,57638,t,"") (2 rows) diff --git a/src/test/regress/expected/isolation_metadata_sync_vs_all.out b/src/test/regress/expected/isolation_metadata_sync_vs_all.out index 49ce8fa3e..c696bcb1e 100644 --- a/src/test/regress/expected/isolation_metadata_sync_vs_all.out +++ b/src/test/regress/expected/isolation_metadata_sync_vs_all.out @@ -253,8 +253,8 @@ start_metadata_sync_to_node (1 row) step s2-create-schema: - CREATE SCHEMA dist_schema - CREATE TABLE dist_table_in_schema(id int, data int); + CREATE SCHEMA dist_schema; + CREATE TABLE dist_schema.dist_table_in_schema(id int, data int); SELECT create_distributed_table('dist_schema.dist_table_in_schema', 'id'); step s1-commit: @@ -299,8 +299,8 @@ step s2-drop-schema: starting permutation: s2-create-schema s1-begin s2-begin s1-start-metadata-sync s2-drop-schema s1-commit s2-commit s3-compare-snapshot step s2-create-schema: - CREATE SCHEMA dist_schema - CREATE TABLE dist_table_in_schema(id int, data int); + CREATE SCHEMA dist_schema; + CREATE TABLE dist_schema.dist_table_in_schema(id int, data int); SELECT create_distributed_table('dist_schema.dist_table_in_schema', 'id'); create_distributed_table @@ -1017,8 +1017,8 @@ t starting permutation: s2-create-schema s1-begin s2-begin s2-drop-schema s1-start-metadata-sync s2-commit s1-commit s3-compare-snapshot step s2-create-schema: - CREATE SCHEMA dist_schema - CREATE TABLE dist_table_in_schema(id int, data int); + CREATE SCHEMA dist_schema; + CREATE TABLE dist_schema.dist_table_in_schema(id int, data int); SELECT create_distributed_table('dist_schema.dist_table_in_schema', 'id'); create_distributed_table @@ -1154,10 +1154,11 @@ start_metadata_sync_to_node step s2-create-type: CREATE TYPE my_type AS (a int, b int); - -step s1-commit: + +step s1-commit: COMMIT; +step s2-create-type: <... 
completed> step s2-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys.out index 6c63be9c1..c45405fdb 100644 --- a/src/test/regress/expected/isolation_ref2ref_foreign_keys.out +++ b/src/test/regress/expected/isolation_ref2ref_foreign_keys.out @@ -38,6 +38,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s2-begin s2-delete-table-1 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -71,6 +76,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s2-begin s2-update-table-2 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -104,6 +114,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s2-begin s2-delete-table-2 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -137,6 +152,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s2-begin s2-update-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks step s2-begin: @@ -176,6 +196,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s2-begin s2-delete-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks step s2-begin: @@ -215,6 +240,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s2-begin s2-insert-table-1 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -248,6 +278,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s2-begin s2-insert-table-2 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -281,6 +316,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s2-begin s2-insert-table-3 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -314,6 +354,11 @@ mode|count --------------------------------------------------------------------- (0 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-update-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -344,6 +389,11 @@ id|value 5| 5 (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: 
s1-begin s2-begin s2-update-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -376,6 +426,11 @@ id|value 7| 2 (4 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-update-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -407,6 +462,11 @@ id|value 5| 5 (3 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -437,6 +497,11 @@ id|value 5| 5 (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -468,6 +533,11 @@ id|value 5| 5 (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -498,6 +568,11 @@ id|value 5| 5 (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -532,6 +607,11 @@ id|value 5| 5 (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -563,6 +643,11 @@ id|value 5| 5 (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -597,6 +682,11 @@ id|value 5| 5 (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -633,6 +723,11 @@ id|value 5| 5 (3 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-1 s1-commit s2-commit s1-select-table-3 step s1-begin: @@ -669,6 +764,11 @@ id|value 5| 5 (3 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -700,6 +800,11 @@ id|value 5| 5 (3 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s1-update-table-2 s2-insert-table-1 s1-commit s2-commit s1-select-table-3 step s1-begin: @@ -731,6 +836,11 @@ id|value 5| 5 (3 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-insert-table-2 s1-update-table-3 s2-commit s1-commit s1-select-table-3 step 
s1-begin: @@ -767,6 +877,11 @@ id|value 5| 5 (3 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-2 s1-commit s2-commit s1-select-table-3 step s1-begin: @@ -803,6 +918,11 @@ id|value 5| 5 (3 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-1 s2-commit s1-commit step s1-begin: @@ -830,6 +950,11 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-2 s2-commit s1-commit step s1-begin: @@ -857,6 +982,11 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-3 s2-commit s1-commit step s1-begin: @@ -884,6 +1014,11 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-1 s2-commit s1-commit step s1-begin: @@ -911,6 +1046,11 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-2 s2-commit s1-commit step s1-begin: @@ -938,6 +1078,11 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-3 s2-commit s1-commit step s1-begin: @@ -965,6 +1110,11 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-1 s2-commit s1-commit step s1-begin: @@ -992,6 +1142,11 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-2 s2-commit s1-commit step s1-begin: @@ -1019,6 +1174,11 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-3 s2-commit s1-commit step s1-begin: @@ -1046,3 +1206,8 @@ step s2-commit: step s1-commit: COMMIT; +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_reference_copy_vs_all.out b/src/test/regress/expected/isolation_reference_copy_vs_all.out index a90eec3ef..c73cc3031 100644 --- a/src/test/regress/expected/isolation_reference_copy_vs_all.out +++ b/src/test/regress/expected/isolation_reference_copy_vs_all.out @@ -17,6 +17,11 @@ count 15 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin 
s1-copy s2-router-select s1-commit s1-select-count create_reference_table @@ -40,6 +45,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_reference_table @@ -67,6 +77,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count create_reference_table @@ -96,6 +111,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_reference_table @@ -114,6 +134,11 @@ count 11 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_reference_table @@ -133,6 +158,11 @@ count 20 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_reference_table @@ -152,6 +182,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_reference_table @@ -171,6 +206,11 @@ count 9 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_reference_table @@ -190,6 +230,11 @@ count 0 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_reference_table @@ -205,6 +250,11 @@ step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; ERROR: relation "reference_copy" does not exist +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_reference_table @@ -227,10 +277,15 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) -(localhost,57638,t,1) +(localhost,57637,t,2) +(localhost,57638,t,2) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_reference_table @@ -258,6 +313,11 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_reference_table @@ -280,10 +340,15 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) -(localhost,57638,t,1) +(localhost,57637,t,2) +(localhost,57638,t,2) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -310,6 +375,11 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -337,6 +407,11 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -363,6 +438,11 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_reference_table @@ -386,6 +466,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_reference_table @@ -405,6 +490,11 @@ count 0 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_reference_table 
@@ -431,6 +521,11 @@ count 15 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_reference_table @@ -454,6 +549,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_reference_table @@ -481,6 +581,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count create_reference_table @@ -510,6 +615,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_reference_table @@ -528,6 +638,11 @@ count 11 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_reference_table @@ -547,6 +662,11 @@ count 15 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_reference_table @@ -566,6 +686,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_reference_table @@ -585,6 +710,11 @@ count 9 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_reference_table @@ -604,6 +734,11 @@ count 5 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_reference_table @@ -620,6 +755,11 @@ step s2-copy: <... 
completed> ERROR: relation "reference_copy" does not exist step s1-select-count: SELECT COUNT(*) FROM reference_copy; ERROR: relation "reference_copy" does not exist +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table @@ -642,10 +782,15 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) -(localhost,57638,t,1) +(localhost,57637,t,2) +(localhost,57638,t,2) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table @@ -673,6 +818,11 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -700,6 +850,11 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -727,6 +882,11 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -753,6 +913,11 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_reference_table @@ -776,6 +941,11 @@ count 10 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_reference_table @@ -795,6 +965,11 @@ count 5 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_reference_table @@ -821,3 +996,8 @@ count 15 (1 row) +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_turn_mx_off.out b/src/test/regress/expected/isolation_turn_mx_off.out deleted file mode 100644 index 4c004c2fa..000000000 --- a/src/test/regress/expected/isolation_turn_mx_off.out +++ /dev/null @@ -1,21 +0,0 @@ -Parsed test spec with 1 sessions - -starting permutation: disable-mx-by-default reload stop-metadata-sync -step disable-mx-by-default: - ALTER SYSTEM SET 
citus.enable_metadata_sync TO OFF; - -step reload: - SELECT pg_reload_conf(); - -pg_reload_conf ---------------------------------------------------------------------- -t -(1 row) - -step stop-metadata-sync: - SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; - -stop_metadata_sync_to_node ---------------------------------------------------------------------- -(0 rows) - diff --git a/src/test/regress/expected/isolation_turn_mx_off_0.out b/src/test/regress/expected/isolation_turn_mx_off_0.out deleted file mode 100644 index bb41b2412..000000000 --- a/src/test/regress/expected/isolation_turn_mx_off_0.out +++ /dev/null @@ -1,23 +0,0 @@ -Parsed test spec with 1 sessions - -starting permutation: disable-mx-by-default reload stop-metadata-sync -step disable-mx-by-default: - ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; - -step reload: - SELECT pg_reload_conf(); - -pg_reload_conf ---------------------------------------------------------------------- -t -(1 row) - -step stop-metadata-sync: - SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; - -stop_metadata_sync_to_node ---------------------------------------------------------------------- - - -(2 rows) - diff --git a/src/test/regress/expected/isolation_turn_mx_on.out b/src/test/regress/expected/isolation_turn_mx_on.out deleted file mode 100644 index 8f65d92bd..000000000 --- a/src/test/regress/expected/isolation_turn_mx_on.out +++ /dev/null @@ -1,21 +0,0 @@ -Parsed test spec with 1 sessions - -starting permutation: enable-mx-by-default reload start-metadata-sync -step enable-mx-by-default: - ALTER SYSTEM SET citus.enable_metadata_sync TO ON; - -step reload: - SELECT pg_reload_conf(); - -pg_reload_conf ---------------------------------------------------------------------- -t -(1 row) - -step start-metadata-sync: - SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; - -start_metadata_sync_to_node ---------------------------------------------------------------------- -(0 rows) - diff --git a/src/test/regress/expected/isolation_turn_mx_on_0.out b/src/test/regress/expected/isolation_turn_mx_on_0.out deleted file mode 100644 index bf173e1ab..000000000 --- a/src/test/regress/expected/isolation_turn_mx_on_0.out +++ /dev/null @@ -1,23 +0,0 @@ -Parsed test spec with 1 sessions - -starting permutation: enable-mx-by-default reload start-metadata-sync -step enable-mx-by-default: - ALTER SYSTEM SET citus.enable_metadata_sync TO ON; - -step reload: - SELECT pg_reload_conf(); - -pg_reload_conf ---------------------------------------------------------------------- -t -(1 row) - -step start-metadata-sync: - SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; - -start_metadata_sync_to_node ---------------------------------------------------------------------- - - -(2 rows) - diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out index 2658ca5ef..ff2b76f00 100644 --- a/src/test/regress/expected/isolation_update_node.out +++ b/src/test/regress/expected/isolation_update_node.out @@ -1,6 +1,6 @@ -Parsed test spec with 2 sessions +Parsed test spec with 3 sessions -starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit s1-show-nodes +starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit 
s1-show-nodes s3-update-node-1-back s3-update-node-2-back s3-manually-fix-metadata nodeid|nodename |nodeport --------------------------------------------------------------------- 22|localhost| 57638 @@ -47,12 +47,50 @@ nodeid|nodename |nodeport|isactive 22|localhost| 58638|t (2 rows) +step s3-update-node-1-back: + SELECT 1 FROM master_update_node( + (select nodeid from pg_dist_node where nodeport = 58637), + 'localhost', + 57637); + +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s3-update-node-2-back: + SELECT 1 FROM master_update_node( + (select nodeid from pg_dist_node where nodeport = 58638), + 'localhost', + 57638); + +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s3-manually-fix-metadata: + UPDATE pg_dist_node SET metadatasynced = 't' WHERE nodeport = 57637; + UPDATE pg_dist_node SET metadatasynced = 't' WHERE nodeport = 57638; + SELECT start_metadata_sync_to_node('localhost', 57637); + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + nodeid|nodename|nodeport --------------------------------------------------------------------- (0 rows) -starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes +starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes s3-update-node-1-back s3-manually-fix-metadata nodeid|nodename |nodeport --------------------------------------------------------------------- 24|localhost| 57638 @@ -105,60 +143,31 @@ nodeid|nodename |nodeport|isactive 23|localhost| 58637|t (2 rows) -nodeid|nodename|nodeport ---------------------------------------------------------------------- -(0 rows) - - -starting permutation: s1-begin s1-update-node-1 s2-start-metadata-sync-node-2 s1-commit s2-verify-metadata -nodeid|nodename |nodeport ---------------------------------------------------------------------- - 26|localhost| 57638 - 25|localhost| 57637 -(2 rows) - -step s1-begin: - BEGIN; - -step s1-update-node-1: +step s3-update-node-1-back: SELECT 1 FROM master_update_node( - (select nodeid from pg_dist_node where nodeport = 57637), + (select nodeid from pg_dist_node where nodeport = 58637), 'localhost', - 58637); + 57637); ?column? --------------------------------------------------------------------- 1 (1 row) -step s2-start-metadata-sync-node-2: +step s3-manually-fix-metadata: + UPDATE pg_dist_node SET metadatasynced = 't' WHERE nodeport = 57637; + UPDATE pg_dist_node SET metadatasynced = 't' WHERE nodeport = 57638; + SELECT start_metadata_sync_to_node('localhost', 57637); SELECT start_metadata_sync_to_node('localhost', 57638); - -step s1-commit: - COMMIT; -step s2-start-metadata-sync-node-2: <... 
completed> start_metadata_sync_to_node --------------------------------------------------------------------- (1 row) -step s2-verify-metadata: - SELECT nodeid, groupid, nodename, nodeport FROM pg_dist_node ORDER BY nodeid; - SELECT master_run_on_worker( - ARRAY['localhost'], ARRAY[57638], - ARRAY['SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport) ORDER BY nodeid) FROM pg_dist_node'], - false); - -nodeid|groupid|nodename |nodeport +start_metadata_sync_to_node --------------------------------------------------------------------- - 25| 25|localhost| 58637 - 26| 26|localhost| 57638 -(2 rows) -master_run_on_worker ---------------------------------------------------------------------- -(localhost,57638,t,"[{""f1"": 10, ""f2"": 10, ""f3"": ""localhost"", ""f4"": 57638}]") (1 row) nodeid|nodename|nodeport @@ -166,11 +175,11 @@ nodeid|nodename|nodeport (0 rows) -starting permutation: s2-create-table s1-begin s1-update-node-nonexistent s1-prepare-transaction s2-cache-prepared-statement s1-commit-prepared s2-execute-prepared s1-update-node-existent s2-drop-table +starting permutation: s2-create-table s1-begin s1-update-node-nonexistent s1-prepare-transaction s2-cache-prepared-statement s1-commit-prepared s2-execute-prepared s1-update-node-existent s3-manually-fix-metadata nodeid|nodename |nodeport --------------------------------------------------------------------- - 28|localhost| 57638 - 27|localhost| 57637 + 26|localhost| 57638 + 25|localhost| 57637 (2 rows) step s2-create-table: @@ -261,8 +270,21 @@ step s1-update-node-existent: 1 (1 row) -step s2-drop-table: - DROP TABLE test; +step s3-manually-fix-metadata: + UPDATE pg_dist_node SET metadatasynced = 't' WHERE nodeport = 57637; + UPDATE pg_dist_node SET metadatasynced = 't' WHERE nodeport = 57638; + SELECT start_metadata_sync_to_node('localhost', 57637); + SELECT start_metadata_sync_to_node('localhost', 57638); + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) nodeid|nodename|nodeport --------------------------------------------------------------------- diff --git a/src/test/regress/expected/local_table_join.out b/src/test/regress/expected/local_table_join.out index f2abb1066..effe23b0d 100644 --- a/src/test/regress/expected/local_table_join.out +++ b/src/test/regress/expected/local_table_join.out @@ -1392,11 +1392,8 @@ DEBUG: generating subplan XXX_1 for subquery SELECT true AS bool FROM pg_am LIM DEBUG: Wrapping relation "custom_pg_type" to a subquery DEBUG: generating subplan XXX_2 for subquery SELECT typdefault FROM local_table_join.custom_pg_type WHERE true DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT typdefault FROM (SELECT subq_1.typdefault FROM (SELECT custom_pg_type.typdefault FROM (SELECT custom_pg_type_1.typdefault FROM (SELECT intermediate_result.typdefault FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(typdefault text)) custom_pg_type_1) custom_pg_type, LATERAL (SELECT tbl.a FROM local_table_join.tbl WHERE (custom_pg_type.typdefault OPERATOR(pg_catalog.>) 'a'::text) LIMIT 1) subq_0 WHERE (SELECT intermediate_result.bool FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(bool boolean))) subq_1) subq_2 - typdefault ---------------------------------------------------------------------- - b -(1 row) - +ERROR: cannot push down this subquery 
+DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables -- Not supported because of 4470 select typdefault from ( select typdefault from ( diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 9651fcc63..e26a84a08 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -593,11 +593,11 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal gpid=10000000001'; - \set VERBOSITY terse - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) - AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SET application_name to 'citus_internal gpid=10000000001'; + \set VERBOSITY terse + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) + AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) + SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: unrecognized object type "non_existing_type" ROLLBACK; -- check the sanity of distributionArgumentIndex and colocationId @@ -637,11 +637,11 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal gpid=10000000001'; - \set VERBOSITY terse - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) - AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SET application_name to 'citus_internal gpid=10000000001'; + \set VERBOSITY terse + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) + AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) + SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: role "non_existing_user" does not exist ROLLBACK; -- since citus_internal_add_object_metadata is strict function returns NULL @@ -653,11 +653,11 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal gpid=10000000001'; - \set VERBOSITY terse - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) - AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SET application_name to 'citus_internal gpid=10000000001'; + \set VERBOSITY terse + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) + AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) + SELECT 
citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; citus_internal_add_object_metadata --------------------------------------------------------------------- @@ -711,6 +711,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; + SET citus.enable_ddl_propagation TO OFF; \set VERBOSITY terse CREATE TYPE distributed_test_type AS (a int, b int); SET ROLE metadata_sync_helper_role; @@ -1323,6 +1324,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ERROR: cannot colocate tables test_6 and test_5 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SET citus.enable_ddl_propagation TO OFF; CREATE COLLATION collation_t1 (provider = icu, locale = 'de-u-co-phonebk'); CREATE COLLATION caseinsensitive (provider = icu, locale = 'und-u-ks-level2'); -- colocated hash distributed table should have the same dist key collations diff --git a/src/test/regress/expected/multi_colocated_shard_rebalance.out b/src/test/regress/expected/multi_colocated_shard_rebalance.out index 939414ef9..94097218b 100644 --- a/src/test/regress/expected/multi_colocated_shard_rebalance.out +++ b/src/test/regress/expected/multi_colocated_shard_rebalance.out @@ -492,11 +492,12 @@ SELECT "Constraint", "Definition" FROM table_fkeys WHERE "Constraint" LIKE 'table2_group%' OR "Constraint" LIKE 'table1_group%'; Constraint | Definition --------------------------------------------------------------------- + table2_group1_table1_id_fkey | FOREIGN KEY (table1_id) REFERENCES table1_group1(id) table2_group1_table1_id_fkey_13000028 | FOREIGN KEY (table1_id) REFERENCES table1_group1_13000022(id) table2_group1_table1_id_fkey_13000029 | FOREIGN KEY (table1_id) REFERENCES table1_group1_13000023(id) table2_group1_table1_id_fkey_13000031 | FOREIGN KEY (table1_id) REFERENCES table1_group1_13000025(id) table2_group1_table1_id_fkey_13000033 | FOREIGN KEY (table1_id) REFERENCES table1_group1_13000027(id) -(4 rows) +(5 rows) \c - - - :master_port -- test shard copy with foreign constraints diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out index 048397aec..f72d8debe 100644 --- a/src/test/regress/expected/multi_explain.out +++ b/src/test/regress/expected/multi_explain.out @@ -3043,5 +3043,18 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1) -> Task Node: host=localhost port=xxxxx dbname=regression -> Seq Scan on distributed_table_1_570032 distributed_table_xxx (actual rows=1 loops=1) +CREATE TYPE multi_explain.int_wrapper_type AS (int_field int); +CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type); +SELECT create_distributed_table('tbl', 'a'); + +EXPLAIN :default_analyze_flags SELECT * FROM tbl; +Custom Scan (Citus Adaptive) (actual rows=0 loops=1) + Task Count: 2 + Tuple data received from nodes: 0 bytes + Tasks Shown: One of 2 + -> Task + Tuple data received from node: 0 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on tbl_570036 tbl (actual rows=0 loops=1) SET client_min_messages TO ERROR; DROP SCHEMA multi_explain CASCADE; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 31f79c83a..2e23f6c63 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1002,6 +1002,7 @@ SELECT * FROM multi_extension.print_extension_changes(); | function citus_calculate_gpid(integer,integer) 
bigint | function citus_check_cluster_node_health() SETOF record | function citus_check_connection_to_node(text,integer) boolean + | function citus_coordinator_nodeid() integer | function citus_disable_node(text,integer,boolean) void | function citus_finalize_upgrade_to_citus11(boolean) boolean | function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) void @@ -1028,7 +1029,7 @@ SELECT * FROM multi_extension.print_extension_changes(); | function worker_partition_query_result(text,text,integer,citus.distribution_type,text[],text[],boolean,boolean,boolean) SETOF record | table pg_dist_object | view citus_stat_activity -(40 rows) +(41 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index ab0e62964..91a65dc02 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -644,6 +644,8 @@ ALTER INDEX p1_dist_col_idx RENAME TO p1_dist_col_idx_renamed; SET citus.log_remote_commands TO ON; CREATE TABLE p2(dist_col int NOT NULL, another_col int, partition_col timestamp NOT NULL, name text) USING columnar; ALTER TABLE parent_table ATTACH PARTITION p2 FOR VALUES FROM ('2019-01-01') TO ('2020-01-01'); +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names', 'CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, another_col integer, partition_col timestamp without time zone NOT NULL, name text) USING columnar') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT alter_columnar_table_set('fix_idx_names.p2_915002', chunk_group_row_limit => 10000, stripe_row_limit => 150000, compression_level => 3, compression => 'zstd'); @@ -652,8 +654,6 @@ NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names', DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out index 59a2626af..e10dabf2b 100644 --- a/src/test/regress/expected/multi_index_statements.out +++ b/src/test/regress/expected/multi_index_statements.out @@ -631,6 +631,50 @@ BEGIN CREATE INDEX ON distributed_table(last_column); END; $BODY$ LANGUAGE plpgsql; +CREATE TABLE test_for_func( + a int +); +SELECT create_distributed_table('test_for_func', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- create a function that depends on a relation that depends on an extension +CREATE OR REPLACE FUNCTION 
function_on_table_depends_on_extension ( + p_table_name text) +RETURNS TABLE (LIKE pg_dist_partition) +AS $$ +BEGIN + RETURN QUERY + SELECT * FROM pg_dist_partition WHERE logicalrelid::regclass::text = p_table_name; +END; +$$ LANGUAGE plpgsql; +SELECT logicalrelid FROM function_on_table_depends_on_extension('test_for_func'); + logicalrelid +--------------------------------------------------------------------- + test_for_func +(1 row) + +-- create a function that depends on a relation that does not depend on an extension +CREATE TABLE local_test(a int); +CREATE OR REPLACE FUNCTION function_on_table_does_not_depend_on_extension ( + input int) +RETURNS TABLE (LIKE local_test) +AS $$ +BEGIN + RETURN QUERY + SELECT * FROM local_test WHERE a = input; +END; +$$ LANGUAGE plpgsql; +WARNING: "function function_on_table_does_not_depend_on_extension(integer)" has dependency to "table local_test" that is not in Citus' metadata +DETAIL: "function function_on_table_does_not_depend_on_extension(integer)" will be created only locally +HINT: Distribute "table local_test" first to distribute "function function_on_table_does_not_depend_on_extension(integer)" +SELECT * FROM function_on_table_does_not_depend_on_extension(5); + a +--------------------------------------------------------------------- +(0 rows) + -- hide plpgsql messages as they differ across pg versions \set VERBOSITY terse SELECT create_index_in_plpgsql(); diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index e85620826..749254292 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -1819,7 +1819,13 @@ SELECT pg_reload_conf(); t (1 row) -UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port; +-- make sure that all the nodes have valid metadata before moving forward +SELECT wait_until_metadata_sync(60000); + wait_until_metadata_sync +--------------------------------------------------------------------- + +(1 row) + SELECT master_add_node('localhost', :worker_2_port); master_add_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_move_mx.out b/src/test/regress/expected/multi_move_mx.out index d447be98f..fec42b594 100644 --- a/src/test/regress/expected/multi_move_mx.out +++ b/src/test/regress/expected/multi_move_mx.out @@ -2,12 +2,6 @@ -- MULTI_MOVE_MX -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1550000; -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -- Create mx test tables SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; @@ -220,15 +214,3 @@ HINT: Connect to the coordinator and run it again. 
DROP TABLE mx_table_1; DROP TABLE mx_table_2; DROP TABLE mx_table_3; -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -\c - - - :worker_2_port -DELETE FROM pg_dist_node; -DELETE FROM pg_dist_partition; -DELETE FROM pg_dist_shard; -DELETE FROM pg_dist_shard_placement; diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index 469677c10..c0265b282 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -71,6 +71,10 @@ SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_name --------------------------------------------------------------------- (0 rows) +-- make sure that pg_class queries do not get blocked on table locks +begin; +lock table test_table in access exclusive mode; +prepare transaction 'take-aggressive-lock'; -- shards are hidden when using psql as application_name SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; relname @@ -78,6 +82,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name test_table (1 row) +commit prepared 'take-aggressive-lock'; -- now create an index \c - - - :master_port SET search_path TO 'mx_hide_shard_names'; @@ -398,6 +403,86 @@ SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'CiTuS.TeeN' ORDER CiTuS.TeeN | MyTenantIndex | index | postgres | TeeNTabLE.1!?! (1 row) +\c - - - :worker_1_port +-- re-connect to the worker node and show that only +-- client backends can filter shards +SET search_path TO "CiTuS.TeeN"; +-- Create the necessary test utility function +SET citus.enable_metadata_sync TO off; +CREATE OR REPLACE FUNCTION set_backend_type(backend_type int) + RETURNS void + LANGUAGE C STRICT + AS 'citus'; +RESET citus.enable_metadata_sync; +-- the shards and indexes do not show up +SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + relname +--------------------------------------------------------------------- + test_index + test_table + test_table_102008 + test_table_2_1130000 +(4 rows) + +-- say, we set it to bgworker +-- the shards and indexes do not show up +SELECT set_backend_type(4); +NOTICE: backend type switched to: background worker + set_backend_type +--------------------------------------------------------------------- + +(1 row) + +SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + relname +--------------------------------------------------------------------- + test_index + test_table + test_table_102008 + test_table_2_1130000 +(4 rows) + +-- or, we set it to walsender +-- the shards and indexes do show up +SELECT set_backend_type(9); +NOTICE: backend type switched to: walsender + set_backend_type +--------------------------------------------------------------------- + +(1 row) + +SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + relname +--------------------------------------------------------------------- + test_index + test_index_1130000 + test_index_1130002 + test_table + test_table_102008 + test_table_102008_1130004 + test_table_102008_1130006 + test_table_1130000 + 
test_table_1130002 + test_table_2_1130000 +(10 rows) + +-- but, client backends to see the shards +SELECT set_backend_type(3); +NOTICE: backend type switched to: client backend + set_backend_type +--------------------------------------------------------------------- + +(1 row) + +SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + relname +--------------------------------------------------------------------- + test_index + test_table + test_table_102008 + test_table_2_1130000 +(4 rows) + -- clean-up \c - - - :master_port -- show that common psql functions do not show shards diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index 1228666c4..2037a670f 100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -538,7 +538,81 @@ SELECT run_command_on_workers($$DROP SCHEMA localschema;$$); (localhost,57638,f,"ERROR: schema ""localschema"" does not exist") (2 rows) +SET client_min_messages TO ERROR; +CREATE ROLE schema_owner WITH LOGIN; +RESET client_min_messages; +SELECT run_command_on_workers($$SET citus.enable_ddl_propagation TO OFF;CREATE ROLE schema_owner WITH LOGIN;RESET citus.enable_ddl_propagation;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,SET) + (localhost,57638,t,SET) +(2 rows) + RESET citus.enable_ddl_propagation; +-- create schema with the name of the owner +CREATE SCHEMA AUTHORIZATION schema_owner; +-- verify the schema is created on workers +SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_namespace WHERE nspname='schema_owner';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP SCHEMA schema_owner; +-- test CREATE SCHEMA .. 
GRANT ON SCHEMA commands +-- first create the role to be granted +SET citus.enable_ddl_propagation TO OFF; +SET client_min_messages TO ERROR; +CREATE ROLE role_to_be_granted WITH LOGIN; +RESET client_min_messages; +SELECT run_command_on_workers($$SET citus.enable_ddl_propagation TO OFF;CREATE ROLE role_to_be_granted WITH LOGIN;RESET citus.enable_ddl_propagation;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,SET) + (localhost,57638,t,SET) +(2 rows) + +RESET citus.enable_ddl_propagation; +CREATE SCHEMA old_schema; +CREATE SCHEMA new_schema + CREATE TABLE t1 (a int) + GRANT ALL ON SCHEMA old_schema TO role_to_be_granted + GRANT ALL ON SCHEMA new_schema TO role_to_be_granted; +-- the role should be granted on both the new and the old schema +SELECT nspacl FROM pg_namespace WHERE nspname='old_schema' OR nspname='new_schema'; + nspacl +--------------------------------------------------------------------- + {postgres=UC/postgres,role_to_be_granted=UC/postgres} + {postgres=UC/postgres,role_to_be_granted=UC/postgres} +(2 rows) + +-- verify on workers +SELECT run_command_on_workers($$SELECT nspacl FROM pg_namespace WHERE nspname='new_schema';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"{postgres=UC/postgres,role_to_be_granted=UC/postgres}") + (localhost,57638,t,"{postgres=UC/postgres,role_to_be_granted=UC/postgres}") +(2 rows) + +SELECT run_command_on_workers($$SELECT nspacl FROM pg_namespace WHERE nspname='old_schema';$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"{postgres=UC/postgres,role_to_be_granted=UC/postgres}") + (localhost,57638,t,"{postgres=UC/postgres,role_to_be_granted=UC/postgres}") +(2 rows) + +-- verify the table t1 is created as a local pg table +-- this might be changed after some improvements on use_citus_managed_tables +-- if so, please verify that t1 is added to metadata +SELECT COUNT(*)=0 FROM pg_dist_partition WHERE logicalrelid='new_schema.t1'::regclass; + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + +DROP SCHEMA old_schema, new_schema CASCADE; +NOTICE: drop cascades to table new_schema.t1 DROP SCHEMA mx_old_schema CASCADE; DROP SCHEMA mx_new_schema CASCADE; NOTICE: drop cascades to table mx_new_schema.table_set_schema diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out index aa5d10149..7bc282c51 100644 --- a/src/test/regress/expected/multi_replicate_reference_table.out +++ b/src/test/regress/expected/multi_replicate_reference_table.out @@ -281,6 +281,20 @@ WHERE colocationid IN (1 row) DROP TABLE replicate_reference_table_rollback; +-- confirm that there is just 1 node +SELECT count(*) FROM pg_dist_node; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- test whether we can create distributed objects on a single worker node +CREATE TABLE cp_test (a int, b text); +CREATE PROCEDURE ptest1(x text) +LANGUAGE SQL +AS $$ + INSERT INTO cp_test VALUES (1, x); +$$; -- test replicating a reference table when a new node added in TRANSACTION + COMMIT CREATE TABLE replicate_reference_table_commit(column1 int); SELECT create_reference_table('replicate_reference_table_commit'); diff --git a/src/test/regress/expected/multi_subquery_complex_queries.out b/src/test/regress/expected/multi_subquery_complex_queries.out index 5432f27c4..3d91da2a3 100644 --- a/src/test/regress/expected/multi_subquery_complex_queries.out +++ b/src/test/regress/expected/multi_subquery_complex_queries.out @@ -1274,16 +1274,8 @@ FROM ORDER BY user_id limit 50; - user_id | lastseen ---------------------------------------------------------------------- - 1 | Thu Nov 23 18:08:26.550729 2017 - 2 | Thu Nov 23 17:26:14.563216 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 6 | Thu Nov 23 18:08:26.550729 2017 -(6 rows) - +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables SELECT "some_users_data".user_id, lastseen FROM (SELECT 2 * user_id as user_id, max(time) AS lastseen @@ -1316,12 +1308,8 @@ FROM ORDER BY user_id limit 50; - user_id | lastseen ---------------------------------------------------------------------- - 4 | Thu Nov 23 17:26:14.563216 2017 - 6 | Thu Nov 23 18:08:26.550729 2017 -(2 rows) - +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables -- LATERAL JOINs used with INNER JOINs SET citus.subquery_pushdown to ON; NOTICE: Setting citus.subquery_pushdown flag is discouraged becuase it forces the planner to pushdown certain queries, skipping relevant correctness checks. 
@@ -1514,24 +1502,8 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: push down of limit count: 10 DEBUG: generating subplan XXX_2 for subquery SELECT filter_users_1.user_id, last_events_1."time" AS lastseen FROM ((SELECT user_where_1_1.user_id FROM ((SELECT users.user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_1 OPERATOR(pg_catalog.>) 2))) user_where_1_1 JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) user_where_1_join_1 ON ((user_where_1_1.user_id OPERATOR(pg_catalog.<>) user_where_1_join_1.user_id)))) filter_users_1 JOIN LATERAL (SELECT events.user_id, events."time" FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.user_id OPERATOR(pg_catalog.=) filter_users_1.user_id)) ORDER BY events."time" DESC LIMIT 1) last_events_1 ON (true)) ORDER BY last_events_1."time" DESC LIMIT 10 DEBUG: Router planner cannot handle multi-shard select queries -DEBUG: push down of limit count: 10 -DEBUG: generating subplan XXX_3 for subquery SELECT some_users_data.user_id, some_recent_users.lastseen FROM ((SELECT intermediate_result.user_id, intermediate_result.lastseen FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, lastseen timestamp without time zone)) some_recent_users JOIN LATERAL (SELECT users.user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.=) some_recent_users.user_id) AND (users.value_2 OPERATOR(pg_catalog.>) 4)) ORDER BY users.user_id LIMIT 1) some_users_data ON (true)) ORDER BY some_recent_users.lastseen DESC LIMIT 10 -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, lastseen FROM (SELECT intermediate_result.user_id, intermediate_result.lastseen FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, lastseen timestamp without time zone)) some_users ORDER BY user_id DESC, lastseen DESC LIMIT 10 -DEBUG: Creating router plan - user_id | lastseen ---------------------------------------------------------------------- - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 -(10 rows) - +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables SET citus.enable_repartition_joins to ON; SET client_min_messages TO DEBUG1; -- recursively planner since the inner JOIN is not on the partition key @@ -1590,23 +1562,8 @@ LIMIT 10; DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_1 FROM public.users_table users WHERE ((user_id OPERATOR(pg_catalog.>) 1) AND (user_id OPERATOR(pg_catalog.<) 4) AND (value_2 OPERATOR(pg_catalog.>) 3)) DEBUG: push down of limit count: 10 DEBUG: generating subplan XXX_2 for subquery SELECT filter_users_1.user_id, last_events_1."time" AS lastseen FROM ((SELECT user_where_1_1.user_id FROM ((SELECT users.user_id FROM public.users_table users WHERE 
((users.user_id OPERATOR(pg_catalog.>) 1) AND (users.user_id OPERATOR(pg_catalog.<) 4) AND (users.value_1 OPERATOR(pg_catalog.>) 2))) user_where_1_1 JOIN (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) user_where_1_join_1 ON ((user_where_1_1.user_id OPERATOR(pg_catalog.=) user_where_1_join_1.value_1)))) filter_users_1 JOIN LATERAL (SELECT events.user_id, events."time" FROM public.events_table events WHERE ((events.user_id OPERATOR(pg_catalog.>) 1) AND (events.user_id OPERATOR(pg_catalog.<) 4) AND (events.user_id OPERATOR(pg_catalog.=) filter_users_1.user_id)) ORDER BY events."time" DESC LIMIT 1) last_events_1 ON (true)) ORDER BY last_events_1."time" DESC LIMIT 10 -DEBUG: push down of limit count: 10 -DEBUG: generating subplan XXX_3 for subquery SELECT some_users_data.user_id, some_recent_users.lastseen FROM ((SELECT intermediate_result.user_id, intermediate_result.lastseen FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, lastseen timestamp without time zone)) some_recent_users JOIN LATERAL (SELECT users.user_id FROM public.users_table users WHERE ((users.user_id OPERATOR(pg_catalog.=) some_recent_users.user_id) AND (users.value_2 OPERATOR(pg_catalog.>) 4)) ORDER BY users.user_id LIMIT 1) some_users_data ON (true)) ORDER BY some_recent_users.lastseen DESC LIMIT 10 -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, lastseen FROM (SELECT intermediate_result.user_id, intermediate_result.lastseen FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, lastseen timestamp without time zone)) some_users ORDER BY user_id DESC, lastseen DESC LIMIT 10 - user_id | lastseen ---------------------------------------------------------------------- - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 - 3 | Thu Nov 23 18:08:26.550729 2017 -(10 rows) - +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables SET citus.enable_repartition_joins to OFF; RESET client_min_messages; -- not supported since upper LATERAL JOIN is not equi join @@ -1716,20 +1673,8 @@ FROM ORDER BY user_id DESC, lastseen DESC LIMIT 10; - user_id | lastseen ---------------------------------------------------------------------- - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 - 5 | Thu Nov 23 17:26:14.563216 2017 -(10 rows) - +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables -- NESTED INNER JOINs SELECT count(*) AS value, "generated_group_field" @@ -2371,10 +2316,8 @@ FROM ORDER BY value_2 DESC, user_id DESC LIMIT 10; - user_id | value_2 
---------------------------------------------------------------------- -(0 rows) - +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables -- lets test some unsupported set operations -- not supported since we use INTERSECT SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out index fb2fe49d8..4a2f68162 100644 --- a/src/test/regress/expected/multi_table_ddl.out +++ b/src/test/regress/expected/multi_table_ddl.out @@ -163,6 +163,13 @@ ALTER TABLE test_table ADD COLUMN id3 bigserial; ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers ALTER TABLE test_table ADD COLUMN id4 bigserial CHECK (id4 > 0); ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers +CREATE SEQUENCE pg_temp.temp_sequence; +CREATE TABLE table_with_temp_sequence ( + dist_key int, + seq_col bigint default nextval('pg_temp.temp_sequence') +); +SELECT create_distributed_table('table_with_temp_sequence', 'dist_key'); +ERROR: "table table_with_temp_sequence" has dependency on unsupported object "schema pg_temp_xxx" DROP TABLE test_table CASCADE; DROP SEQUENCE test_sequence_0; DROP SEQUENCE test_sequence_1; diff --git a/src/test/regress/expected/partitioning_issue_3970.out b/src/test/regress/expected/partitioning_issue_3970.out index c93f6a437..c5db76c47 100644 --- a/src/test/regress/expected/partitioning_issue_3970.out +++ b/src/test/regress/expected/partitioning_issue_3970.out @@ -65,7 +65,7 @@ ORDER BY 1,2,3; (18 rows) -- check the constraint names on the worker node --- verify that check constraınts do not have a shardId suffix +-- verify that check constraints do not have a shardId suffix \c - - - :worker_1_port SELECT relname, conname, pg_catalog.pg_get_constraintdef(con.oid, true) FROM pg_constraint con JOIN pg_class rel ON (rel.oid=con.conrelid) diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index aed913063..baf77f127 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -1407,7 +1407,7 @@ SELECT count(*) FROM pg14.foreign_table; -- should error out TRUNCATE foreign_table; -ERROR: truncating foreign tables that are added to metadata can only be excuted on the coordinator +ERROR: truncating foreign tables that are added to metadata can only be executed on the coordinator \c - - - :master_port -- cleanup set client_min_messages to error; diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index 55515a542..41c012641 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -1,3 +1,11 @@ +-- print whether we're using version > 12 to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 12 AS version_above_twelve; + version_above_twelve +--------------------------------------------------------------------- + t +(1 row) + CREATE SCHEMA "extension'test"; -- use a schema name with escape character SET search_path TO "extension'test"; @@ -184,8 +192,6 @@ SELECT create_reference_table('ref_table_2'); CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C 
STRICT; CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); -CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); -COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); $$); @@ -194,22 +200,8 @@ $$); (localhost,57637,t,"CREATE TEXT SEARCH TEMPLATE") (1 row) -SELECT run_command_on_workers($$ CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE TEXT SEARCH DICTIONARY") -(1 row) - -SELECT run_command_on_workers($$ COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,COMMENT) -(1 row) - CREATE EXTENSION dict_int FROM unpackaged; ERROR: CREATE EXTENSION ... FROM is no longer supported SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$); @@ -224,7 +216,20 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam (localhost,57637,t,"") (1 row) --- and add the other node +-- adding the second node will fail as the text search template needs to be created manually +SELECT 1 from master_add_node('localhost', :worker_2_port); +ERROR: text search template "public.intdict_template" does not exist +CONTEXT: while executing command on localhost:xxxxx +-- create the text search template manually on the worker +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO false; +CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; +CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; +CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); +RESET citus.enable_metadata_sync; +\c - - - :master_port +SET client_min_messages TO WARNING; +-- add the second node now SELECT 1 from master_add_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- @@ -344,6 +349,7 @@ SET search_path TO "extension'test"; -- check restriction for sequential execution -- enable it and see that create command errors but continues its execution by changing citus.multi_shard_modify_mode TO 'off BEGIN; + SET LOCAL citus.create_object_propagation TO deferred; CREATE TABLE some_random_table (a int); SELECT create_distributed_table('some_random_table', 'a'); create_distributed_table @@ -586,6 +592,7 @@ DROP TABLE test_extension_function; -- Test extension function altering distribution argument BEGIN; SET citus.shard_replication_factor = 1; +SET citus.multi_shard_modify_mode TO sequential; CREATE TABLE test_extension_function(col1 float8[], col2 float8[]); SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); create_distributed_table @@ -621,5 +628,10 @@ objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); (1 row) ROLLBACK; +-- Postgres already doesn't allow creating extensions in temp schema but +-- let's have a test for that to track any furher changes in postgres. 
+DROP EXTENSION isn CASCADE; +CREATE EXTENSION isn WITH SCHEMA pg_temp; +ERROR: schema "pg_temp" does not exist -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; diff --git a/src/test/regress/expected/propagate_extension_commands_1.out b/src/test/regress/expected/propagate_extension_commands_1.out index 5ee03fe29..fcbde2156 100644 --- a/src/test/regress/expected/propagate_extension_commands_1.out +++ b/src/test/regress/expected/propagate_extension_commands_1.out @@ -1,3 +1,11 @@ +-- print whether we're using version > 12 to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 12 AS version_above_twelve; + version_above_twelve +--------------------------------------------------------------------- + f +(1 row) + CREATE SCHEMA "extension'test"; -- use a schema name with escape character SET search_path TO "extension'test"; @@ -184,8 +192,6 @@ SELECT create_reference_table('ref_table_2'); CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); -CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); -COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); $$); @@ -194,22 +200,8 @@ $$); (localhost,57637,t,"CREATE TEXT SEARCH TEMPLATE") (1 row) -SELECT run_command_on_workers($$ CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE TEXT SEARCH DICTIONARY") -(1 row) - -SELECT run_command_on_workers($$ COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,COMMENT) -(1 row) - CREATE EXTENSION dict_int FROM unpackaged; SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$); run_command_on_workers @@ -223,7 +215,27 @@ SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extnam (localhost,57637,t,1.0) (1 row) --- and add the other node +-- adding the second node will fail as the text search template needs to be created manually +SELECT 1 from master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- create the text search template manually on the worker +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO false; +CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; +ERROR: function "dintdict_init" already exists with same argument types +CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; +ERROR: function "dintdict_lexize" already exists with same argument types +CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); +ERROR: duplicate key value violates unique constraint "pg_ts_template_tmplname_index" +DETAIL: Key (tmplname, tmplnamespace)=(intdict_template, 2200) already exists. 
+RESET citus.enable_metadata_sync; +\c - - - :master_port +SET client_min_messages TO WARNING; +-- add the second node now SELECT 1 from master_add_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- @@ -343,6 +355,7 @@ SET search_path TO "extension'test"; -- check restriction for sequential execution -- enable it and see that create command errors but continues its execution by changing citus.multi_shard_modify_mode TO 'off BEGIN; + SET LOCAL citus.create_object_propagation TO deferred; CREATE TABLE some_random_table (a int); SELECT create_distributed_table('some_random_table', 'a'); create_distributed_table @@ -585,6 +598,7 @@ DROP TABLE test_extension_function; -- Test extension function altering distribution argument BEGIN; SET citus.shard_replication_factor = 1; +SET citus.multi_shard_modify_mode TO sequential; CREATE TABLE test_extension_function(col1 float8[], col2 float8[]); SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); create_distributed_table @@ -620,5 +634,10 @@ objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); (1 row) ROLLBACK; +-- Postgres already doesn't allow creating extensions in temp schema but +-- let's have a test for that to track any furher changes in postgres. +DROP EXTENSION isn CASCADE; +CREATE EXTENSION isn WITH SCHEMA pg_temp; +ERROR: schema "pg_temp" does not exist -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; diff --git a/src/test/regress/expected/propagate_statistics.out b/src/test/regress/expected/propagate_statistics.out index 1068fa6d2..8a1255406 100644 --- a/src/test/regress/expected/propagate_statistics.out +++ b/src/test/regress/expected/propagate_statistics.out @@ -15,6 +15,8 @@ SELECT create_distributed_table('test_stats', 'a'); (1 row) +CREATE STATISTICS pg_temp.s1 (dependencies) ON a, b FROM test_stats; +ERROR: "statistics object s1" has dependency on unsupported object "schema pg_temp_xxx" CREATE STATISTICS s1 (dependencies) ON a, b FROM test_stats; -- test for distributing an already existing statistics CREATE TABLE "test'stats2" ( diff --git a/src/test/regress/expected/sequences.out b/src/test/regress/expected/sequences.out new file mode 100644 index 000000000..f4bfca50a --- /dev/null +++ b/src/test/regress/expected/sequences.out @@ -0,0 +1,52 @@ +SET search_path TO sequences_schema; +-- see the renamed sequence object +select count(*) from pg_sequence where seqrelid = 'renamed_seq'::regclass; + count +--------------------------------------------------------------------- + 1 +(1 row) + +TRUNCATE seq_test_0; +INSERT INTO seq_test_0 VALUES (1); +-- verify that sequence works properly +select max(z) into maxval_z from seq_test_0; +select max(y) into maxval_y from seq_test_0; +select max+1=nextval('renamed_seq') as check_sanity from maxval_z; + check_sanity +--------------------------------------------------------------------- + t +(1 row) + +select max+1=nextval('seq_1') as check_sanity from maxval_y; + check_sanity +--------------------------------------------------------------------- + t +(1 row) + +TRUNCATE seq_test_0; +INSERT INTO seq_test_0 VALUES (199999, DEFAULT, DEFAULT); +drop table maxval_z; +select max(z) into maxval_z from seq_test_0; +SELECT pg_sequence_last_value('renamed_seq'::regclass) = max FROM maxval_z; + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + +TRUNCATE seq_test_0; +BEGIN; + INSERT INTO seq_test_0 VALUES (2); + -- verify that sequence works properly + select max(z)+1=nextval('renamed_seq') as check_sanity from seq_test_0 ; + check_sanity +--------------------------------------------------------------------- + t + (1 row) + + select max(y)+1=nextval('seq_1') as check_sanity from seq_test_0 ; + check_sanity +--------------------------------------------------------------------- + t +(1 row) + +COMMIT; diff --git a/src/test/regress/expected/sequences_create.out b/src/test/regress/expected/sequences_create.out new file mode 100644 index 000000000..f1e3e9270 --- /dev/null +++ b/src/test/regress/expected/sequences_create.out @@ -0,0 +1,28 @@ +CREATE SCHEMA sequences_schema; +SET search_path TO sequences_schema; +CREATE SEQUENCE seq_0; +ALTER SEQUENCE seq_0 AS smallint; +CREATE SEQUENCE seq_1; +ALTER SEQUENCE seq_1 AS bigint; +CREATE TABLE seq_test_0 (x bigint, y bigint); +SELECT create_distributed_table('seq_test_0','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO seq_test_0 SELECT 1, s FROM generate_series(1, 50) s; +SELECT * FROM seq_test_0 ORDER BY 1, 2 LIMIT 5; + x | y +--------------------------------------------------------------------- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 +(5 rows) + +ALTER TABLE seq_test_0 ADD COLUMN z bigint; +ALTER TABLE seq_test_0 ALTER COLUMN z SET DEFAULT nextval('seq_0'); +ALTER TABLE seq_test_0 ALTER COLUMN y SET DEFAULT nextval('seq_1'); +ALTER SEQUENCE seq_0 RENAME TO renamed_seq; diff --git a/src/test/regress/expected/sequences_with_different_types.out b/src/test/regress/expected/sequences_with_different_types.out new file mode 100644 index 000000000..3a26f6b7f --- /dev/null +++ b/src/test/regress/expected/sequences_with_different_types.out @@ -0,0 +1,102 @@ +CREATE SCHEMA sequences_with_different_types; +SET search_path TO sequences_with_different_types; +CREATE TYPE two_big_ints AS (a bigint, b bigint); +-- by default, sequences get bigint type +CREATE SEQUENCE bigint_sequence_1; +CREATE SEQUENCE bigint_sequence_2 START 10000; +CREATE SEQUENCE bigint_sequence_3 INCREMENT 10; +CREATE SEQUENCE bigint_sequence_4 MINVALUE 1000000; +CREATE SEQUENCE bigint_sequence_5; +CREATE SEQUENCE bigint_sequence_8; +CREATE TABLE table_1 +( + user_id bigint, + user_code_1 text DEFAULT (('CD'::text || lpad(nextval('bigint_sequence_1'::regclass)::text, 10, '0'::text))), + user_code_2 text DEFAULT nextval('bigint_sequence_2'::regclass)::text, + user_code_3 text DEFAULT (nextval('bigint_sequence_3'::regclass) + 1000)::text, + user_code_4 float DEFAULT nextval('bigint_sequence_4'::regclass), + user_code_5 two_big_ints DEFAULT (nextval('bigint_sequence_5'::regclass), nextval('bigint_sequence_5'::regclass)), + user_code_8 jsonb DEFAULT to_jsonb('test'::text) || to_jsonb(nextval('bigint_sequence_8'::regclass)) +); +SELECT create_distributed_table('table_1', 'user_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table_1 VALUES (1, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT), (2, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING *; + user_id | user_code_1 | user_code_2 | user_code_3 | user_code_4 | user_code_5 | user_code_8 +--------------------------------------------------------------------- + 1 | CD0000000001 | 10000 | 1001 | 1000000 | (1,2) | ["test", 1] 
+ 2 | CD0000000002 | 10001 | 1011 | 1000001 | (3,4) | ["test", 2] +(2 rows) + +\c - - - :worker_1_port +SET search_path TO sequences_with_different_types; +INSERT INTO table_1 VALUES (3, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT), (4, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING *; + user_id | user_code_1 | user_code_2 | user_code_3 | user_code_4 | user_code_5 | user_code_8 +--------------------------------------------------------------------- + 3 | CD3940649673 | 3940649673949185 | 3940649673950185 | 3.94064967394918e+15 | (3940649673949185,3940649673949186) | ["test", 3940649673949185] + 4 | CD3940649673 | 3940649673949186 | 3940649673950195 | 3.94064967394919e+15 | (3940649673949187,3940649673949188) | ["test", 3940649673949186] +(2 rows) + +\c - - - :master_port +SET search_path TO sequences_with_different_types; +CREATE SEQUENCE bigint_sequence_6; +CREATE TABLE table_2 +( + user_id bigint, + user_code OID DEFAULT nextval('bigint_sequence_6'::regclass) +); +SELECT create_distributed_table('table_2', 'user_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- on the coordinator, the sequence starts from 0 +INSERT INTO table_2 VALUES (1, DEFAULT) RETURNING *; + user_id | user_code +--------------------------------------------------------------------- + 1 | 1 +(1 row) + +\c - - - :worker_1_port +SET search_path TO sequences_with_different_types; +-- this fails because on the workers the start value of the sequence +-- is greater than the largest value of an oid +INSERT INTO table_2 VALUES (1, DEFAULT) RETURNING *; +ERROR: OID out of range +\c - - - :master_port +SET search_path TO sequences_with_different_types; +CREATE SEQUENCE bigint_sequence_7; +CREATE TABLE table_3 +( + user_id bigint, + user_code boolean DEFAULT ((nextval('bigint_sequence_7'::regclass)%2)::int)::boolean +); +SELECT create_distributed_table('table_3', 'user_id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table_3 VALUES (1, DEFAULT), (2, DEFAULT) RETURNING *; + user_id | user_code +--------------------------------------------------------------------- + 1 | t + 2 | f +(2 rows) + +\c - - - :worker_1_port +SET search_path TO sequences_with_different_types; +INSERT INTO table_3 VALUES (3, DEFAULT), (4, DEFAULT) RETURNING *; + user_id | user_code +--------------------------------------------------------------------- + 3 | t + 4 | f +(2 rows) + +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA sequences_with_different_types CASCADE; diff --git a/src/test/regress/expected/sqlancer_failures.out b/src/test/regress/expected/sqlancer_failures.out index 47a83665f..234eadd87 100644 --- a/src/test/regress/expected/sqlancer_failures.out +++ b/src/test/regress/expected/sqlancer_failures.out @@ -194,6 +194,26 @@ ERROR: cannot pushdown the subquery DETAIL: Complex subqueries, CTEs and local tables cannot be in the outer part of an outer join with a distributed table -- drop existing sqlancer tables before next tests DROP TABLE t0, t1, t2, t3, t4 CASCADE; +CREATE TABLE tbl1(a REAL, b FLOAT, c money); +CREATE TABLE tbl2(a REAL, b FLOAT, c money); +SELECT create_distributed_table('tbl1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('tbl2', 'b'); + create_distributed_table 
+--------------------------------------------------------------------- + +(1 row) + +INSERT INTO tbl1 VALUES(1, 1, 1); +SET citus.enable_repartition_joins to ON; +SELECT * FROM tbl1, tbl2 WHERE tbl2.c=tbl1.c; +ERROR: no hash function defined for type money +CONTEXT: while executing command on localhost:xxxxx +DROP TABLE tbl1, tbl2 CASCADE; CREATE TABLE IF NOT EXISTS t0(c0 TEXT CHECK (TRUE), c1 money ) WITH (autovacuum_vacuum_threshold=1180014707, autovacuum_freeze_table_age=13771154, autovacuum_vacuum_cost_delay=23, autovacuum_analyze_threshold=1935153914, autovacuum_freeze_min_age=721733768, autovacuum_enabled=0, autovacuum_vacuum_cost_limit=9983); CREATE UNLOGGED TABLE IF NOT EXISTS t1(LIKE t0); CREATE TABLE t2(LIKE t0 INCLUDING INDEXES); diff --git a/src/test/regress/expected/text_search.out b/src/test/regress/expected/text_search.out index d1dbf6511..39e57326e 100644 --- a/src/test/regress/expected/text_search.out +++ b/src/test/regress/expected/text_search.out @@ -1,7 +1,7 @@ CREATE SCHEMA text_search; CREATE SCHEMA text_search2; SET search_path TO text_search; --- create a new configruation from scratch +-- create a new configuration from scratch CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); CREATE TABLE t1(id int, name text); CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); @@ -37,21 +37,19 @@ SELECT create_distributed_table('t1', 'name'); (1 row) +-- verify that we can change the object +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'this comment can be set right now'; +COMMIT; SELECT * FROM run_command_on_workers($$ SELECT obj_description('text_search.my_text_search_config'::regconfig); $$) ORDER BY 1,2; - nodename | nodeport | success | result + nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | on demand propagation of text search object with a comment - localhost | 57638 | t | on demand propagation of text search object with a comment + localhost | 57637 | t | this comment can be set right now + localhost | 57638 | t | this comment can be set right now (2 rows) --- verify that changing anything on a managed TEXT SEARCH CONFIGURATION fails after parallel execution -COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'this comment can''t be set right now'; -ERROR: cannot run text search configuration command because there was a parallel operation on a distributed table in the transaction -DETAIL: When running command on/for a distributed text search configuration, Citus needs to perform all operations over a single connection per node to ensure consistency. 
-HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" -ABORT; +DROP TABLE t1; -- create an index on an already distributed table BEGIN; CREATE TEXT SEARCH CONFIGURATION my_text_search_config2 ( parser = default ); @@ -64,6 +62,7 @@ SELECT create_distributed_table('t1', 'name'); (1 row) CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config2'::regconfig, (COALESCE(name, ''::character varying))::text)); +COMMIT; SELECT * FROM run_command_on_workers($$ SELECT obj_description('text_search.my_text_search_config2'::regconfig); $$) ORDER BY 1,2; @@ -73,7 +72,7 @@ $$) ORDER BY 1,2; localhost | 57638 | t | on demand propagation of text search object with a comment 2 (2 rows) -ABORT; +DROP TABLE t1; -- should be able to create a configuration based on a copy of an existing configuration CREATE TEXT SEARCH CONFIGURATION french_noaccent ( COPY = french ); CREATE TABLE t2(id int, name text); @@ -151,14 +150,14 @@ $$) ORDER BY 1,2; ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciihword; NOTICE: mapping for token type "asciihword" does not exist, skipping -- Comment on a text search configuration -COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS 'a text configuration that is butcherd to test all edge cases'; +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS 'a text configuration that is butchered to test all edge cases'; SELECT * FROM run_command_on_workers($$ SELECT obj_description('text_search.french_noaccent'::regconfig); $$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | a text configuration that is butcherd to test all edge cases - localhost | 57638 | t | a text configuration that is butcherd to test all edge cases + localhost | 57637 | t | a text configuration that is butchered to test all edge cases + localhost | 57638 | t | a text configuration that is butchered to test all edge cases (2 rows) -- Remove a comment @@ -504,6 +503,211 @@ SELECT create_distributed_table('sensors', 'measureid'); (1 row) +-- create a new dictionary from scratch +CREATE TEXT SEARCH DICTIONARY my_english_dict ( + template = snowball, + language = english, + stopwords = english +); +-- verify that the dictionary definition is the same in all nodes +SELECT result FROM run_command_on_all_nodes($$ + SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption) + FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid ) + WHERE dictname = 'my_english_dict'; +$$); + result +--------------------------------------------------------------------- + (my_english_dict,text_search,postgres,snowball,"language = 'english', stopwords = 'english'") + (my_english_dict,text_search,postgres,snowball,"language = 'english', stopwords = 'english'") + (my_english_dict,text_search,postgres,snowball,"language = 'english', stopwords = 'english'") +(3 rows) + +-- use the new dictionary in a configuration mapping +CREATE TEXT SEARCH CONFIGURATION my_english_config ( COPY = english ); +ALTER TEXT SEARCH CONFIGURATION my_english_config ALTER MAPPING FOR asciiword WITH my_english_dict; +-- verify that the dictionary is available on the worker nodes +SELECT result FROM run_command_on_all_nodes($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.my_english_config', 'The Brightest supernovaes') WHERE alias = 'asciiword' LIMIT 1; +$$); + result 
+--------------------------------------------------------------------- + (asciiword,text_search.my_english_dict) + (asciiword,text_search.my_english_dict) + (asciiword,text_search.my_english_dict) +(3 rows) + +-- comment on a text search dictionary +COMMENT ON TEXT SEARCH DICTIONARY my_english_dict IS 'a text search dictionary that is butchered to test all edge cases'; +SELECT result FROM run_command_on_all_nodes($$ + SELECT obj_description('text_search.my_english_dict'::regdictionary); +$$); + result +--------------------------------------------------------------------- + a text search dictionary that is butchered to test all edge cases + a text search dictionary that is butchered to test all edge cases + a text search dictionary that is butchered to test all edge cases +(3 rows) + +-- remove a comment +COMMENT ON TEXT SEARCH DICTIONARY my_english_dict IS NULL; +SELECT result FROM run_command_on_all_nodes($$ + SELECT obj_description('text_search.my_english_dict'::regdictionary); +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +-- test various ALTER TEXT SEARCH DICTIONARY commands +ALTER TEXT SEARCH DICTIONARY my_english_dict RENAME TO my_turkish_dict; +ALTER TEXT SEARCH DICTIONARY my_turkish_dict (language = turkish, stopwords); +ALTER TEXT SEARCH DICTIONARY my_turkish_dict OWNER TO text_search_owner; +ALTER TEXT SEARCH DICTIONARY my_turkish_dict SET SCHEMA "Text Search Requiring Quote's"; +-- verify that the dictionary definition is the same in all nodes +SELECT result FROM run_command_on_all_nodes($$ + SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption) + FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid ) + WHERE dictname = 'my_turkish_dict'; +$$); + result +--------------------------------------------------------------------- + (my_turkish_dict,"""Text Search Requiring Quote's""",text_search_owner,snowball,"language = 'turkish'") + (my_turkish_dict,"""Text Search Requiring Quote's""",text_search_owner,snowball,"language = 'turkish'") + (my_turkish_dict,"""Text Search Requiring Quote's""",text_search_owner,snowball,"language = 'turkish'") +(3 rows) + +-- verify that the configuration dictionary is changed in all nodes +SELECT result FROM run_command_on_all_nodes($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.my_english_config', 'The Brightest supernovaes') WHERE alias = 'asciiword' LIMIT 1; +$$); + result +--------------------------------------------------------------------- + (asciiword,"""Text Search Requiring Quote's"".my_turkish_dict") + (asciiword,"""Text Search Requiring Quote's"".my_turkish_dict") + (asciiword,"""Text Search Requiring Quote's"".my_turkish_dict") +(3 rows) + +-- before testing drops, check that the dictionary exists on all nodes +SELECT result FROM run_command_on_all_nodes($$ + SELECT '"Text Search Requiring Quote''s".my_turkish_dict'::regdictionary; +$$); + result +--------------------------------------------------------------------- + "Text Search Requiring Quote's".my_turkish_dict + "Text Search Requiring Quote's".my_turkish_dict + "Text Search Requiring Quote's".my_turkish_dict +(3 rows) + +ALTER TEXT SEARCH DICTIONARY "Text Search Requiring Quote's".my_turkish_dict SET SCHEMA text_search; +-- verify that we can drop the dictionary only with cascade option +DROP TEXT SEARCH DICTIONARY my_turkish_dict; +ERROR: cannot drop text search dictionary my_turkish_dict because other objects depend on it +DETAIL: text search configuration 
my_english_config depends on text search dictionary my_turkish_dict +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TEXT SEARCH DICTIONARY my_turkish_dict CASCADE; +NOTICE: drop cascades to text search configuration my_english_config +-- verify that it is dropped now +SELECT result FROM run_command_on_all_nodes($$ + SELECT 'my_turkish_dict'::regdictionary; +$$); + result +--------------------------------------------------------------------- + ERROR: text search dictionary "my_turkish_dict" does not exist + ERROR: text search dictionary "my_turkish_dict" does not exist + ERROR: text search dictionary "my_turkish_dict" does not exist +(3 rows) + +-- test different templates that are used in dictionaries +CREATE TEXT SEARCH DICTIONARY simple_dict ( + TEMPLATE = pg_catalog.simple, + STOPWORDS = english, + accept = false +); +SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$ + SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption) + FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid ) + WHERE dictname = 'simple_dict'; +$$); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +CREATE TEXT SEARCH DICTIONARY synonym_dict ( + template=synonym, + synonyms='synonym_sample', + casesensitive=1 +); +SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$ + SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption) + FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid ) + WHERE dictname = 'synonym_dict'; +$$); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +CREATE TEXT SEARCH DICTIONARY thesaurus_dict ( + TEMPLATE = thesaurus, + DictFile = thesaurus_sample, + Dictionary = pg_catalog.english_stem +); +SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$ + SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption) + FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid ) + WHERE dictname = 'thesaurus_dict'; +$$); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +CREATE TEXT SEARCH DICTIONARY ispell_dict ( + TEMPLATE = ispell, + DictFile = ispell_sample, + AffFile = ispell_sample, + Stopwords = english +); +SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$ + SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption) + FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid ) + WHERE dictname = 'ispell_dict'; +$$); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +CREATE TEXT SEARCH DICTIONARY snowball_dict ( + TEMPLATE = snowball, + Language = english, + StopWords = english +); +SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$ + SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption) + FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid ) + WHERE dictname = 'snowball_dict'; +$$); + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + +-- will skip trying to propagate the text search configuration due to temp schema +CREATE TEXT SEARCH CONFIGURATION pg_temp.temp_text_search_config ( parser = default ); +WARNING: "text search configuration pg_temp_xxx.temp_text_search_config" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "text search configuration pg_temp_xxx.temp_text_search_config" will be created only locally +-- will skip trying to propagate the text search dictionary due to temp schema +CREATE TEXT SEARCH DICTIONARY pg_temp.temp_text_search_dict ( + template = snowball, + language = english, + stopwords = english +); +WARNING: "text search dictionary pg_temp_xxx.temp_text_search_dict" has dependency on unsupported object "schema pg_temp_xxx" +DETAIL: "text search dictionary pg_temp_xxx.temp_text_search_dict" will be created only locally SET client_min_messages TO 'warning'; DROP SCHEMA text_search, text_search2, "Text Search Requiring Quote's" CASCADE; DROP ROLE text_search_owner; diff --git a/src/test/regress/expected/unsupported_lateral_subqueries.out b/src/test/regress/expected/unsupported_lateral_subqueries.out new file mode 100644 index 000000000..9c613a199 --- /dev/null +++ b/src/test/regress/expected/unsupported_lateral_subqueries.out @@ -0,0 +1,208 @@ +CREATE SCHEMA unsupported_lateral_joins; +SET search_path TO unsupported_lateral_joins; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 13354100; +CREATE TABLE test(x bigint, y bigint); +SELECT create_distributed_table('test','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE ref(a bigint, b bigint); +SELECT create_reference_table('ref'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +insert into test(x, y) SELECT 1, i FROM generate_series(1, 10) i; +insert into test(x, y) SELECT 3, i FROM generate_series(11, 40) i; +insert into test(x, y) SELECT i, 1 FROM generate_series(1, 10) i; +insert into test(x, y) SELECT i, 3 FROM generate_series(11, 40) i; +insert into ref(a, b) SELECT i, 1 FROM generate_series(1, 10) i; +insert into ref(a, b) SELECT i, 3 FROM generate_series(11, 40) i; +insert into ref(a, b) SELECT 1, i FROM generate_series(1, 10) i; +insert into ref(a, b) SELECT 3, i FROM generate_series(11, 40) i; +-- The following queries return wrong results when pushed down. Instead of +-- returning 2 rows, for each row in ref table. They would return (2 * number +-- of shards) rows for each row in the reference table. 
See issue #5327 +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref.a + LIMIT 2 + ) q; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a reference table (ref) +SELECT count(*) +FROM (VALUES (1), (3)) ref(a), + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref.a + LIMIT 2 + ) q; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a VALUES clause +WITH ref(a) as (select y from test) +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref.a + LIMIT 2 + ) q; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables +SELECT count(*) +FROM generate_series(1, 3) ref(a), + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref.a + LIMIT 2 + ) q; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a table function (ref) +SELECT count(*) +FROM (SELECT generate_series(1, 3)) ref(a), + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref.a + LIMIT 2 + ) q; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a subquery without FROM (ref) +-- make sure right error message is chosen +SELECT count(*) +FROM ref ref_table, + (VALUES (1), (3)) rec_values(a), + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref_table.a + LIMIT 2 + ) q; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a reference table (ref_table) +SELECT count(*) +FROM ref as ref_table, + (VALUES (1), (3)) ref_values(a), + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref_values.a + LIMIT 2 + ) q; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a VALUES clause +SELECT count(*) FROM + ref ref_outer, + LATERAL ( + SELECT * FROM + LATERAL ( SELECT * + FROM ref ref_inner, + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref_outer.a + LIMIT 2 + ) q + ) q2 + ) q3; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a reference table (ref_outer) +SELECT count(*) FROM + ref ref_outer, + LATERAL ( + SELECT * FROM + LATERAL ( SELECT * + FROM ref ref_inner, + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref_inner.a + LIMIT 2 + ) q + ) q2 + ) q3; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a reference table (ref_inner) +-- Since this only correlates on the distribution column, this can be safely +-- pushed down. But this is currently considered to hard to detect, so we fail. 
+SELECT count(*) +FROM ref, + LATERAL ( + SELECT + test.x + FROM test + WHERE + test.x = ref.a + LIMIT 2 + ) q; +ERROR: cannot push down this subquery +DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from a reference table (ref) +-- Would require repartitioning to work with subqueries +SELECT count(*) +FROM test, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = test.y + LIMIT 2 + ) q ; +ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +-- Too complex joins for Citus to handle currently +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = ref.a + LIMIT 2 + ) q +; +ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +-- Would require repartitioning to work with subqueries +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.y + FROM test test_2 + WHERE + test_2.y = ref.a + LIMIT 2 + ) q +; +ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +SET client_min_messages TO WARNING; +DROP SCHEMA unsupported_lateral_joins CASCADE; diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index afd30523c..f84553317 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -44,6 +44,7 @@ ORDER BY 1; function citus_check_connection_to_node(text,integer) function citus_cleanup_orphaned_shards() function citus_conninfo_cache_invalidate() + function citus_coordinator_nodeid() function citus_copy_shard_placement(bigint,text,integer,text,integer,boolean,citus.shard_transfer_mode) function citus_create_restore_point(text) function citus_disable_node(text,integer,boolean) @@ -282,5 +283,5 @@ ORDER BY 1; view citus_stat_statements view pg_dist_shard_placement view time_partitions -(266 rows) +(267 rows) diff --git a/src/test/regress/expected/views_create.out b/src/test/regress/expected/views_create.out new file mode 100644 index 000000000..acc8f002f --- /dev/null +++ b/src/test/regress/expected/views_create.out @@ -0,0 +1,75 @@ +CREATE SCHEMA views_create; +SET search_path TO views_create; +CREATE TABLE view_test_table(a INT NOT NULL PRIMARY KEY, b BIGINT, c text); +CREATE OR REPLACE VIEW select_filtered_view AS + SELECT * FROM view_test_table WHERE c = 'testing' + WITH CASCADED CHECK OPTION; +CREATE OR REPLACE VIEW select_all_view AS + SELECT * FROM view_test_table + WITH LOCAL CHECK OPTION; +CREATE OR REPLACE VIEW count_view AS + SELECT COUNT(*) FROM view_test_table; +SELECT create_distributed_table('view_test_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO view_test_table VALUES (1,1,'testing'), (2,1,'views'); +SELECT * FROM count_view; + count +--------------------------------------------------------------------- + 2 +(1 row) + +SELECT COUNT(*) FROM count_view; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT COUNT(*) FROM select_all_view; + count +--------------------------------------------------------------------- + 2 +(1 row) + +SELECT * FROM select_filtered_view; + a | b | c 
+--------------------------------------------------------------------- + 1 | 1 | testing +(1 row) + +-- dummy temp recursive view +CREATE TEMP RECURSIVE VIEW recursive_defined_non_recursive_view(c) AS (SELECT 1); +CREATE MATERIALIZED VIEW select_all_matview AS + SELECT * FROM view_test_table + WITH DATA; +CREATE MATERIALIZED VIEW IF NOT EXISTS select_filtered_matview AS + SELECT * FROM view_test_table WHERE c = 'views' + WITH NO DATA; +REFRESH MATERIALIZED VIEW select_filtered_matview; +SELECT COUNT(*) FROM select_all_matview; + count +--------------------------------------------------------------------- + 2 +(1 row) + +SELECT * FROM select_filtered_matview; + a | b | c +--------------------------------------------------------------------- + 2 | 1 | views +(1 row) + +SELECT COUNT(*) FROM select_all_view a JOIN select_filtered_matview b ON a.c=b.c; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT COUNT(*) FROM select_all_view a JOIN view_test_table b ON a.c=b.c; + count +--------------------------------------------------------------------- + 2 +(1 row) + diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index 8f849d5dd..9eb45fe49 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -1,8 +1,6 @@ test: isolation_add_remove_node -test: isolation_turn_mx_off test: isolation_update_node test: isolation_update_node_lock_writes -test: isolation_turn_mx_on test: isolation_ensure_dependency_activate_node test: isolation_add_node_vs_reference_table_operations test: isolation_create_table_vs_add_remove_node @@ -46,6 +44,7 @@ test: isolation_multi_shard_modify_vs_all test: isolation_modify_with_subquery_vs_dml test: isolation_hash_copy_vs_all test: isolation_range_copy_vs_all +test: isolation_reference_copy_vs_all test: isolation_partitioned_copy_vs_all test: isolation_select_vs_all test: isolation_insert_vs_all @@ -60,6 +59,7 @@ test: isolation_ddl_vs_all test: isolation_get_all_active_transactions test: isolation_validate_vs_insert test: isolation_insert_select_conflict +test: isolation_ref2ref_foreign_keys test: shared_connection_waits test: isolation_cancellation test: isolation_max_client_connections @@ -95,8 +95,4 @@ test: isolation_replicated_dist_on_mx test: isolation_replicate_reference_tables_to_coordinator test: isolation_multiuser_locking -# MXless tests test: isolation_check_mx -test: isolation_turn_mx_off -test: isolation_reference_copy_vs_all -test: isolation_ref2ref_foreign_keys diff --git a/src/test/regress/multi_mx_schedule b/src/test/regress/multi_mx_schedule index 973c3bf05..ff5cccf11 100644 --- a/src/test/regress/multi_mx_schedule +++ b/src/test/regress/multi_mx_schedule @@ -66,6 +66,7 @@ test: metadata_sync_helpers # test that no tests leaked intermediate results. 
This should always be last test: ensure_no_intermediate_data_leak +test: check_mx # --------- # ensures that we never leak any connection counts diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 49d54592c..5b0d6b087 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -53,7 +53,7 @@ test: subqueries_deep subquery_view subquery_partitioning subqueries_not_support test: subquery_in_targetlist subquery_in_where subquery_complex_target_list subquery_append test: subquery_prepared_statements test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins -test: cte_inline recursive_view_local_table values +test: cte_inline recursive_view_local_table values sequences_with_different_types test: pg13 pg12 # run pg14 sequentially as it syncs metadata test: pg14 @@ -80,7 +80,7 @@ test: multi_array_agg multi_limit_clause multi_orderby_limit_pushdown test: multi_jsonb_agg multi_jsonb_object_agg multi_json_agg multi_json_object_agg bool_agg ch_bench_having chbenchmark_all_queries expression_reference_join anonymous_columns test: ch_bench_subquery_repartition subscripting_op test: multi_agg_type_conversion multi_count_type_conversion recursive_relation_planning_restriction_pushdown -test: multi_partition_pruning single_hash_repartition_join +test: multi_partition_pruning single_hash_repartition_join unsupported_lateral_subqueries test: multi_join_pruning multi_hash_pruning intermediate_result_pruning test: multi_null_minmax_value_pruning cursors test: modification_correctness diff --git a/src/test/regress/operations_schedule b/src/test/regress/operations_schedule index d12ccf1b3..2692f212f 100644 --- a/src/test/regress/operations_schedule +++ b/src/test/regress/operations_schedule @@ -8,3 +8,4 @@ test: multi_move_mx test: shard_move_deferred_delete test: multi_colocated_shard_rebalance test: ignoring_orphaned_shards +test: check_mx diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index ebbb15371..9b03b88d8 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -669,14 +669,14 @@ if (!$conninfo) # Create new data directories, copy workers for speed # --allow-group-access is used to ensure we set permissions on private keys # correctly - system(catfile("$bindir", "initdb"), ("--nosync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, $MASTERDIR, "data"))) == 0 + system(catfile("$bindir", "initdb"), ("--no-sync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, $MASTERDIR, "data"))) == 0 or die "Could not create $MASTERDIR data directory"; if ($usingWindows) { for my $port (@workerPorts) { - system(catfile("$bindir", "initdb"), ("--nosync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, "worker.$port", "data"))) == 0 + system(catfile("$bindir", "initdb"), ("--no-sync", "--allow-group-access", "-U", $user, "--encoding", "UTF8", catfile($TMP_CHECKDIR, "worker.$port", "data"))) == 0 or die "Could not create worker data directory"; } } diff --git a/src/test/regress/spec/isolation_drop_vs_all.spec b/src/test/regress/spec/isolation_drop_vs_all.spec index 37015b111..c970567e0 100644 --- a/src/test/regress/spec/isolation_drop_vs_all.spec +++ b/src/test/regress/spec/isolation_drop_vs_all.spec @@ -9,12 +9,12 @@ setup SELECT citus_internal.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; - CREATE SCHEMA drop_tests - CREATE 
TABLE drop_hash(id integer, data text); + CREATE SCHEMA drop_tests; + CREATE TABLE drop_tests.drop_hash(id integer, data text); SELECT create_distributed_table('drop_tests.drop_hash', 'id'); - CREATE SCHEMA drop_tests_2 - CREATE TABLE drop_hash_2(id integer, data text); + CREATE SCHEMA drop_tests_2; + CREATE TABLE drop_tests_2.drop_hash_2(id integer, data text); SELECT create_distributed_table('drop_tests_2.drop_hash_2', 'id'); } diff --git a/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec b/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec index 30a4e6d21..5891c153a 100644 --- a/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec +++ b/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec @@ -36,7 +36,7 @@ step "s1-begin" step "s1-add-worker" { - SELECT 1 FROM master_add_node('localhost', 57638); + SELECT 1 FROM master_add_node('localhost', 57638); } step "s1-commit" @@ -58,7 +58,7 @@ step "s1-print-distributed-objects" SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created @@ -83,23 +83,23 @@ step "s2-create-schema" step "s2-create-table" { - CREATE TABLE t1 (a int, b int); + CREATE TABLE t1 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); } step "s2-create-type" { - CREATE TYPE tt1 AS (a int, b int); + CREATE TYPE tt1 AS (a int, b int); } step "s2-create-table-with-type" { - CREATE TABLE t1 (a int, b tt1); + CREATE TABLE t1 (a int, b tt1); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t1', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t1', 'a'); } step "s2-distribute-function" @@ -110,12 +110,12 @@ step "s2-distribute-function" step "s2-begin" { - BEGIN; + BEGIN; } step "s2-commit" { - COMMIT; + COMMIT; } // prints from session 2 are run at the end when the worker has already been added by the @@ -130,7 +130,7 @@ step "s2-print-distributed-objects" SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$); -- print if the type has been created - SELECT count(*) FROM pg_type where typname = 'tt1'; + SELECT count(*) FROM pg_type where typname = 'tt1'; SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$); -- print if the function has been created @@ -148,10 +148,10 @@ step "s3-use-schema" step "s3-create-table" { - CREATE TABLE t2 (a int, b int); + CREATE TABLE t2 (a int, b int); -- session needs to have replication factor set to 1, can't do in setup - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('t2', 'a'); + SET citus.shard_replication_factor TO 1; + SELECT create_distributed_table('t2', 'a'); } step "s3-wait-for-metadata-sync" @@ -168,12 +168,12 @@ step "s3-create-schema2" step "s3-begin" { - BEGIN; + BEGIN; } step "s3-commit" { - COMMIT; + COMMIT; } step "s3-drop-coordinator-schemas" @@ -193,14 +193,14 @@ step "s3-drop-coordinator-schemas" 
permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s1-commit" "s2-create-table" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s1-commit" "s2-create-table" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s1-add-worker" "s2-create-table" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // concurrency tests with multi schema distribution permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s1-commit" "s2-create-table" "s2-commit" "s3-create-table" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // type and schema tests permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" diff --git a/src/test/regress/spec/isolation_metadata_sync_vs_all.spec b/src/test/regress/spec/isolation_metadata_sync_vs_all.spec index 80cfe2e33..8f60d0c6d 100644 --- a/src/test/regress/spec/isolation_metadata_sync_vs_all.spec +++ b/src/test/regress/spec/isolation_metadata_sync_vs_all.spec @@ -110,8 +110,8 @@ step "s2-create-dist-table" step "s2-create-schema" { - CREATE SCHEMA dist_schema - CREATE TABLE dist_table_in_schema(id int, data int); + CREATE SCHEMA dist_schema; + CREATE TABLE dist_schema.dist_table_in_schema(id int, data int); SELECT 
create_distributed_table('dist_schema.dist_table_in_schema', 'id'); } diff --git a/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec b/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec index 6137df058..d92ee5357 100644 --- a/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec +++ b/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec @@ -1,5 +1,8 @@ setup { + SELECT citus_internal.replace_isolation_tester_func(); + SELECT citus_internal.refresh_isolation_tester_prepared_statement(); + CREATE TABLE ref_table_1(id int PRIMARY KEY, value int); SELECT create_reference_table('ref_table_1'); @@ -17,6 +20,7 @@ setup teardown { DROP TABLE ref_table_1, ref_table_2, ref_table_3; + SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_reference_copy_vs_all.spec b/src/test/regress/spec/isolation_reference_copy_vs_all.spec index 1ee681b81..b327230d9 100644 --- a/src/test/regress/spec/isolation_reference_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_reference_copy_vs_all.spec @@ -5,6 +5,8 @@ // create append distributed table to test behavior of COPY in concurrent operations setup { + SELECT citus_internal.replace_isolation_tester_func(); + SELECT citus_internal.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE reference_copy(id integer, data text, int_data int); SELECT create_reference_table('reference_copy'); @@ -14,6 +16,7 @@ setup teardown { DROP TABLE IF EXISTS reference_copy CASCADE; + SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_turn_mx_off.spec b/src/test/regress/spec/isolation_turn_mx_off.spec deleted file mode 100644 index f80fc0a1e..000000000 --- a/src/test/regress/spec/isolation_turn_mx_off.spec +++ /dev/null @@ -1,18 +0,0 @@ -session "s1" - -step "disable-mx-by-default" -{ - ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; -} - -step "reload" -{ - SELECT pg_reload_conf(); -} - -step "stop-metadata-sync" -{ - SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; -} - -permutation "disable-mx-by-default" "reload" "stop-metadata-sync" diff --git a/src/test/regress/spec/isolation_turn_mx_on.spec b/src/test/regress/spec/isolation_turn_mx_on.spec deleted file mode 100644 index 5e35c13e5..000000000 --- a/src/test/regress/spec/isolation_turn_mx_on.spec +++ /dev/null @@ -1,18 +0,0 @@ -session "s1" - -step "enable-mx-by-default" -{ - ALTER SYSTEM SET citus.enable_metadata_sync TO ON; -} - -step "reload" -{ - SELECT pg_reload_conf(); -} - -step "start-metadata-sync" -{ - SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; -} - -permutation "enable-mx-by-default" "reload" "start-metadata-sync" diff --git a/src/test/regress/spec/isolation_update_node.spec b/src/test/regress/spec/isolation_update_node.spec index a6e108528..5ae6fcaa6 100644 --- a/src/test/regress/spec/isolation_update_node.spec +++ b/src/test/regress/spec/isolation_update_node.spec @@ -8,6 +8,7 @@ setup teardown { + DROP TABLE IF EXISTS test; SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; SELECT nodeid, nodename, nodeport from pg_dist_node ORDER BY 1 DESC; } @@ -103,43 +104,47 @@ step "s2-execute-prepared" { EXECUTE foo; } -step "s2-verify-metadata" -{ - SELECT nodeid, groupid, nodename, nodeport FROM pg_dist_node ORDER BY nodeid; - SELECT master_run_on_worker( - 
ARRAY['localhost'], ARRAY[57638], - ARRAY['SELECT jsonb_agg(ROW(nodeid, groupid, nodename, nodeport) ORDER BY nodeid) FROM pg_dist_node'], - false); -} - -step "s2-start-metadata-sync-node-2" -{ - SELECT start_metadata_sync_to_node('localhost', 57638); -} - -step "s2-drop-table" { - DROP TABLE test; -} - step "s2-abort" { ABORT; } +session "s3" + +step "s3-update-node-1-back" +{ + SELECT 1 FROM master_update_node( + (select nodeid from pg_dist_node where nodeport = 58637), + 'localhost', + 57637); +} + +step "s3-update-node-2-back" +{ + SELECT 1 FROM master_update_node( + (select nodeid from pg_dist_node where nodeport = 58638), + 'localhost', + 57638); +} + + +// since we update the nodes to nonexistent nodes we break metadata, so here we fix it manually +step "s3-manually-fix-metadata" +{ + UPDATE pg_dist_node SET metadatasynced = 't' WHERE nodeport = 57637; + UPDATE pg_dist_node SET metadatasynced = 't' WHERE nodeport = 57638; + SELECT start_metadata_sync_to_node('localhost', 57637); + SELECT start_metadata_sync_to_node('localhost', 57638); +} + + // session 1 updates node 1, session 2 updates node 2, should be ok -permutation "s1-begin" "s1-update-node-1" "s2-update-node-2" "s1-commit" "s1-show-nodes" +permutation "s1-begin" "s1-update-node-1" "s2-update-node-2" "s1-commit" "s1-show-nodes" "s3-update-node-1-back" "s3-update-node-2-back" "s3-manually-fix-metadata" // sessions 1 updates node 1, session 2 tries to do the same -permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-update-node-1" "s1-commit" "s2-abort" "s1-show-nodes" - -// master_update_node should block start_metadata_sync_to_node. Note that we -- cannot run start_metadata_sync_to_node in a transaction, so we're not -- testing the reverse order here. -- Having different result on coordinator and worker is expected for now since -- we run test after disabling mx.
-permutation "s1-begin" "s1-update-node-1" "s2-start-metadata-sync-node-2" "s1-commit" "s2-verify-metadata" +permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-update-node-1" "s1-commit" "s2-abort" "s1-show-nodes" "s3-update-node-1-back" "s3-manually-fix-metadata" // make sure we have entries in prepared statement cache // then make sure that after we update pg_dist_node, the changes are visible to // the prepared statement -permutation "s2-create-table" "s1-begin" "s1-update-node-nonexistent" "s1-prepare-transaction" "s2-cache-prepared-statement" "s1-commit-prepared" "s2-execute-prepared" "s1-update-node-existent" "s2-drop-table" +permutation "s2-create-table" "s1-begin" "s1-update-node-nonexistent" "s1-prepare-transaction" "s2-cache-prepared-statement" "s1-commit-prepared" "s2-execute-prepared" "s1-update-node-existent" "s3-manually-fix-metadata" diff --git a/src/test/regress/spec/isolation_update_node_lock_writes.spec b/src/test/regress/spec/isolation_update_node_lock_writes.spec index 6915b6a46..74e37c659 100644 --- a/src/test/regress/spec/isolation_update_node_lock_writes.spec +++ b/src/test/regress/spec/isolation_update_node_lock_writes.spec @@ -7,8 +7,12 @@ setup SELECT create_distributed_table('update_node', 'id'); } +// we sleep 2 seconds to let isolation test sync metadata +// which is longer than citus.metadata_sync_interval, 1 second teardown { + SELECT pg_sleep(2); + RESET citus.shard_replication_factor; DROP TABLE update_node CASCADE; diff --git a/src/test/regress/sql/add_coordinator.sql b/src/test/regress/sql/add_coordinator.sql index eb5e37778..2dba78064 100644 --- a/src/test/regress/sql/add_coordinator.sql +++ b/src/test/regress/sql/add_coordinator.sql @@ -2,6 +2,9 @@ -- ADD_COORDINATOR -- +-- node trying to add itself without specifying groupid => 0 should error out +SELECT master_add_node('localhost', :master_port); + SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset -- adding the same node again should return the existing nodeid diff --git a/src/test/regress/sql/aggregate_support.sql b/src/test/regress/sql/aggregate_support.sql index 9c62ee074..b2cd063c7 100644 --- a/src/test/regress/sql/aggregate_support.sql +++ b/src/test/regress/sql/aggregate_support.sql @@ -556,6 +556,7 @@ create function dummy_fnc(a dummy_tbl, d double precision) RETURNS dummy_tbl -- test in tx block -- shouldn't distribute, as citus.create_object_propagation is set to deferred BEGIN; +SET LOCAL citus.create_object_propagation TO deferred; create aggregate dependent_agg (float8) (stype=dummy_tbl, sfunc=dummy_fnc); COMMIT; -- verify not distributed @@ -602,6 +603,31 @@ SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid DROP TABLE dummy_tbl CASCADE; +-- Show that zero-argument polymorphic aggregates work +CREATE FUNCTION stfnp_zero_arg(int[]) RETURNS int[] AS +'select $1' LANGUAGE SQL; + +CREATE FUNCTION ffp_zero_arg(anyarray) RETURNS anyarray AS +'select $1' LANGUAGE SQL; + +CREATE AGGREGATE zero_arg_agg(*) (SFUNC = stfnp_zero_arg, STYPE = int4[], + FINALFUNC = ffp_zero_arg, INITCOND = '{}'); + +CREATE TABLE zero_arg_agg_table(f1 int, f2 int[]); +SELECT create_distributed_table('zero_arg_agg_table','f1'); +INSERT INTO zero_arg_agg_table VALUES(1, array[1]); +INSERT INTO zero_arg_agg_table VALUES(1, array[11]); + +SELECT zero_arg_agg(*) from zero_arg_agg_table; + +-- Show that, after dropping a table on which functions and aggregates depend, +-- pg_dist_object is consistent on coordinator and worker nodes.
+SELECT pg_identify_object_as_address(classid, objid, objsubid)::text +FROM pg_catalog.pg_dist_object + EXCEPT +SELECT unnest(result::text[]) AS unnested_result +FROM run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) from pg_catalog.pg_dist_object$$); + SET citus.create_object_propagation TO automatic; begin; create type typ1 as (a int); @@ -612,5 +638,13 @@ RESET citus.create_object_propagation; SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid::text like '%dependent_agg%';$$); +CREATE AGGREGATE newavg ( + sfunc = int4_avg_accum, basetype = int4, stype = _int8, + finalfunc = int8_avg, + initcond1 = '{0,0}' +); + +SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid::text like '%newavg%';$$); + set client_min_messages to error; drop schema aggregate_support cascade; diff --git a/src/test/regress/sql/arbitrary_configs_truncate.sql b/src/test/regress/sql/arbitrary_configs_truncate.sql new file mode 100644 index 000000000..81b44559a --- /dev/null +++ b/src/test/regress/sql/arbitrary_configs_truncate.sql @@ -0,0 +1,37 @@ +SET search_path TO truncate_tests_schema; + +-- Test truncate rollback on a basic table +SELECT COUNT(*) FROM basic_table; + +BEGIN; +TRUNCATE basic_table; +SELECT COUNT(*) FROM basic_table; +ROLLBACK; + +SELECT COUNT(*) FROM basic_table; + +-- Test truncate on a basic table +SELECT COUNT(*) FROM basic_table; + +TRUNCATE basic_table; + +SELECT COUNT(*) FROM basic_table; + +-- Test truncate rollback on partitioned table +SELECT COUNT(*) FROM partitioned_table_0; + +BEGIN; +TRUNCATE partitioned_table; +SELECT COUNT(*) FROM partitioned_table_0; +ROLLBACK; + +SELECT COUNT(*) FROM partitioned_table_0; + +-- Test truncate a partitioned table +SELECT COUNT(*) FROM partitioned_table; +SELECT COUNT(*) FROM partitioned_table_1; + +TRUNCATE partitioned_table; + +SELECT COUNT(*) FROM partitioned_table; +SELECT COUNT(*) FROM partitioned_table_1; diff --git a/src/test/regress/sql/arbitrary_configs_truncate_cascade.sql b/src/test/regress/sql/arbitrary_configs_truncate_cascade.sql new file mode 100644 index 000000000..50f4d2318 --- /dev/null +++ b/src/test/regress/sql/arbitrary_configs_truncate_cascade.sql @@ -0,0 +1,28 @@ +SET search_path TO truncate_cascade_tests_schema; + +-- Test truncate error on table with dependencies +TRUNCATE table_with_pk; + +-- Test truncate rollback on table with dependencies +SELECT COUNT(*) FROM table_with_fk_1; +SELECT COUNT(*) FROM table_with_fk_2; + +BEGIN; +TRUNCATE table_with_pk CASCADE; +SELECT COUNT(*) FROM table_with_fk_1; +SELECT COUNT(*) FROM table_with_fk_2; +ROLLBACK; + +SELECT COUNT(*) FROM table_with_fk_1; +SELECT COUNT(*) FROM table_with_fk_2; + +-- Test truncate on table with dependencies +SELECT COUNT(*) FROM table_with_pk; +SELECT COUNT(*) FROM table_with_fk_1; +SELECT COUNT(*) FROM table_with_fk_2; + +TRUNCATE table_with_pk CASCADE; + +SELECT COUNT(*) FROM table_with_pk; +SELECT COUNT(*) FROM table_with_fk_1; +SELECT COUNT(*) FROM table_with_fk_2; diff --git a/src/test/regress/sql/arbitrary_configs_truncate_cascade_create.sql b/src/test/regress/sql/arbitrary_configs_truncate_cascade_create.sql new file mode 100644 index 000000000..332c84c5f --- /dev/null +++ b/src/test/regress/sql/arbitrary_configs_truncate_cascade_create.sql @@ -0,0 +1,17 @@ +CREATE SCHEMA truncate_cascade_tests_schema; +SET search_path TO truncate_cascade_tests_schema; + +-- tables connected with foreign keys +CREATE TABLE table_with_pk(a bigint PRIMARY KEY); +CREATE
TABLE table_with_fk_1(a bigint, b bigint, FOREIGN KEY (b) REFERENCES table_with_pk(a)); +CREATE TABLE table_with_fk_2(a bigint, b bigint, FOREIGN KEY (b) REFERENCES table_with_pk(a)); + +-- distribute tables +SELECT create_reference_table('table_with_pk'); +SELECT create_distributed_table('table_with_fk_1', 'a'); +SELECT create_reference_table('table_with_fk_2'); + +-- fill tables with data +INSERT INTO table_with_pk(a) SELECT n FROM generate_series(1, 10) n; +INSERT INTO table_with_fk_1(a, b) SELECT n, n FROM generate_series(1, 10) n; +INSERT INTO table_with_fk_2(a, b) SELECT n, n FROM generate_series(1, 10) n; diff --git a/src/test/regress/sql/arbitrary_configs_truncate_create.sql b/src/test/regress/sql/arbitrary_configs_truncate_create.sql new file mode 100644 index 000000000..7b6f19692 --- /dev/null +++ b/src/test/regress/sql/arbitrary_configs_truncate_create.sql @@ -0,0 +1,23 @@ +CREATE SCHEMA truncate_tests_schema; +SET search_path TO truncate_tests_schema; + +-- simple table +CREATE TABLE basic_table(a int); + +-- partitioned table +CREATE TABLE partitioned_table(a int) PARTITION BY RANGE(a); +CREATE TABLE partitioned_table_0 PARTITION OF partitioned_table +FOR VALUES FROM (1) TO (6); +CREATE TABLE partitioned_table_1 PARTITION OF partitioned_table +FOR VALUES FROM (6) TO (11); + +-- distribute tables +SELECT create_distributed_table('basic_table', 'a'); +SELECT create_distributed_table('partitioned_table', 'a'); + +-- fill tables with data +INSERT INTO basic_table(a) SELECT n FROM generate_series(1, 10) n; +INSERT INTO partitioned_table(a) SELECT n FROM generate_series(1, 10) n; + + + diff --git a/src/test/regress/sql/arbitrary_configs_truncate_partition.sql b/src/test/regress/sql/arbitrary_configs_truncate_partition.sql new file mode 100644 index 000000000..46f19416a --- /dev/null +++ b/src/test/regress/sql/arbitrary_configs_truncate_partition.sql @@ -0,0 +1,12 @@ +SET search_path TO truncate_partition_tests_schema; + +-- Test truncate on a partition +SELECT COUNT(*) FROM partitioned_table; +SELECT COUNT(*) FROM partitioned_table_0; +SELECT COUNT(*) FROM partitioned_table_1; + +TRUNCATE partitioned_table_0; + +SELECT COUNT(*) FROM partitioned_table; +SELECT COUNT(*) FROM partitioned_table_0; +SELECT COUNT(*) FROM partitioned_table_1; diff --git a/src/test/regress/sql/arbitrary_configs_truncate_partition_create.sql b/src/test/regress/sql/arbitrary_configs_truncate_partition_create.sql new file mode 100644 index 000000000..1e6109f15 --- /dev/null +++ b/src/test/regress/sql/arbitrary_configs_truncate_partition_create.sql @@ -0,0 +1,15 @@ +CREATE SCHEMA truncate_partition_tests_schema; +SET search_path TO truncate_partition_tests_schema; + +-- partitioned table +CREATE TABLE partitioned_table(a int) PARTITION BY RANGE(a); +CREATE TABLE partitioned_table_0 PARTITION OF partitioned_table +FOR VALUES FROM (1) TO (6); +CREATE TABLE partitioned_table_1 PARTITION OF partitioned_table +FOR VALUES FROM (6) TO (11); + +-- distribute tables +SELECT create_distributed_table('partitioned_table', 'a'); + +-- fill tables with data +INSERT INTO partitioned_table(a) SELECT n FROM generate_series(1, 10) n; diff --git a/src/test/regress/sql/check_mx.sql b/src/test/regress/sql/check_mx.sql index 6c9b2b664..628e86445 100644 --- a/src/test/regress/sql/check_mx.sql +++ b/src/test/regress/sql/check_mx.sql @@ -1,3 +1,10 @@ SHOW citus.enable_metadata_sync; SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary'; + +-- Show that pg_dist_object entities are the same on all nodes +SELECT
pg_identify_object_as_address(classid, objid, objsubid)::text +FROM pg_catalog.pg_dist_object + EXCEPT +SELECT unnest(result::text[]) AS unnested_result +FROM run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) from pg_catalog.pg_dist_object$$); diff --git a/src/test/regress/sql/columnar_create.sql b/src/test/regress/sql/columnar_create.sql index a5861bb45..4037dd28b 100644 --- a/src/test/regress/sql/columnar_create.sql +++ b/src/test/regress/sql/columnar_create.sql @@ -129,3 +129,6 @@ SELECT COUNT(*)=1 FROM pg_class WHERE relname='columnar_temp'; SELECT COUNT(*)=0 FROM columnar_temp; -- since we deleted all the rows, we shouldn't have any stripes for table SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_storage_id); + +-- make sure citus_columnar can be loaded +LOAD 'citus_columnar'; diff --git a/src/test/regress/sql/data_types.sql b/src/test/regress/sql/data_types.sql index 5dd9b1496..cc85d8703 100644 --- a/src/test/regress/sql/data_types.sql +++ b/src/test/regress/sql/data_types.sql @@ -131,5 +131,15 @@ INSERT INTO data_types_table SELECT * FROM data_types_table LIMIT 100000 ON CONF INSERT INTO data_types_table (dist_key, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38) SELECT dist_key+1, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38 FROM data_types_table ON CONFLICT (dist_key) DO UPDATE SET useless_column = 10; + +-- test type names that start with underscore +CREATE TYPE underscore_type_1 AS (a INT); +CREATE TYPE _underscore_type_1 AS (a INT); + +CREATE TYPE underscore_type_2 AS ENUM ('a'); +CREATE TYPE _underscore_type_2 AS ENUM ('a'); + +SELECT result FROM run_command_on_all_nodes('SELECT count(*) FROM pg_type WHERE typname LIKE ''%underscore\_type%'''); + SET client_min_messages TO ERROR; DROP SCHEMA data_types CASCADE; diff --git a/src/test/regress/sql/distributed_collations.sql b/src/test/regress/sql/distributed_collations.sql index 669577a09..6e6e35263 100644 --- a/src/test/regress/sql/distributed_collations.sql +++ b/src/test/regress/sql/distributed_collations.sql @@ -109,3 +109,26 @@ CREATE COLLATION collation_creation_on_worker.another_german_phonebook (provider SET citus.enable_ddl_propagation TO off; DROP SCHEMA collation_creation_on_worker; SET citus.enable_ddl_propagation TO on; + +\c - - - :master_port + +-- will skip trying to propagate the collation due to temp schema +CREATE COLLATION pg_temp.temp_collation (provider = icu, locale = 'de-u-co-phonebk'); + +SET client_min_messages TO ERROR; +CREATE USER alter_collation_user; +SELECT 1 FROM run_command_on_workers('CREATE USER alter_collation_user'); +RESET client_min_messages; + +CREATE COLLATION alter_collation FROM "C"; +ALTER COLLATION alter_collation OWNER TO alter_collation_user; + +SELECT result FROM run_command_on_all_nodes(' + SELECT collowner::regrole FROM pg_collation WHERE collname = ''alter_collation''; +'); + +DROP COLLATION alter_collation; +SET client_min_messages TO ERROR; +DROP USER alter_collation_user; +SELECT 1 FROM run_command_on_workers('DROP USER alter_collation_user'); +RESET client_min_messages; diff --git 
a/src/test/regress/sql/distributed_functions.sql b/src/test/regress/sql/distributed_functions.sql index 9d31dbc1e..b155cf986 100644 --- a/src/test/regress/sql/distributed_functions.sql +++ b/src/test/regress/sql/distributed_functions.sql @@ -61,6 +61,33 @@ CREATE FUNCTION add_polygons(polygon, polygon) RETURNS int IMMUTABLE RETURNS NULL ON NULL INPUT; +CREATE FUNCTION agg_dummy_func(state int, item int) +RETURNS int IMMUTABLE LANGUAGE plpgsql AS $$ +begin + return state + item; +end; +$$; + +SET client_min_messages TO WARNING; +-- will skip trying to propagate the aggregate due to temp schema +CREATE AGGREGATE pg_temp.dummy_agg(int) ( + sfunc = agg_dummy_func, + stype = int, + sspace = 8, + finalfunc = agg_dummy_func, + finalfunc_extra, + initcond = '5', + msfunc = agg_dummy_func, + mstype = int, + msspace = 12, + minvfunc = agg_dummy_func, + mfinalfunc = agg_dummy_func, + mfinalfunc_extra, + minitcond = '1', + sortop = ">" +); +RESET client_min_messages; + -- Test some combination of functions without ddl propagation -- This will prevent the workers from having those types created. They are -- created just-in-time on function distribution diff --git a/src/test/regress/sql/distributed_planning.sql b/src/test/regress/sql/distributed_planning.sql index b388ad390..b19654ff4 100644 --- a/src/test/regress/sql/distributed_planning.sql +++ b/src/test/regress/sql/distributed_planning.sql @@ -359,3 +359,313 @@ SELECT count(*), event FROM date_part_table WHERE event_time > '2020-01-05' GROU SELECT count(*), event FROM date_part_table WHERE user_id = 12 AND event_time = '2020-01-12 12:00:00' GROUP BY event ORDER BY count(*) DESC, event DESC LIMIT 5; SELECT count(*), t1.event FROM date_part_table t1 JOIN date_part_table t2 USING (user_id) WHERE t1.user_id = 1 AND t2.event_time > '2020-01-03' GROUP BY t1.event ORDER BY count(*) DESC, t1.event DESC LIMIT 5; +TRUNCATE test; +TRUNCATE ref; +insert into test(x, y) SELECT 1, i FROM generate_series(1, 10) i; +insert into test(x, y) SELECT 3, i FROM generate_series(11, 40) i; +insert into test(x, y) SELECT i, 1 FROM generate_series(1, 10) i; +insert into test(x, y) SELECT i, 3 FROM generate_series(11, 40) i; + +insert into ref(a, b) SELECT i, 1 FROM generate_series(1, 10) i; +insert into ref(a, b) SELECT i, 3 FROM generate_series(11, 40) i; +insert into ref(a, b) SELECT 1, i FROM generate_series(1, 10) i; +insert into ref(a, b) SELECT 3, i FROM generate_series(11, 40) i; + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + ref.a + FROM ref + WHERE + ref.b = test.x + LIMIT 2 + ) q; + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + ref.a + FROM ref + WHERE + ref.b = test.y + LIMIT 2 + ) q; + +-- Since the subquery only correlates on the distribution column, this can be safely +-- pushed down. But this is currently considered too hard to detect, so we fail. +-- +-- SELECT count(*) +-- FROM ref, +-- LATERAL ( +-- SELECT +-- test.x +-- FROM test +-- WHERE +-- test.x = ref.a +-- LIMIT 2 +-- ) q; + +-- This returns wrong results when pushed down. Instead of returning 2 rows +-- for each row in the reference table, it would return (2 * number of shards) +-- rows for each row in the reference table.
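+-- (For illustration: if the distributed table had 4 shards, the pushed-down LIMIT 2 +-- could yield up to 4 * 2 = 8 rows per reference-table row instead of 2.)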
+-- See issue #5327 +-- +-- SELECT count(*) +-- FROM ref, +-- LATERAL ( +-- SELECT +-- test.y +-- FROM test +-- WHERE +-- test.y = ref.a +-- LIMIT 2 +-- ) q; + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + LIMIT 2 + ) q; + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + test_2.y + FROM test test_2 + WHERE + test_2.x = test.x + LIMIT 2 + ) q; + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + LIMIT 2 + ) q JOIN test ON test.x = q.y; + +-- Would require repartitioning to work with subqueries +-- +-- SELECT count(*) +-- FROM test, +-- LATERAL ( +-- SELECT +-- test_2.x +-- FROM test test_2 +-- WHERE +-- test_2.x = test.y +-- LIMIT 2 +-- ) q ; + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + LIMIT 2 + ) q +; + +SELECT count(*) +FROM ref JOIN test on ref.b = test.y, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + LIMIT 2 + ) q +; + +-- Too complex joins for Citus to handle currently +-- +-- SELECT count(*) +-- FROM ref JOIN test on ref.b = test.x, +-- LATERAL ( +-- SELECT +-- test_2.x +-- FROM test test_2 +-- WHERE +-- test_2.x = ref.a +-- LIMIT 2 +-- ) q +-- ; + +-- Would require repartitioning to work with subqueries +-- +-- SELECT count(*) +-- FROM ref JOIN test on ref.b = test.x, +-- LATERAL ( +-- SELECT +-- test_2.y +-- FROM test test_2 +-- WHERE +-- test_2.y = ref.a +-- LIMIT 2 +-- ) q +-- ; + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = test.x + LIMIT 2 + ) q +; + +-- Without LIMIT clauses +SELECT count(*) +FROM test, + LATERAL ( + SELECT + ref.a + FROM ref + WHERE + ref.b = test.x + ) q; + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + ref.a + FROM ref + WHERE + ref.b = test.y + ) q; + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + test.x + FROM test + WHERE + test.x = ref.a + ) q; + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + test.y + FROM test + WHERE + test.y = ref.a + ) q; + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + ) q; + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + test_2.y + FROM test test_2 + WHERE + test_2.x = test.x + ) q; + +SELECT count(*) +FROM test, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = test.y + ) q ; + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + ) q +; + +SELECT count(*) +FROM ref JOIN test on ref.b = test.y, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + ) q +; + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = ref.a + ) q +; + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.y + FROM test test_2 + WHERE + test_2.y = ref.a + ) q +; + +SELECT count(*) +FROM ref JOIN test on ref.b = test.x, + LATERAL ( + SELECT + test_2.x + FROM test test_2 + WHERE + test_2.x = test.x + ) q +; + +SELECT count(*) +FROM ref, + LATERAL ( + SELECT + ref_2.b y + FROM ref ref_2 + WHERE + ref_2.b = ref.a + ) q JOIN test ON test.x = q.y; + diff --git a/src/test/regress/sql/distributed_types.sql b/src/test/regress/sql/distributed_types.sql index 944bc9702..085518ffa 100644 --- a/src/test/regress/sql/distributed_types.sql +++ 
b/src/test/regress/sql/distributed_types.sql @@ -48,10 +48,9 @@ CREATE TABLE t3 (a int PRIMARY KEY, b tc2); SELECT create_distributed_table('t3','a'); INSERT INTO t3 VALUES (4, ('5',6)::tc2); SELECT * FROM t3; - +COMMIT; -- verify typmod was propagated SELECT run_command_on_workers($$SELECT atttypmod FROM pg_attribute WHERE attnum = 1 AND attrelid = (SELECT typrelid FROM pg_type WHERE typname = 'tc2');$$); -COMMIT; -- transaction block with simple type BEGIN; @@ -335,6 +334,35 @@ CREATE TYPE circ_type1 AS (a int); CREATE TYPE circ_type2 AS (a int, b circ_type1); ALTER TYPE circ_type1 ADD ATTRIBUTE b circ_type2; +-- Show that types can be created locally if has unsupported dependency +CREATE TYPE text_local_def; +CREATE FUNCTION text_local_def_in(cstring) + RETURNS text_local_def + AS 'textin' + LANGUAGE internal STRICT IMMUTABLE; +CREATE FUNCTION text_local_def_out(text_local_def) + RETURNS cstring + AS 'textout' + LANGUAGE internal STRICT IMMUTABLE; +CREATE TYPE text_local_def ( + internallength = variable, + input = text_local_def_in, + output = text_local_def_out, + alignment = int4, + default = 'zippo' +); + +-- It should be created locally as it has unsupported dependency +CREATE TYPE default_test_row AS (f1 text_local_def, f2 int4); + +-- Distributing table depending on that type should error out +CREATE TABLE table_text_local_def(id int, col_1 default_test_row); +SELECT create_distributed_table('table_text_local_def','id'); + +-- will skip trying to propagate the type/enum due to temp schema +CREATE TYPE pg_temp.temp_type AS (int_field int); +CREATE TYPE pg_temp.temp_enum AS ENUM ('one', 'two', 'three'); + -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; diff --git a/src/test/regress/sql/function_propagation.sql b/src/test/regress/sql/function_propagation.sql index 0d7151e3f..eab7927e4 100644 --- a/src/test/regress/sql/function_propagation.sql +++ b/src/test/regress/sql/function_propagation.sql @@ -45,6 +45,7 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas -- Have a separate check for type created in transaction BEGIN; + SET LOCAL citus.create_object_propagation TO deferred; CREATE TYPE function_prop_type_3 AS (a int, b int); COMMIT; @@ -132,7 +133,6 @@ BEGIN; END; $$; - -- Within transaction functions are not distributed SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; COMMIT; @@ -832,6 +832,27 @@ ALTER TABLE table_1_for_circ_dep_3 ADD COLUMN col_2 table_2_for_circ_dep_3; -- It should error out due to circular dependency SELECT create_distributed_table('table_1_for_circ_dep_3','id'); +-- will skip trying to propagate the function due to temp schema +CREATE FUNCTION pg_temp.temp_func(group_size BIGINT) RETURNS SETOF integer[] +AS $$ + SELECT array_agg(s) OVER w + FROM generate_series(1,5) s + WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING) +$$ LANGUAGE SQL STABLE; + +SELECT create_distributed_function('pg_temp.temp_func(BIGINT)'); + +-- Show that support functions are supported +CREATE FUNCTION func_with_support(int, int) RETURNS bool + LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE + AS $$int4eq$$ SUPPORT generate_series_int8_support; + +CREATE FUNCTION 
func_with_support_2(int, int) RETURNS bool + LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE + AS $$int4eq$$; + +ALTER FUNCTION func_with_support_2(int, int) SUPPORT generate_series_int8_support; + RESET search_path; SET client_min_messages TO WARNING; DROP SCHEMA function_propagation_schema CASCADE; diff --git a/src/test/regress/sql/global_cancel.sql b/src/test/regress/sql/global_cancel.sql index 6c4341877..6dc58aa2d 100644 --- a/src/test/regress/sql/global_cancel.sql +++ b/src/test/regress/sql/global_cancel.sql @@ -41,17 +41,15 @@ SELECT pg_cancel_backend(citus_backend_gpid()); \c - postgres - :master_port -SELECT nodeid AS coordinator_node_id FROM pg_dist_node WHERE nodeport = :master_port \gset - SET client_min_messages TO DEBUG; -- 10000000000 is the node id multiplier for global pid -SELECT pg_cancel_backend(10000000000 * :coordinator_node_id + 0); -SELECT pg_terminate_backend(10000000000 * :coordinator_node_id + 0); +SELECT pg_cancel_backend(10000000000 * citus_coordinator_nodeid() + 0); +SELECT pg_terminate_backend(10000000000 * citus_coordinator_nodeid() + 0); RESET client_min_messages; -SELECT citus_backend_gpid() = citus_calculate_gpid(:coordinator_node_id, pg_backend_pid()); +SELECT citus_backend_gpid() = citus_calculate_gpid(citus_coordinator_nodeid(), pg_backend_pid()); SELECT nodename = citus_nodename_for_nodeid(nodeid) AND nodeport = citus_nodeport_for_nodeid(nodeid) FROM pg_dist_node diff --git a/src/test/regress/sql/index_create.sql b/src/test/regress/sql/index_create.sql new file mode 100644 index 000000000..34ceb02a1 --- /dev/null +++ b/src/test/regress/sql/index_create.sql @@ -0,0 +1,41 @@ +CREATE SCHEMA index_create; +SET search_path TO index_create; + +CREATE TABLE test_tbl (a INT NOT NULL PRIMARY KEY, b text, c BIGINT); +CREATE UNIQUE INDEX CONCURRENTLY a_index ON test_tbl (a); +SELECT create_distributed_table('test_tbl','a'); + +-- suppress the WARNING message: not propagating CLUSTER command to worker nodes +SET client_min_messages TO ERROR; +CLUSTER test_tbl USING test_tbl_pkey; +RESET client_min_messages; + +BEGIN; + CREATE INDEX idx1 ON test_tbl (a) INCLUDE (b, c); + DROP TABLE test_tbl; +ROLLBACK; + +CREATE INDEX idx1 ON test_tbl (a) INCLUDE (b, c) WHERE a > 10; +CREATE INDEX idx2 ON test_tbl (lower(b)); + +CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); +-- create its partitions +CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); +CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); + +-- create indexes on the parent +CREATE INDEX IF NOT EXISTS partitioned_idx_1 ON ONLY partitioning_test (id); +CREATE INDEX IF NOT EXISTS partitioned_idx_2 ON partitioning_test (id, time NULLS FIRST); + +SELECT create_distributed_table('partitioning_test', 'id'); + +-- create hash index on distributed partitioned table +CREATE INDEX partition_idx_hash ON partitioning_test USING hash (id); + +-- change statistics of index +ALTER INDEX idx2 ALTER COLUMN 1 SET STATISTICS 1000; + +-- test reindex +REINDEX INDEX idx1; + +ALTER TABLE test_tbl REPLICA IDENTITY USING INDEX a_index; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index cf123ff49..a2b4ccf8d 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -437,6 +437,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT 
assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; + SET citus.enable_ddl_propagation TO OFF; \set VERBOSITY terse CREATE TYPE distributed_test_type AS (a int, b int); @@ -831,6 +832,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SET citus.enable_ddl_propagation TO OFF; CREATE COLLATION collation_t1 (provider = icu, locale = 'de-u-co-phonebk'); CREATE COLLATION caseinsensitive (provider = icu, locale = 'und-u-ks-level2'); diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql index 5067cf5a1..11893f806 100644 --- a/src/test/regress/sql/multi_explain.sql +++ b/src/test/regress/sql/multi_explain.sql @@ -1084,5 +1084,11 @@ EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subqu PREPARE dummy_prep_stmt(int) AS SELECT FROM distributed_table_1; EXPLAIN :default_analyze_flags EXECUTE dummy_prep_stmt(50); +CREATE TYPE multi_explain.int_wrapper_type AS (int_field int); +CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type); +SELECT create_distributed_table('tbl', 'a'); + +EXPLAIN :default_analyze_flags SELECT * FROM tbl; + SET client_min_messages TO ERROR; DROP SCHEMA multi_explain CASCADE; diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql index 6c22a8403..414b0d73f 100644 --- a/src/test/regress/sql/multi_index_statements.sql +++ b/src/test/regress/sql/multi_index_statements.sql @@ -398,6 +398,41 @@ BEGIN END; $BODY$ LANGUAGE plpgsql; + +CREATE TABLE test_for_func( + a int +); + +SELECT create_distributed_table('test_for_func', 'a'); + +-- create a function that depends on a relation that depends on an extension +CREATE OR REPLACE FUNCTION function_on_table_depends_on_extension ( + p_table_name text) +RETURNS TABLE (LIKE pg_dist_partition) +AS $$ +BEGIN + RETURN QUERY + SELECT * FROM pg_dist_partition WHERE logicalrelid::regclass::text = p_table_name; +END; +$$ LANGUAGE plpgsql; + +SELECT logicalrelid FROM function_on_table_depends_on_extension('test_for_func'); + + +-- create a function that depends on a relation that does not depend on an extension +CREATE TABLE local_test(a int); +CREATE OR REPLACE FUNCTION function_on_table_does_not_depend_on_extension ( + input int) +RETURNS TABLE (LIKE local_test) +AS $$ +BEGIN + RETURN QUERY + SELECT * FROM local_test WHERE a = input; +END; +$$ LANGUAGE plpgsql; + +SELECT * FROM function_on_table_does_not_depend_on_extension(5); + -- hide plpgsql messages as they differ across pg versions \set VERBOSITY terse diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql index b2f457e5a..0d67bb68b 100644 --- a/src/test/regress/sql/multi_metadata_sync.sql +++ b/src/test/regress/sql/multi_metadata_sync.sql @@ -779,7 +779,8 @@ ALTER SYSTEM SET citus.metadata_sync_interval TO DEFAULT; ALTER SYSTEM SET citus.metadata_sync_retry_interval TO DEFAULT; SELECT pg_reload_conf(); -UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port; +-- make sure that all the nodes have valid metadata before moving forward +SELECT wait_until_metadata_sync(60000); SELECT master_add_node('localhost', :worker_2_port); diff --git a/src/test/regress/sql/multi_move_mx.sql b/src/test/regress/sql/multi_move_mx.sql index 5689e8f80..740d19462 100644 --- a/src/test/regress/sql/multi_move_mx.sql +++ b/src/test/regress/sql/multi_move_mx.sql @@ -3,8 +3,6 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq 
RESTART 1550000; -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - -- Create mx test tables SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; @@ -134,9 +132,3 @@ LIMIT 1 OFFSET 1; DROP TABLE mx_table_1; DROP TABLE mx_table_2; DROP TABLE mx_table_3; -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -\c - - - :worker_2_port -DELETE FROM pg_dist_node; -DELETE FROM pg_dist_partition; -DELETE FROM pg_dist_shard; -DELETE FROM pg_dist_shard_placement; diff --git a/src/test/regress/sql/multi_mx_hide_shard_names.sql b/src/test/regress/sql/multi_mx_hide_shard_names.sql index 57017c90c..b56329150 100644 --- a/src/test/regress/sql/multi_mx_hide_shard_names.sql +++ b/src/test/regress/sql/multi_mx_hide_shard_names.sql @@ -41,9 +41,16 @@ SET search_path TO 'mx_hide_shard_names'; SELECT * FROM citus_shards_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2; SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'mx_hide_shard_names' ORDER BY 2; +-- make sure that pg_class queries do not get blocked on table locks +begin; +lock table test_table in access exclusive mode; +prepare transaction 'take-aggressive-lock'; + -- shards are hidden when using psql as application_name SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; +commit prepared 'take-aggressive-lock'; + -- now create an index \c - - - :master_port SET search_path TO 'mx_hide_shard_names'; @@ -200,6 +207,38 @@ SELECT * FROM citus_shard_indexes_on_worker WHERE "Schema" = 'CiTuS.TeeN' ORDER \d \di + +\c - - - :worker_1_port +-- re-connect to the worker node and show that only +-- client backends can filter shards +SET search_path TO "CiTuS.TeeN"; + +-- Create the necessary test utility function +SET citus.enable_metadata_sync TO off; +CREATE OR REPLACE FUNCTION set_backend_type(backend_type int) + RETURNS void + LANGUAGE C STRICT + AS 'citus'; +RESET citus.enable_metadata_sync; + +-- the shards and indexes do not show up +SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + +-- say, we set it to bgworker +-- the shards and indexes do not show up +SELECT set_backend_type(4); +SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + +-- or, we set it to walsender +-- the shards and indexes do show up +SELECT set_backend_type(9); +SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + +-- but, client backends to see the shards +SELECT set_backend_type(3); +SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + + -- clean-up \c - - - :master_port diff --git a/src/test/regress/sql/multi_mx_schema_support.sql b/src/test/regress/sql/multi_mx_schema_support.sql index 52bdb4588..e2eceb0b3 100644 --- a/src/test/regress/sql/multi_mx_schema_support.sql +++ b/src/test/regress/sql/multi_mx_schema_support.sql @@ -351,7 +351,44 @@ CREATE SCHEMA localschema; -- should error out SELECT run_command_on_workers($$DROP SCHEMA localschema;$$); +SET client_min_messages TO ERROR; +CREATE ROLE schema_owner WITH LOGIN; +RESET client_min_messages; +SELECT run_command_on_workers($$SET citus.enable_ddl_propagation TO OFF;CREATE ROLE schema_owner WITH LOGIN;RESET citus.enable_ddl_propagation;$$); +RESET citus.enable_ddl_propagation; +-- create schema with the name of the owner +CREATE SCHEMA AUTHORIZATION 
schema_owner; +-- verify the schema is created on workers +SELECT run_command_on_workers($$SELECT COUNT(*) FROM pg_namespace WHERE nspname='schema_owner';$$); + +DROP SCHEMA schema_owner; + +-- test CREATE SCHEMA .. GRANT ON SCHEMA commands +-- first create the role to be granted +SET citus.enable_ddl_propagation TO OFF; +SET client_min_messages TO ERROR; +CREATE ROLE role_to_be_granted WITH LOGIN; +RESET client_min_messages; +SELECT run_command_on_workers($$SET citus.enable_ddl_propagation TO OFF;CREATE ROLE role_to_be_granted WITH LOGIN;RESET citus.enable_ddl_propagation;$$); RESET citus.enable_ddl_propagation; +CREATE SCHEMA old_schema; +CREATE SCHEMA new_schema + CREATE TABLE t1 (a int) + GRANT ALL ON SCHEMA old_schema TO role_to_be_granted + GRANT ALL ON SCHEMA new_schema TO role_to_be_granted; + +-- the role should be granted on both the new and the old schema +SELECT nspacl FROM pg_namespace WHERE nspname='old_schema' OR nspname='new_schema'; +-- verify on workers +SELECT run_command_on_workers($$SELECT nspacl FROM pg_namespace WHERE nspname='new_schema';$$); +SELECT run_command_on_workers($$SELECT nspacl FROM pg_namespace WHERE nspname='old_schema';$$); + +-- verify the table t1 is created as a local pg table +-- this might be changed after some improvements on use_citus_managed_tables +-- if so, please verify that t1 is added to metadata +SELECT COUNT(*)=0 FROM pg_dist_partition WHERE logicalrelid='new_schema.t1'::regclass; + +DROP SCHEMA old_schema, new_schema CASCADE; DROP SCHEMA mx_old_schema CASCADE; DROP SCHEMA mx_new_schema CASCADE; diff --git a/src/test/regress/sql/multi_replicate_reference_table.sql b/src/test/regress/sql/multi_replicate_reference_table.sql index 121e35c0f..3e0acbc1c 100644 --- a/src/test/regress/sql/multi_replicate_reference_table.sql +++ b/src/test/regress/sql/multi_replicate_reference_table.sql @@ -67,7 +67,6 @@ WHERE DROP TABLE replicate_reference_table_unhealthy; - -- test replicating a reference table when a new node added CREATE TABLE replicate_reference_table_valid(column1 int); SELECT create_reference_table('replicate_reference_table_valid'); @@ -184,6 +183,15 @@ WHERE colocationid IN DROP TABLE replicate_reference_table_rollback; +-- confirm that there is just 1 node +SELECT count(*) FROM pg_dist_node; +-- test whether we can create distributed objects on a single worker node +CREATE TABLE cp_test (a int, b text); +CREATE PROCEDURE ptest1(x text) +LANGUAGE SQL +AS $$ + INSERT INTO cp_test VALUES (1, x); +$$; -- test replicating a reference table when a new node added in TRANSACTION + COMMIT CREATE TABLE replicate_reference_table_commit(column1 int); diff --git a/src/test/regress/sql/multi_table_ddl.sql b/src/test/regress/sql/multi_table_ddl.sql index 1fa5b3a61..fc6539ac9 100644 --- a/src/test/regress/sql/multi_table_ddl.sql +++ b/src/test/regress/sql/multi_table_ddl.sql @@ -122,6 +122,13 @@ ALTER TABLE test_table ALTER COLUMN id3 SET DEFAULT nextval('test_sequence_1'), ALTER TABLE test_table ADD COLUMN id3 bigserial; ALTER TABLE test_table ADD COLUMN id4 bigserial CHECK (id4 > 0); +CREATE SEQUENCE pg_temp.temp_sequence; +CREATE TABLE table_with_temp_sequence ( + dist_key int, + seq_col bigint default nextval('pg_temp.temp_sequence') +); +SELECT create_distributed_table('table_with_temp_sequence', 'dist_key'); + DROP TABLE test_table CASCADE; DROP SEQUENCE test_sequence_0; DROP SEQUENCE test_sequence_1; diff --git a/src/test/regress/sql/partitioning_issue_3970.sql b/src/test/regress/sql/partitioning_issue_3970.sql index b95f2a2ad..c60f428b7 100644 
--- a/src/test/regress/sql/partitioning_issue_3970.sql +++ b/src/test/regress/sql/partitioning_issue_3970.sql @@ -42,7 +42,7 @@ WHERE relname LIKE 'part_table%' ORDER BY 1,2,3; -- check the constraint names on the worker node --- verify that check constraınts do not have a shardId suffix +-- verify that check constraints do not have a shardId suffix \c - - - :worker_1_port SELECT relname, conname, pg_catalog.pg_get_constraintdef(con.oid, true) FROM pg_constraint con JOIN pg_class rel ON (rel.oid=con.conrelid) diff --git a/src/test/regress/sql/propagate_extension_commands.sql b/src/test/regress/sql/propagate_extension_commands.sql index ba589cb1c..bd0d01cf7 100644 --- a/src/test/regress/sql/propagate_extension_commands.sql +++ b/src/test/regress/sql/propagate_extension_commands.sql @@ -1,3 +1,7 @@ +-- print whether we're using version > 12 to make version-specific tests clear +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int > 12 AS version_above_twelve; + CREATE SCHEMA "extension'test"; -- use a schema name with escape character @@ -114,26 +118,33 @@ SELECT create_reference_table('ref_table_2'); CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); -CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); -COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); $$); -SELECT run_command_on_workers($$ CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); -$$); - -SELECT run_command_on_workers($$ COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -$$); CREATE EXTENSION dict_int FROM unpackaged; SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'dict_int'$$); SELECT run_command_on_workers($$SELECT extversion FROM pg_extension WHERE extname = 'dict_int'$$); --- and add the other node +-- adding the second node will fail as the text search template needs to be created manually +SELECT 1 from master_add_node('localhost', :worker_2_port); + +-- create the text search template manually on the worker +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO false; +CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; +CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; +CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); +RESET citus.enable_metadata_sync; + +\c - - - :master_port +SET client_min_messages TO WARNING; + +-- add the second node now SELECT 1 from master_add_node('localhost', :worker_2_port); -- show that the extension is created on both existing and new node @@ -196,6 +207,7 @@ SET search_path TO "extension'test"; -- enable it and see that create command errors but continues its execution by changing citus.multi_shard_modify_mode TO 'off BEGIN; + SET LOCAL citus.create_object_propagation TO deferred; CREATE TABLE some_random_table (a int); SELECT create_distributed_table('some_random_table', 'a'); CREATE EXTENSION seg; @@ -345,6 +357,7 @@ DROP TABLE test_extension_function; -- Test extension function altering 
distribution argument BEGIN; SET citus.shard_replication_factor = 1; +SET citus.multi_shard_modify_mode TO sequential; CREATE TABLE test_extension_function(col1 float8[], col2 float8[]); SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); CREATE EXTENSION cube; @@ -358,5 +371,10 @@ SELECT distribution_argument_index FROM pg_catalog.pg_dist_object WHERE classid objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); ROLLBACK; +-- Postgres already doesn't allow creating extensions in temp schema but +-- let's have a test for that to track any furher changes in postgres. +DROP EXTENSION isn CASCADE; +CREATE EXTENSION isn WITH SCHEMA pg_temp; + -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; diff --git a/src/test/regress/sql/propagate_statistics.sql b/src/test/regress/sql/propagate_statistics.sql index a66a108c6..7e1f2fa18 100644 --- a/src/test/regress/sql/propagate_statistics.sql +++ b/src/test/regress/sql/propagate_statistics.sql @@ -14,6 +14,8 @@ CREATE TABLE test_stats ( SELECT create_distributed_table('test_stats', 'a'); +CREATE STATISTICS pg_temp.s1 (dependencies) ON a, b FROM test_stats; + CREATE STATISTICS s1 (dependencies) ON a, b FROM test_stats; -- test for distributing an already existing statistics diff --git a/src/test/regress/sql/sequences.sql b/src/test/regress/sql/sequences.sql new file mode 100644 index 000000000..886585053 --- /dev/null +++ b/src/test/regress/sql/sequences.sql @@ -0,0 +1,27 @@ +SET search_path TO sequences_schema; + +-- see the renamed sequence object +select count(*) from pg_sequence where seqrelid = 'renamed_seq'::regclass; + +TRUNCATE seq_test_0; +INSERT INTO seq_test_0 VALUES (1); + +-- verify that sequence works properly +select max(z) into maxval_z from seq_test_0; +select max(y) into maxval_y from seq_test_0; +select max+1=nextval('renamed_seq') as check_sanity from maxval_z; +select max+1=nextval('seq_1') as check_sanity from maxval_y; + +TRUNCATE seq_test_0; +INSERT INTO seq_test_0 VALUES (199999, DEFAULT, DEFAULT); +drop table maxval_z; +select max(z) into maxval_z from seq_test_0; +SELECT pg_sequence_last_value('renamed_seq'::regclass) = max FROM maxval_z; + +TRUNCATE seq_test_0; +BEGIN; + INSERT INTO seq_test_0 VALUES (2); + -- verify that sequence works properly + select max(z)+1=nextval('renamed_seq') as check_sanity from seq_test_0 ; + select max(y)+1=nextval('seq_1') as check_sanity from seq_test_0 ; +COMMIT; diff --git a/src/test/regress/sql/sequences_create.sql b/src/test/regress/sql/sequences_create.sql new file mode 100644 index 000000000..de983e177 --- /dev/null +++ b/src/test/regress/sql/sequences_create.sql @@ -0,0 +1,21 @@ +CREATE SCHEMA sequences_schema; +SET search_path TO sequences_schema; + +CREATE SEQUENCE seq_0; +ALTER SEQUENCE seq_0 AS smallint; + +CREATE SEQUENCE seq_1; +ALTER SEQUENCE seq_1 AS bigint; + +CREATE TABLE seq_test_0 (x bigint, y bigint); +SELECT create_distributed_table('seq_test_0','x'); + +INSERT INTO seq_test_0 SELECT 1, s FROM generate_series(1, 50) s; + +SELECT * FROM seq_test_0 ORDER BY 1, 2 LIMIT 5; + +ALTER TABLE seq_test_0 ADD COLUMN z bigint; +ALTER TABLE seq_test_0 ALTER COLUMN z SET DEFAULT nextval('seq_0'); +ALTER TABLE seq_test_0 ALTER COLUMN y SET DEFAULT nextval('seq_1'); + +ALTER SEQUENCE seq_0 RENAME TO renamed_seq; diff --git a/src/test/regress/sql/sequences_with_different_types.sql b/src/test/regress/sql/sequences_with_different_types.sql new file mode 100644 index 000000000..d9e7e63f5 --- /dev/null +++ 
b/src/test/regress/sql/sequences_with_different_types.sql @@ -0,0 +1,74 @@ +CREATE SCHEMA sequences_with_different_types; +SET search_path TO sequences_with_different_types; + +CREATE TYPE two_big_ints AS (a bigint, b bigint); +-- by default, sequences get bigint type +CREATE SEQUENCE bigint_sequence_1; +CREATE SEQUENCE bigint_sequence_2 START 10000; +CREATE SEQUENCE bigint_sequence_3 INCREMENT 10; +CREATE SEQUENCE bigint_sequence_4 MINVALUE 1000000; +CREATE SEQUENCE bigint_sequence_5; +CREATE SEQUENCE bigint_sequence_8; + +CREATE TABLE table_1 +( + user_id bigint, + user_code_1 text DEFAULT (('CD'::text || lpad(nextval('bigint_sequence_1'::regclass)::text, 10, '0'::text))), + user_code_2 text DEFAULT nextval('bigint_sequence_2'::regclass)::text, + user_code_3 text DEFAULT (nextval('bigint_sequence_3'::regclass) + 1000)::text, + user_code_4 float DEFAULT nextval('bigint_sequence_4'::regclass), + user_code_5 two_big_ints DEFAULT (nextval('bigint_sequence_5'::regclass), nextval('bigint_sequence_5'::regclass)), + user_code_8 jsonb DEFAULT to_jsonb('test'::text) || to_jsonb(nextval('bigint_sequence_8'::regclass)) + +); +SELECT create_distributed_table('table_1', 'user_id'); + +INSERT INTO table_1 VALUES (1, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT), (2, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING *; + +\c - - - :worker_1_port +SET search_path TO sequences_with_different_types; + +INSERT INTO table_1 VALUES (3, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT), (4, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT, DEFAULT) RETURNING *; + + +\c - - - :master_port +SET search_path TO sequences_with_different_types; +CREATE SEQUENCE bigint_sequence_6; + +CREATE TABLE table_2 +( + user_id bigint, + user_code OID DEFAULT nextval('bigint_sequence_6'::regclass) +); +SELECT create_distributed_table('table_2', 'user_id'); + +-- on the coordinator, the sequence starts from 0 +INSERT INTO table_2 VALUES (1, DEFAULT) RETURNING *; + +\c - - - :worker_1_port +SET search_path TO sequences_with_different_types; + +-- this fails because on the workers the start value of the sequence +-- is greater than the largest value of an oid +INSERT INTO table_2 VALUES (1, DEFAULT) RETURNING *; + +\c - - - :master_port +SET search_path TO sequences_with_different_types; +CREATE SEQUENCE bigint_sequence_7; + +CREATE TABLE table_3 +( + user_id bigint, + user_code boolean DEFAULT ((nextval('bigint_sequence_7'::regclass)%2)::int)::boolean +); +SELECT create_distributed_table('table_3', 'user_id'); + +INSERT INTO table_3 VALUES (1, DEFAULT), (2, DEFAULT) RETURNING *; + +\c - - - :worker_1_port +SET search_path TO sequences_with_different_types; +INSERT INTO table_3 VALUES (3, DEFAULT), (4, DEFAULT) RETURNING *; + +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA sequences_with_different_types CASCADE; diff --git a/src/test/regress/sql/sqlancer_failures.sql b/src/test/regress/sql/sqlancer_failures.sql index 4bd7d3c70..afb7b909f 100644 --- a/src/test/regress/sql/sqlancer_failures.sql +++ b/src/test/regress/sql/sqlancer_failures.sql @@ -91,6 +91,20 @@ RIGHT JOIN (SELECT * FROM reference_table OFFSET 0) c ON (c.id > 0); -- drop existing sqlancer tables before next tests DROP TABLE t0, t1, t2, t3, t4 CASCADE; +CREATE TABLE tbl1(a REAL, b FLOAT, c money); +CREATE TABLE tbl2(a REAL, b FLOAT, c money); + +SELECT create_distributed_table('tbl1', 'a'); +SELECT create_distributed_table('tbl2', 'b'); + +INSERT INTO tbl1 VALUES(1, 1, 1); + +SET citus.enable_repartition_joins to ON; + 
+SELECT * FROM tbl1, tbl2 WHERE tbl2.c=tbl1.c;
+
+DROP TABLE tbl1, tbl2 CASCADE;
+
 CREATE TABLE IF NOT EXISTS t0(c0 TEXT CHECK (TRUE), c1 money ) WITH (autovacuum_vacuum_threshold=1180014707, autovacuum_freeze_table_age=13771154, autovacuum_vacuum_cost_delay=23, autovacuum_analyze_threshold=1935153914, autovacuum_freeze_min_age=721733768, autovacuum_enabled=0, autovacuum_vacuum_cost_limit=9983);
 CREATE UNLOGGED TABLE IF NOT EXISTS t1(LIKE t0);
 CREATE TABLE t2(LIKE t0 INCLUDING INDEXES);
diff --git a/src/test/regress/sql/text_search.sql b/src/test/regress/sql/text_search.sql
index c0f433599..b5b1f300e 100644
--- a/src/test/regress/sql/text_search.sql
+++ b/src/test/regress/sql/text_search.sql
@@ -2,7 +2,7 @@ CREATE SCHEMA text_search;
 CREATE SCHEMA text_search2;
 SET search_path TO text_search;

--- create a new configruation from scratch
+-- create a new configuration from scratch
 CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default );
 CREATE TABLE t1(id int, name text);
 CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text));
@@ -26,13 +26,13 @@ COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'on demand propaga
 CREATE TABLE t1(id int, name text);
 CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text));
 SELECT create_distributed_table('t1', 'name');
+-- verify that we can change the object
+COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'this comment can be set right now';
+COMMIT;
 SELECT * FROM run_command_on_workers($$
     SELECT obj_description('text_search.my_text_search_config'::regconfig);
 $$) ORDER BY 1,2;
-
--- verify that changing anything on a managed TEXT SEARCH CONFIGURATION fails after parallel execution
-COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'this comment can''t be set right now';
-ABORT;
+DROP TABLE t1;

 -- create an index on an already distributed table
 BEGIN;
@@ -41,10 +41,11 @@ COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config2 IS 'on demand propag
 CREATE TABLE t1(id int, name text);
 SELECT create_distributed_table('t1', 'name');
 CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config2'::regconfig, (COALESCE(name, ''::character varying))::text));
+COMMIT;
 SELECT * FROM run_command_on_workers($$
     SELECT obj_description('text_search.my_text_search_config2'::regconfig);
 $$) ORDER BY 1,2;
-ABORT;
+DROP TABLE t1;

 -- should be able to create a configuration based on a copy of an existing configuration
 CREATE TEXT SEARCH CONFIGURATION french_noaccent ( COPY = french );
@@ -83,7 +84,7 @@ $$) ORDER BY 1,2;
 ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciihword;

 -- Comment on a text search configuration
-COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS 'a text configuration that is butcherd to test all edge cases';
+COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS 'a text configuration that is butchered to test all edge cases';
 SELECT * FROM run_command_on_workers($$
     SELECT obj_description('text_search.french_noaccent'::regconfig);
 $$) ORDER BY 1,2;
@@ -275,6 +276,142 @@ CREATE TABLE sensors_a_partition PARTITION OF sensors FOR VALUES FROM ('2000-01-
 CREATE INDEX sensors_search_name ON sensors USING gin (to_tsvector('partial_index_test_config'::regconfig, (COALESCE(name, ''::character varying))::text));
 SELECT create_distributed_table('sensors', 'measureid');

+-- create a new dictionary from scratch
+CREATE TEXT SEARCH DICTIONARY my_english_dict (
+    template = snowball,
+    language = english,
+    stopwords = english
+);
+
+-- verify that the dictionary definition is the same in all nodes
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption)
+    FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid )
+    WHERE dictname = 'my_english_dict';
+$$);
+
+-- use the new dictionary in a configuration mapping
+CREATE TEXT SEARCH CONFIGURATION my_english_config ( COPY = english );
+ALTER TEXT SEARCH CONFIGURATION my_english_config ALTER MAPPING FOR asciiword WITH my_english_dict;
+
+-- verify that the dictionary is available on the worker nodes
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT ROW(alias,dictionary) FROM ts_debug('text_search.my_english_config', 'The Brightest supernovaes') WHERE alias = 'asciiword' LIMIT 1;
+$$);
+
+-- comment on a text search dictionary
+COMMENT ON TEXT SEARCH DICTIONARY my_english_dict IS 'a text search dictionary that is butchered to test all edge cases';
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT obj_description('text_search.my_english_dict'::regdictionary);
+$$);
+
+-- remove a comment
+COMMENT ON TEXT SEARCH DICTIONARY my_english_dict IS NULL;
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT obj_description('text_search.my_english_dict'::regdictionary);
+$$);
+
+-- test various ALTER TEXT SEARCH DICTIONARY commands
+ALTER TEXT SEARCH DICTIONARY my_english_dict RENAME TO my_turkish_dict;
+ALTER TEXT SEARCH DICTIONARY my_turkish_dict (language = turkish, stopwords);
+ALTER TEXT SEARCH DICTIONARY my_turkish_dict OWNER TO text_search_owner;
+ALTER TEXT SEARCH DICTIONARY my_turkish_dict SET SCHEMA "Text Search Requiring Quote's";
+
+-- verify that the dictionary definition is the same in all nodes
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption)
+    FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid )
+    WHERE dictname = 'my_turkish_dict';
+$$);
+
+-- verify that the configuration dictionary is changed in all nodes
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT ROW(alias,dictionary) FROM ts_debug('text_search.my_english_config', 'The Brightest supernovaes') WHERE alias = 'asciiword' LIMIT 1;
+$$);
+
+-- before testing drops, check that the dictionary exists on all nodes
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT '"Text Search Requiring Quote''s".my_turkish_dict'::regdictionary;
+$$);
+
+ALTER TEXT SEARCH DICTIONARY "Text Search Requiring Quote's".my_turkish_dict SET SCHEMA text_search;
+
+-- verify that we can drop the dictionary only with cascade option
+DROP TEXT SEARCH DICTIONARY my_turkish_dict;
+DROP TEXT SEARCH DICTIONARY my_turkish_dict CASCADE;
+
+-- verify that it is dropped now
+SELECT result FROM run_command_on_all_nodes($$
+    SELECT 'my_turkish_dict'::regdictionary;
+$$);
+
+-- test different templates that are used in dictionaries
+CREATE TEXT SEARCH DICTIONARY simple_dict (
+    TEMPLATE = pg_catalog.simple,
+    STOPWORDS = english,
+    accept = false
+);
+SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$
+    SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption)
+    FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid )
+    WHERE dictname = 'simple_dict';
+$$);
+
+CREATE TEXT SEARCH DICTIONARY synonym_dict (
+    template=synonym,
+    synonyms='synonym_sample',
+    casesensitive=1
+);
+SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$
+    SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption)
+    FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid )
+    WHERE dictname = 'synonym_dict';
+$$);
+
+CREATE TEXT SEARCH DICTIONARY thesaurus_dict (
+    TEMPLATE = thesaurus,
+    DictFile = thesaurus_sample,
+    Dictionary = pg_catalog.english_stem
+);
+SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$
+    SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption)
+    FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid )
+    WHERE dictname = 'thesaurus_dict';
+$$);
+
+CREATE TEXT SEARCH DICTIONARY ispell_dict (
+    TEMPLATE = ispell,
+    DictFile = ispell_sample,
+    AffFile = ispell_sample,
+    Stopwords = english
+);
+SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$
+    SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption)
+    FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid )
+    WHERE dictname = 'ispell_dict';
+$$);
+
+CREATE TEXT SEARCH DICTIONARY snowball_dict (
+    TEMPLATE = snowball,
+    Language = english,
+    StopWords = english
+);
+SELECT COUNT(DISTINCT result)=1 FROM run_command_on_all_nodes($$
+    SELECT ROW(dictname, dictnamespace::regnamespace, dictowner::regrole, tmplname, dictinitoption)
+    FROM pg_ts_dict d JOIN pg_ts_template t ON ( d.dicttemplate = t.oid )
+    WHERE dictname = 'snowball_dict';
+$$);
+
+-- will skip trying to propagate the text search configuration due to temp schema
+CREATE TEXT SEARCH CONFIGURATION pg_temp.temp_text_search_config ( parser = default );
+
+-- will skip trying to propagate the text search dictionary due to temp schema
+CREATE TEXT SEARCH DICTIONARY pg_temp.temp_text_search_dict (
+    template = snowball,
+    language = english,
+    stopwords = english
+);
+
 SET client_min_messages TO 'warning';
 DROP SCHEMA text_search, text_search2, "Text Search Requiring Quote's" CASCADE;
 DROP ROLE text_search_owner;
diff --git a/src/test/regress/sql/unsupported_lateral_subqueries.sql b/src/test/regress/sql/unsupported_lateral_subqueries.sql
new file mode 100644
index 000000000..3e33f107d
--- /dev/null
+++ b/src/test/regress/sql/unsupported_lateral_subqueries.sql
@@ -0,0 +1,197 @@
+CREATE SCHEMA unsupported_lateral_joins;
+SET search_path TO unsupported_lateral_joins;
+SET citus.shard_count TO 4;
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 13354100;
+
+CREATE TABLE test(x bigint, y bigint);
+SELECT create_distributed_table('test','x');
+
+CREATE TABLE ref(a bigint, b bigint);
+SELECT create_reference_table('ref');
+
+insert into test(x, y) SELECT 1, i FROM generate_series(1, 10) i;
+insert into test(x, y) SELECT 3, i FROM generate_series(11, 40) i;
+insert into test(x, y) SELECT i, 1 FROM generate_series(1, 10) i;
+insert into test(x, y) SELECT i, 3 FROM generate_series(11, 40) i;
+
+insert into ref(a, b) SELECT i, 1 FROM generate_series(1, 10) i;
+insert into ref(a, b) SELECT i, 3 FROM generate_series(11, 40) i;
+insert into ref(a, b) SELECT 1, i FROM generate_series(1, 10) i;
+insert into ref(a, b) SELECT 3, i FROM generate_series(11, 40) i;
+
+-- The following queries return wrong results when pushed down. Instead of
+-- returning 2 rows for each row in the ref table, they would return
+-- (2 * number of shards) rows for each row in the reference table. See issue #5327
+SELECT count(*)
+FROM ref,
+    LATERAL (
+        SELECT
+            test.y
+        FROM test
+        WHERE
+            test.y = ref.a
+        LIMIT 2
+    ) q;
+
+SELECT count(*)
+FROM (VALUES (1), (3)) ref(a),
+    LATERAL (
+        SELECT
+            test.y
+        FROM test
+        WHERE
+            test.y = ref.a
+        LIMIT 2
+    ) q;
+
+WITH ref(a) as (select y from test)
+SELECT count(*)
+FROM ref,
+    LATERAL (
+        SELECT
+            test.y
+        FROM test
+        WHERE
+            test.y = ref.a
+        LIMIT 2
+    ) q;
+
+SELECT count(*)
+FROM generate_series(1, 3) ref(a),
+    LATERAL (
+        SELECT
+            test.y
+        FROM test
+        WHERE
+            test.y = ref.a
+        LIMIT 2
+    ) q;
+
+SELECT count(*)
+FROM (SELECT generate_series(1, 3)) ref(a),
+    LATERAL (
+        SELECT
+            test.y
+        FROM test
+        WHERE
+            test.y = ref.a
+        LIMIT 2
+    ) q;
+
+
+-- make sure the right error message is chosen
+SELECT count(*)
+FROM ref ref_table,
+    (VALUES (1), (3)) rec_values(a),
+    LATERAL (
+        SELECT
+            test.y
+        FROM test
+        WHERE
+            test.y = ref_table.a
+        LIMIT 2
+    ) q;
+
+SELECT count(*)
+FROM ref as ref_table,
+    (VALUES (1), (3)) ref_values(a),
+    LATERAL (
+        SELECT
+            test.y
+        FROM test
+        WHERE
+            test.y = ref_values.a
+        LIMIT 2
+    ) q;
+
+SELECT count(*) FROM
+    ref ref_outer,
+    LATERAL (
+        SELECT * FROM
+            LATERAL ( SELECT *
+                FROM ref ref_inner,
+                LATERAL (
+                    SELECT
+                        test.y
+                    FROM test
+                    WHERE
+                        test.y = ref_outer.a
+                    LIMIT 2
+                ) q
+            ) q2
+    ) q3;
+
+SELECT count(*) FROM
+    ref ref_outer,
+    LATERAL (
+        SELECT * FROM
+            LATERAL ( SELECT *
+                FROM ref ref_inner,
+                LATERAL (
+                    SELECT
+                        test.y
+                    FROM test
+                    WHERE
+                        test.y = ref_inner.a
+                    LIMIT 2
+                ) q
+            ) q2
+    ) q3;
+
+
+
+
+-- Since this only correlates on the distribution column, this can be safely
+-- pushed down. But this is currently considered too hard to detect, so we fail.
+SELECT count(*)
+FROM ref,
+    LATERAL (
+        SELECT
+            test.x
+        FROM test
+        WHERE
+            test.x = ref.a
+        LIMIT 2
+    ) q;
+
+-- Would require repartitioning to work with subqueries
+SELECT count(*)
+FROM test,
+    LATERAL (
+        SELECT
+            test_2.x
+        FROM test test_2
+        WHERE
+            test_2.x = test.y
+        LIMIT 2
+    ) q ;
+
+-- These joins are currently too complex for Citus to handle
+SELECT count(*)
+FROM ref JOIN test on ref.b = test.x,
+    LATERAL (
+        SELECT
+            test_2.x
+        FROM test test_2
+        WHERE
+            test_2.x = ref.a
+        LIMIT 2
+    ) q
+;
+
+-- Would require repartitioning to work with subqueries
+SELECT count(*)
+FROM ref JOIN test on ref.b = test.x,
+    LATERAL (
+        SELECT
+            test_2.y
+        FROM test test_2
+        WHERE
+            test_2.y = ref.a
+        LIMIT 2
+    ) q
+;
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA unsupported_lateral_joins CASCADE;
diff --git a/src/test/regress/sql/views_create.sql b/src/test/regress/sql/views_create.sql
new file mode 100644
index 000000000..d30676c42
--- /dev/null
+++ b/src/test/regress/sql/views_create.sql
@@ -0,0 +1,39 @@
+CREATE SCHEMA views_create;
+SET search_path TO views_create;
+
+CREATE TABLE view_test_table(a INT NOT NULL PRIMARY KEY, b BIGINT, c text);
+CREATE OR REPLACE VIEW select_filtered_view AS
+    SELECT * FROM view_test_table WHERE c = 'testing'
+    WITH CASCADED CHECK OPTION;
+CREATE OR REPLACE VIEW select_all_view AS
+    SELECT * FROM view_test_table
+    WITH LOCAL CHECK OPTION;
+CREATE OR REPLACE VIEW count_view AS
+    SELECT COUNT(*) FROM view_test_table;
+SELECT create_distributed_table('view_test_table', 'a');
+
+INSERT INTO view_test_table VALUES (1,1,'testing'), (2,1,'views');
+SELECT * FROM count_view;
+SELECT COUNT(*) FROM count_view;
+SELECT COUNT(*) FROM select_all_view;
+
+SELECT * FROM select_filtered_view;
+
+-- dummy temp recursive view
+CREATE TEMP RECURSIVE VIEW recursive_defined_non_recursive_view(c) AS (SELECT 1);
+
+CREATE MATERIALIZED VIEW select_all_matview AS
+    SELECT * FROM view_test_table
+    WITH DATA;
+
+CREATE MATERIALIZED VIEW IF NOT EXISTS select_filtered_matview AS
+    SELECT * FROM view_test_table WHERE c = 'views'
+    WITH NO DATA;
+
+REFRESH MATERIALIZED VIEW select_filtered_matview;
+
+SELECT COUNT(*) FROM select_all_matview;
+SELECT * FROM select_filtered_matview;
+
+SELECT COUNT(*) FROM select_all_view a JOIN select_filtered_matview b ON a.c=b.c;
+SELECT COUNT(*) FROM select_all_view a JOIN view_test_table b ON a.c=b.c;
diff --git a/src/test/regress/sql_schedule b/src/test/regress/sql_schedule
index 924143bec..b8f74d390 100644
--- a/src/test/regress/sql_schedule
+++ b/src/test/regress/sql_schedule
@@ -6,3 +6,7 @@ test: intermediate_result_pruning_queries_1 intermediate_result_pruning_queries_
 test: dropped_columns_1 distributed_planning
 test: local_dist_join
 test: connectivity_checks citus_run_command
+test: sequences
+test: arbitrary_configs_truncate
+test: arbitrary_configs_truncate_cascade
+test: arbitrary_configs_truncate_partition