mirror of https://github.com/citusdata/citus.git
Merge branch 'master' into fix/subscript-jsonb
commit bb294f7335
@@ -6,10 +6,7 @@ orbs:
 parameters:
   image_suffix:
     type: string
-    default: '-v2021_10_27'
-  pg12_version:
-    type: string
-    default: '12.8'
+    default: '-vabaecad'
   pg13_version:
     type: string
     default: '13.4'
@@ -18,7 +15,7 @@ parameters:
     default: '14.0'
   upgrade_pg_versions:
     type: string
-    default: '12.8-13.4-14.0'
+    default: '13.4-14.0'
 jobs:
   build:
     description: Build the citus extension
@@ -529,10 +526,6 @@ workflows:
             ignore:
               - /release-[0-9]+\.[0-9]+.*/ # match with releaseX.Y.*
 
-      - build:
-          name: build-12
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
       - build:
           name: build-13
          pg_major: 13
@@ -545,80 +538,6 @@ workflows:
       - check-style
       - check-sql-snapshots
 
-      - test-citus:
-          name: 'test-12_check-multi'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-multi
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-multi-1'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-multi-1
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-mx'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-multi-mx
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-vanilla'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-vanilla
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-isolation'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-isolation
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-worker'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-worker
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-operations'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-operations
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-follower-cluster'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-follower-cluster
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-columnar'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-columnar
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-columnar-isolation'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-columnar-isolation
-          requires: [build-12]
-      - tap-test-citus:
-          name: 'test_12_tap-recovery'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          suite: recovery
-          requires: [build-12]
-      - test-citus:
-          name: 'test-12_check-failure'
-          pg_major: 12
-          image: citus/failtester
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          make: check-failure
-          requires: [build-12]
-
       - test-citus:
           name: 'test-13_check-multi'
           pg_major: 13
@@ -767,11 +686,6 @@ workflows:
           make: check-failure
           requires: [build-14]
 
-      - test-arbitrary-configs:
-          name: 'test-12_check-arbitrary-configs'
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          requires: [build-12]
       - test-arbitrary-configs:
           name: 'test-13_check-arbitrary-configs'
           pg_major: 13
@@ -783,20 +697,6 @@ workflows:
           image_tag: '<< pipeline.parameters.pg14_version >>'
           requires: [build-14]
 
-      - test-pg-upgrade:
-          name: 'test-12-13_check-pg-upgrade'
-          old_pg_major: 12
-          new_pg_major: 13
-          image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
-          requires: [build-12, build-13]
-
-      - test-pg-upgrade:
-          name: 'test-12-14_check-pg-upgrade'
-          old_pg_major: 12
-          new_pg_major: 14
-          image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
-          requires: [build-12, build-14]
-
       - test-pg-upgrade:
           name: 'test-13-14_check-pg-upgrade'
           old_pg_major: 13
@@ -805,10 +705,10 @@ workflows:
           requires: [build-13, build-14]
 
       - test-citus-upgrade:
-          name: test-12_check-citus-upgrade
-          pg_major: 12
-          image_tag: '<< pipeline.parameters.pg12_version >>'
-          requires: [build-12]
+          name: test-13_check-citus-upgrade
+          pg_major: 13
+          image_tag: '<< pipeline.parameters.pg13_version >>'
+          requires: [build-13]
 
       - ch_benchmark:
           requires: [build-13]

CHANGELOG.md (245 lines changed)
@@ -1,3 +1,248 @@
+### citus v11.0.0_beta (March 22, 2022) ###
+
+* Drops support for PostgreSQL 12
+
+* Turns metadata syncing on by default
+
+* Adds `citus_finalize_upgrade_to_citus11()` which is necessary to upgrade to
+  Citus 11+ from earlier versions
+
+* Adds `citus.max_client_connections` GUC to limit non-Citus connections
+
+* Allows locally creating objects having a dependency that cannot be distributed
+
+* Distributes aggregates with `CREATE AGGREGATE` command
+
+* Distributes functions with `CREATE FUNCTION` command
+
+* Adds `citus.create_object_propagation` GUC to control DDL creation behaviour
+  in transactions
+
+* Hides shards based on `application_name` prefix
+
+* Prevents specifying `application_name` via `citus.node_conninfo`
+
+* Starts identifying rebalancer backends by `application_name=citus_rebalancer`
+
+* Starts identifying internal backends by `application_name=citus_internal`
+
+* Adds `citus.enable_unsafe_triggers` flag to enable unsafe triggers on
+  distributed tables
+
+* Adds `fix_partition_shard_index_names` UDF to fix currently broken names
+
+* Adds propagation for foreign server commands
+
+* Adds propagation of `TEXT SEARCH CONFIGURATION` objects
+
+* Adds propagation of `TEXT SEARCH DICTIONARY` objects
+
+* Adds support for `ALTER FUNCTION ... SUPPORT ...` commands
+
+* Adds support for `CREATE SCHEMA AUTHORIZATION` statements without schema name
+
+* Adds support for `TRUNCATE` for foreign tables
+
+* Adds support for adding local tables to metadata using
+  `citus_add_local_table_to_metadata()` UDF
+
+* Adds support for adding partitioned local tables to Citus metadata
+
+* Adds support for automatic binary encoding in re-partition joins when possible
+
+* Adds support for foreign tables in MX
+
+* Adds support for operator class parameters in indexes
+
+* Adds support for re-partition joins in transaction blocks
+
+* Adds support for re-partition joins on followers
+
+* Adds support for shard replication > 1 hash distributed tables on Citus MX
+
+* Improves handling of `IN`, `OUT` and `INOUT` parameters for functions
+
+* Introduces `citus_backend_gpid()` UDF to get global pid of the current backend
+
+* Introduces `citus_check_cluster_node_health` UDF to check cluster connectivity
+
+* Introduces `citus_check_connection_to_node` UDF to check node connectivity
+
+* Introduces `citus_coordinator_nodeid` UDF to find the node id of the
+  coordinator node
+
+* Introduces `citus_stat_activity` view and drops `citus_worker_stat_activity`
+  UDF
+
+* Introduces `citus.use_citus_managed_tables` GUC to add local tables to Citus
+  metadata automatically
+
+* Introduces a new flag `force_delegation` in `create_distributed_function()`
+
+* Allows `create_distributed_function()` on a function owned by an extension
+
+* Allows creating distributed tables in sequential mode
+
+* Allows disabling nodes when multiple failures happen
+
+* Adds support for pushing procedures with `OUT` arguments down to the worker
+  nodes
+
+* Overrides `pg_cancel_backend()` and `pg_terminate_backend()` to run with
+  global pid
+
+* Delegates function calls of the form `SELECT .. FROM func()`
+
+* Adds propagation of `CREATE SCHEMA .. GRANT ON SCHEMA ..` commands
+
+* Propagates `pg_dist_object` to worker nodes
+
+* Adds propagation of `SCHEMA` operations
+
+* Adds missing version-mismatch checks for columnar tables
+
+* Adds missing version-mismatch checks for internal functions
+
+* `citus_shard_indexes_on_worker` shows all local shard indexes regardless of
+  `search_path`
+
+* `citus_shards_on_worker` shows all local shards regardless of `search_path`
+
+* Deprecates inactive shard state, never marks any placement inactive
+
+* Disables distributed & reference foreign tables
+
+* Prevents propagating objects having a circular dependency
+
+* Prevents propagating objects having a dependency to an object with unsupported
+  type
+
+* Deprecates `master_get_table_metadata` UDF
+
+* Disallows remote execution from queries on shards
+
+* Drops `citus.enable_cte_inlining` GUC
+
+* Drops `citus.single_shard_commit_protocol` GUC, defaults to 2PC
+
+* Drops support for `citus.multi_shard_commit_protocol`, always uses 2PC
+
+* Avoids unnecessary errors for `ALTER STATISTICS IF EXISTS` when the statistics
+  object does not exist
+
+* Fixes a bug that causes columnar storage pages to have zero LSN
+
+* Fixes a bug that causes issues while creating dependencies from multiple
+  sessions
+
+* Fixes a bug that causes reading columnar metapage as all-zeros when
+  writing to a columnar table
+
+* Fixes a bug that could break `DROP SCHEMA/EXTENSION` commands when there is a
+  columnar table
+
+* Fixes a bug that could break pg upgrades due to missing `pg_depend` records
+  for columnar table access method
+
+* Fixes a bug that could cause `CREATE INDEX` to fail for expressions when using
+  custom `search_path`
+
+* Fixes a bug that could cause `worker_save_query_explain_analyze` to fail on
+  custom types
+
+* Fixes a bug that could cause failed re-partition joins to leak result tables
+
+* Fixes a bug that could cause prerequisite columnar table access method
+  objects not to be created during pg upgrades
+
+* Fixes a bug that could cause re-partition joins involving local shards to fail
+
+* Fixes a bug that limits usage of sequences in non-int columns
+
+* Fixes a bug that prevents `DROP SCHEMA CASCADE`
+
+* Fixes a build error that happens when `lz4` is not installed
+
+* Fixes a clog lookup failure that could occur when writing to a columnar table
+
+* Fixes a crash that occurs when an aggregate that cannot be pushed down
+  returns an empty result from a worker
+
+* Fixes a missing `FROM` clause entry error
+
+* Fixes a possible segfault that could happen when reporting a distributed
+  deadlock
+
+* Fixes an issue that could cause unexpected errors when there is an in-progress
+  write to a columnar table
+
+* Fixes an unexpected error that occurs due to aborted writes to a columnar
+  table with an index
+
+* Fixes an unexpected error that occurs when writing to a columnar table created
+  in an older version
+
+* Fixes issues when compiling Citus from source with some compilers
+
+* Fixes issues in `ATTACH PARTITION` logic
+
+* Fixes naming issues of newly created partitioned indexes
+
+* Improves self-deadlock prevention for `CREATE INDEX / REINDEX CONCURRENTLY`
+  commands for builds using PG14 or higher
+
+* Moves `pg_dist_object` to `pg_catalog` schema
+
+* Partition shards are now co-located with the parent shards
+
+* Prevents Citus table functions from being called on shards
+
+* Prevents creating distributed functions when there are out of sync nodes
+
+* Provides notice message for idempotent `create_distributed_function` calls
+
+* Reinstates optimisation for uniform shard interval ranges
+
+* Relaxes table ownership check to a privileges check while acquiring lock
+
+* Drops support for `citus.shard_placement_policy` GUC
+
+* Drops `master_append_table_to_shard` UDF
+
+* Drops `master_apply_delete_command` UDF
+
+* Removes copy into new shard logic for append-distributed tables
+
+* Drops support for distributed `cstore_fdw` tables in favor of Citus
+  columnar table access method
+
+* Removes support for dropping distributed and local indexes in the same
+  statement
+
+* Replaces `citus.enable_object_propagation` GUC with
+  `citus.enable_metadata_sync`
+
+* Requires superuser for `citus_add_node()` and `citus_activate_node()` UDFs
+
+* Revokes read access to `columnar.chunk` from unprivileged user
+
+* Disallows unsupported lateral subqueries on distributed tables
+
+* Stops updating shard range in `citus_update_shard_statistics` for append
+  tables
+
+### citus v10.2.5 (March 15, 2022) ###
+
+* Fixes a bug that could cause `worker_save_query_explain_analyze` to fail on
+  custom types
+
+* Fixes a bug that limits usage of sequences in non-integer columns
+
+* Fixes a crash that occurs when an aggregate that cannot be pushed down
+  returns an empty result from a worker
+
+* Improves concurrent metadata syncing and metadata changing DDL operations
+
 ### citus v10.2.4 (February 1, 2022) ###
 
 * Adds support for operator class parameters in indexes

Makefile (12 lines changed)
@@ -13,10 +13,16 @@ include Makefile.global
 
 all: extension
 
+# build columnar only
+columnar:
+	$(MAKE) -C src/backend/columnar all
+
 # build extension
-extension: $(citus_top_builddir)/src/include/citus_version.h
+extension: $(citus_top_builddir)/src/include/citus_version.h columnar
 	$(MAKE) -C src/backend/distributed/ all
-install-extension: extension
+install-columnar: columnar
+	$(MAKE) -C src/backend/columnar install
+install-extension: extension install-columnar
 	$(MAKE) -C src/backend/distributed/ install
 install-headers: extension
 	$(MKDIR_P) '$(DESTDIR)$(includedir_server)/distributed/'
@@ -27,6 +33,7 @@ install-headers: extension
 
 clean-extension:
 	$(MAKE) -C src/backend/distributed/ clean
+	$(MAKE) -C src/backend/columnar/ clean
 clean-full:
 	$(MAKE) -C src/backend/distributed/ clean-full
 .PHONY: extension install-extension clean-extension clean-full
@@ -35,6 +42,7 @@ install: install-extension install-headers
 install-downgrades:
 	$(MAKE) -C src/backend/distributed/ install-downgrades
 install-all: install-headers
+	$(MAKE) -C src/backend/columnar/ install-all
 	$(MAKE) -C src/backend/distributed/ install-all
 
 clean: clean-extension

@@ -2555,7 +2555,7 @@ if test -z "$version_num"; then
   as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5
 fi
 
-if test "$version_num" != '12' -a "$version_num" != '13' -a "$version_num" != '14'; then
+if test "$version_num" != '13' -a "$version_num" != '14'; then
   as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
 else
   { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5

@@ -74,7 +74,7 @@ if test -z "$version_num"; then
   AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.])
 fi
 
-if test "$version_num" != '12' -a "$version_num" != '13' -a "$version_num" != '14'; then
+if test "$version_num" != '13' -a "$version_num" != '14'; then
   AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
 else
   AC_MSG_NOTICE([building against PostgreSQL $version_num])

@@ -0,0 +1,17 @@
+citus_subdir = src/backend/columnar
+citus_top_builddir = ../../..
+safestringlib_srcdir = $(citus_abs_top_srcdir)/vendor/safestringlib
+SUBDIRS = . safeclib
+SUBDIRS +=
+ENSURE_SUBDIRS_EXIST := $(shell mkdir -p $(SUBDIRS))
+OBJS += \
+	$(patsubst $(citus_abs_srcdir)/%.c,%.o,$(foreach dir,$(SUBDIRS), $(sort $(wildcard $(citus_abs_srcdir)/$(dir)/*.c))))
+
+MODULE_big = citus_columnar
+
+PG_CPPFLAGS += -I$(libpq_srcdir) -I$(safestringlib_srcdir)/include
+
+include $(citus_top_builddir)/Makefile.global
+
+.PHONY: install-all
+install-all: install

@@ -22,6 +22,7 @@
 
 #include "citus_version.h"
 #include "columnar/columnar.h"
+#include "columnar/columnar_tableam.h"
 
 /* Default values for option parameters */
 #define DEFAULT_STRIPE_ROW_COUNT 150000
@@ -53,6 +54,14 @@ static const struct config_enum_entry columnar_compression_options[] =
 	{ NULL, 0, false }
 };
 
+void
+columnar_init(void)
+{
+	columnar_init_gucs();
+	columnar_tableam_init();
+}
+
+
 void
 columnar_init_gucs()
 {

@@ -11,7 +11,6 @@
 #include "postgres.h"
 
 #include "funcapi.h"
-#include "pg_config.h"
 #include "access/nbtree.h"
 #include "access/table.h"
 #include "catalog/pg_am.h"
@@ -103,8 +103,8 @@ typedef struct IndexFetchColumnarData
 	MemoryContext scanContext;
 } IndexFetchColumnarData;
 
-
-ColumnarTableSetOptions_hook_type ColumnarTableSetOptions_hook = NULL;
+/* available to other extensions using find_rendezvous_variable() */
+static ColumnarTableSetOptions_hook_type ColumnarTableSetOptions_hook = NULL;
 
 static object_access_hook_type PrevObjectAccessHook = NULL;
 static ProcessUtility_hook_type PrevProcessUtilityHook = NULL;
@@ -1910,6 +1910,11 @@ ColumnarSubXactCallback(SubXactEvent event, SubTransactionId mySubid,
 void
 columnar_tableam_init()
 {
+	ColumnarTableSetOptions_hook_type **ColumnarTableSetOptions_hook_ptr =
+		(ColumnarTableSetOptions_hook_type **) find_rendezvous_variable(
+			COLUMNAR_SETOPTIONS_HOOK_SYM);
+	*ColumnarTableSetOptions_hook_ptr = &ColumnarTableSetOptions_hook;
+
 	RegisterXactCallback(ColumnarXactCallback, NULL);
 	RegisterSubXactCallback(ColumnarSubXactCallback, NULL);

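The hunk above publishes the now-static hook through PostgreSQL's rendezvous-variable mechanism, which hands every shared library that asks for the same name a pointer to the same slot. A minimal sketch of the consumer side, assuming only find_rendezvous_variable() from fmgr.h plus the names visible in this diff; MyOptionsCallback is a hypothetical callback variable, not something from the commit:

    #include "postgres.h"
    #include "fmgr.h"                        /* find_rendezvous_variable() */
    #include "columnar/columnar_tableam.h"   /* hook typedef and symbol name */

    /* hypothetical callback; its signature comes from the typedef */
    static ColumnarTableSetOptions_hook_type MyOptionsCallback = NULL;

    static void
    InstallColumnarOptionsHook(void)
    {
        /* the same slot columnar_tableam_init() filled above */
        ColumnarTableSetOptions_hook_type **hookSlot =
            (ColumnarTableSetOptions_hook_type **) find_rendezvous_variable(
                COLUMNAR_SETOPTIONS_HOOK_SYM);

        /* non-NULL only after citus_columnar has been loaded and initialized */
        if (*hookSlot != NULL)
        {
            **hookSlot = MyOptionsCallback;
        }
    }

This avoids any link-time dependency between the two libraries: if the columnar module was never loaded, the slot stays NULL and the caller simply skips the assignment.
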
@@ -18,13 +18,15 @@
 #include "citus_version.h"
 
 #include "columnar/columnar.h"
 #include "columnar/mod.h"
 
+#include "columnar/columnar_tableam.h"
+
+
+PG_MODULE_MAGIC;
+
+void _PG_init(void);
+
 void
-columnar_init(void)
+_PG_init(void)
 {
-	columnar_init_gucs();
-	columnar_tableam_init();
+	columnar_init();
 }

@@ -0,0 +1 @@
+../../../vendor/safestringlib/safeclib/

@@ -19,8 +19,6 @@ DATA_built = $(generated_sql_files)
 
 # directories with source files
 SUBDIRS = . commands connection ddl deparser executor metadata operations planner progress relay safeclib test transaction utils worker
-# columnar modules
-SUBDIRS += ../columnar
 # enterprise modules
 SUBDIRS +=
 
@@ -84,7 +82,8 @@ endif
 .PHONY: clean-full install install-downgrades install-all
 
 cleanup-before-install:
-	rm -f $(DESTDIR)$(datadir)/$(datamoduledir)/citus*
+	rm -f $(DESTDIR)$(datadir)/$(datamoduledir)/citus.control
+	rm -f $(DESTDIR)$(datadir)/$(datamoduledir)/citus--*
 
 install: cleanup-before-install

@@ -22,6 +22,20 @@
 #include "utils/lsyscache.h"
 
 
+/*
+ * PreprocessDefineAggregateStmt only qualifies the node with schema name.
+ * We will handle the rest in the Postprocess phase.
+ */
+List *
+PreprocessDefineAggregateStmt(Node *node, const char *queryString,
+							  ProcessUtilityContext processUtilityContext)
+{
+	QualifyTreeNode((Node *) node);
+
+	return NIL;
+}
+
+
 /*
  * PostprocessDefineAggregateStmt actually creates the plan we need to execute for
  * aggregate propagation.
@@ -37,8 +51,6 @@
 List *
 PostprocessDefineAggregateStmt(Node *node, const char *queryString)
 {
-	QualifyTreeNode((Node *) node);
-
 	DefineStmt *stmt = castNode(DefineStmt, node);
 
 	if (!ShouldPropagate())

@@ -52,6 +52,7 @@
 #include "distributed/multi_partitioning_utils.h"
 #include "distributed/reference_table_utils.h"
 #include "distributed/relation_access_tracking.h"
+#include "distributed/shared_library_init.h"
 #include "distributed/shard_utils.h"
 #include "distributed/worker_protocol.h"
 #include "distributed/worker_transaction.h"
@@ -687,7 +688,7 @@ ConvertTable(TableConversionState *con)
 		strcmp(con->originalAccessMethod, "columnar") == 0)
 	{
 		ColumnarOptions options = { 0 };
-		ReadColumnarOptions(con->relationId, &options);
+		extern_ReadColumnarOptions(con->relationId, &options);
 
 		ColumnarTableDDLContext *context = (ColumnarTableDDLContext *) palloc0(
 			sizeof(ColumnarTableDDLContext));
@@ -843,7 +844,7 @@ DropIndexesNotSupportedByColumnar(Oid relationId, bool suppressNoticeMessages)
 	foreach_oid(indexId, indexIdList)
 	{
 		char *indexAmName = GetIndexAccessMethodName(indexId);
-		if (ColumnarSupportsIndexAM(indexAmName))
+		if (extern_ColumnarSupportsIndexAM(indexAmName))
 		{
 			continue;
 		}
@@ -1106,13 +1106,10 @@ DropDefaultExpressionsAndMoveOwnedSequenceOwnerships(Oid sourceRelationId,
 	ExtractDefaultColumnsAndOwnedSequences(sourceRelationId, &columnNameList,
 										   &ownedSequenceIdList);
 
-	ListCell *columnNameCell = NULL;
-	ListCell *ownedSequenceIdCell = NULL;
-	forboth(columnNameCell, columnNameList, ownedSequenceIdCell, ownedSequenceIdList)
+	char *columnName = NULL;
+	Oid ownedSequenceId = InvalidOid;
+	forboth_ptr_oid(columnName, columnNameList, ownedSequenceId, ownedSequenceIdList)
 	{
-		char *columnName = (char *) lfirst(columnNameCell);
-		Oid ownedSequenceId = lfirst_oid(ownedSequenceIdCell);
-
 		DropDefaultColumnDefinition(sourceRelationId, columnName);
 
 		/* column might not own a sequence */

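The loop above swaps the stock forboth()/lfirst() pairing for forboth_ptr_oid(), one of the typed two-list iterators this change adopts. The real definitions live in distributed/listutils.h and may differ in detail; a sketch of how such a macro can be built on the PostgreSQL 13+ List API:

    /* iterate a pointer list and an OID list in lockstep (sketch) */
    #define forboth_ptr_oid(ptrVar, ptrList, oidVar, oidList) \
        for (ListCell *ptrCell = list_head(ptrList), \
                      *oidCell = list_head(oidList); \
             ptrCell != NULL && oidCell != NULL && \
             (((ptrVar) = lfirst(ptrCell)), \
              ((oidVar) = lfirst_oid(oidCell)), true); \
             ptrCell = lnext(ptrList, ptrCell), \
             oidCell = lnext(oidList, oidCell))

The payoff is visible in the hunk: the ListCell declarations and the per-iteration lfirst()/lfirst_oid() extractions disappear from every call site.
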
@@ -19,6 +19,7 @@
 #include "distributed/deparser.h"
 #include "distributed/listutils.h"
 #include "distributed/metadata_utility.h"
+#include "distributed/metadata/dependency.h"
 #include "distributed/metadata/distobject.h"
 #include "distributed/metadata_sync.h"
 #include "distributed/multi_executor.h"
@@ -300,6 +301,32 @@ PreprocessAlterCollationOwnerStmt(Node *node, const char *queryString,
 }
 
 
+/*
+ * PostprocessAlterCollationOwnerStmt is invoked after the owner has been changed locally.
+ * Since changing the owner could result in new dependencies being found for this object
+ * we re-ensure all the dependencies for the collation do exist.
+ *
+ * This is solely to propagate the new owner (and all its dependencies) if it was not
+ * already distributed in the cluster.
+ */
+List *
+PostprocessAlterCollationOwnerStmt(Node *node, const char *queryString)
+{
+	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
+	Assert(stmt->objectType == OBJECT_COLLATION);
+
+	ObjectAddress collationAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
+	if (!ShouldPropagateObject(&collationAddress))
+	{
+		return NIL;
+	}
+
+	EnsureDependenciesExistOnAllNodes(&collationAddress);
+
+	return NIL;
+}
+
+
 /*
  * PreprocessRenameCollationStmt is called when the user is renaming the collation. The invocation happens
  * before the statement is applied locally.
@@ -562,6 +589,14 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString)
 	ObjectAddress collationAddress =
 		DefineCollationStmtObjectAddress(node, false);
 
+	DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(
+		&collationAddress);
+	if (errMsg != NULL)
+	{
+		RaiseDeferredError(errMsg, WARNING);
+		return NIL;
+	}
+
 	EnsureDependenciesExistOnAllNodes(&collationAddress);
 
 	/* to prevent recursion with mx we disable ddl propagation */

@@ -31,6 +31,7 @@
 #include "catalog/pg_opclass.h"
 #include "catalog/pg_proc.h"
 #include "catalog/pg_trigger.h"
+#include "catalog/pg_type.h"
 #include "commands/defrem.h"
 #include "commands/extension.h"
 #include "commands/sequence.h"
@@ -579,7 +580,7 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName,
  * explicitly.
  */
 void
-EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId, Oid ownerRelationId)
+EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId)
 {
 	List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE);
 	citusTableIdList = list_append_unique_oid(citusTableIdList, ownerRelationId);
@@ -591,14 +592,11 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId)
 		List *dependentSequenceList = NIL;
 		GetDependentSequencesWithRelation(citusTableId, &attnumList,
 										  &dependentSequenceList, 0);
-		ListCell *attnumCell = NULL;
-		ListCell *dependentSequenceCell = NULL;
-		forboth(attnumCell, attnumList, dependentSequenceCell,
-				dependentSequenceList)
+		AttrNumber currentAttnum = InvalidAttrNumber;
+		Oid currentSeqOid = InvalidOid;
+		forboth_int_oid(currentAttnum, attnumList, currentSeqOid,
+						dependentSequenceList)
 		{
-			AttrNumber currentAttnum = lfirst_int(attnumCell);
-			Oid currentSeqOid = lfirst_oid(dependentSequenceCell);
-
 			/*
 			 * If another distributed table is using the same sequence
 			 * in one of its column defaults, make sure the types of the
@@ -606,9 +604,9 @@ EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId)
 			 */
 			if (currentSeqOid == seqOid)
 			{
-				Oid currentSeqTypId = GetAttributeTypeOid(citusTableId,
-														  currentAttnum);
-				if (seqTypId != currentSeqTypId)
+				Oid currentAttributeTypId = GetAttributeTypeOid(citusTableId,
+																currentAttnum);
+				if (attributeTypeId != currentAttributeTypId)
 				{
 					char *sequenceName = generate_qualified_relation_name(
 						seqOid);
@@ -674,28 +672,37 @@ static void
 EnsureDistributedSequencesHaveOneType(Oid relationId, List *dependentSequenceList,
 									  List *attnumList)
 {
-	ListCell *attnumCell = NULL;
-	ListCell *dependentSequenceCell = NULL;
-	forboth(attnumCell, attnumList, dependentSequenceCell, dependentSequenceList)
+	AttrNumber attnum = InvalidAttrNumber;
+	Oid sequenceOid = InvalidOid;
+	forboth_int_oid(attnum, attnumList, sequenceOid, dependentSequenceList)
 	{
-		AttrNumber attnum = lfirst_int(attnumCell);
-		Oid sequenceOid = lfirst_oid(dependentSequenceCell);
-
 		/*
 		 * We should make sure that the type of the column that uses
 		 * that sequence is supported
 		 */
-		Oid seqTypId = GetAttributeTypeOid(relationId, attnum);
-		EnsureSequenceTypeSupported(sequenceOid, seqTypId, relationId);
+		Oid attributeTypeId = GetAttributeTypeOid(relationId, attnum);
+		EnsureSequenceTypeSupported(sequenceOid, attributeTypeId, relationId);
 
 		/*
 		 * Alter the sequence's data type in the coordinator if needed.
 		 *
+		 * First, we should only change the sequence type if the column
+		 * is a supported sequence type. For example, if a sequence is used
+		 * in an expression which then becomes a text, we should not try to
+		 * alter the sequence type to text. Postgres only supports int2, int4
+		 * and int8 as the sequence type.
+		 *
 		 * A sequence's type is bigint by default and it doesn't change even if
 		 * it's used in an int column. We should change the type if needed,
 		 * and not allow future ALTER SEQUENCE ... TYPE ... commands for
-		 * sequences used as defaults in distributed tables
+		 * sequences used as defaults in distributed tables.
 		 */
-		AlterSequenceType(sequenceOid, seqTypId);
+		if (attributeTypeId == INT2OID ||
+			attributeTypeId == INT4OID ||
+			attributeTypeId == INT8OID)
+		{
+			AlterSequenceType(sequenceOid, attributeTypeId);
+		}
 	}
 }

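The new guard encodes a PostgreSQL rule: a sequence's own type can only be smallint, integer or bigint, so aligning a sequence with its column is only attempted for those three type OIDs. The same test, distilled into a helper for illustration (the function name is ours, not from the commit):

    /* PostgreSQL only supports int2, int4 and int8 as sequence types */
    static bool
    ColumnTypeSupportsSequenceAlignment(Oid attributeTypeId)
    {
        return attributeTypeId == INT2OID ||
               attributeTypeId == INT4OID ||
               attributeTypeId == INT8OID;
    }

Columns of any other type (for example a text column whose default wraps nextval() in an expression) keep their sequence at the default bigint instead of triggering an invalid ALTER SEQUENCE ... AS text.
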
@@ -158,6 +158,8 @@ EnsureDependenciesCanBeDistributed(const ObjectAddress *objectAddress)
 
 	if (depError != NULL)
 	{
+		/* override error detail as it is not applicable here */
+		depError->detail = NULL;
 		RaiseDeferredError(depError, ERROR);
 	}
 }
@@ -398,6 +400,11 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
 			return CreateTextSearchConfigDDLCommandsIdempotent(dependency);
 		}
 
+		case OCLASS_TSDICT:
+		{
+			return CreateTextSearchDictDDLCommandsIdempotent(dependency);
+		}
+
 		case OCLASS_TYPE:
 		{
 			return CreateTypeDDLCommandsIdempotent(dependency);

@@ -37,14 +37,14 @@ static DistributeObjectOps Aggregate_AlterOwner = {
 	.deparse = DeparseAlterFunctionOwnerStmt,
 	.qualify = QualifyAlterFunctionOwnerStmt,
 	.preprocess = PreprocessAlterFunctionOwnerStmt,
-	.postprocess = NULL,
+	.postprocess = PostprocessAlterFunctionOwnerStmt,
 	.address = AlterFunctionOwnerObjectAddress,
 	.markDistributed = false,
 };
 static DistributeObjectOps Aggregate_Define = {
 	.deparse = NULL,
 	.qualify = QualifyDefineAggregateStmt,
-	.preprocess = NULL,
+	.preprocess = PreprocessDefineAggregateStmt,
 	.postprocess = PostprocessDefineAggregateStmt,
 	.address = DefineAggregateStmtObjectAddress,
 	.markDistributed = true,
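Every DistributeObjectOps table like the ones in this file plugs one statement kind into the same dispatch pipeline. A simplified sketch of how such an entry is consumed around local execution; the surrounding function is illustrative, not Citus' actual utility-hook code, but the callback signatures match the ones in this diff:

    static void
    DispatchDistributedDDL(Node *parsetree, const char *queryString,
                           ProcessUtilityContext context)
    {
        const DistributeObjectOps *ops = GetDistributeObjectOps(parsetree);
        List *ddlJobs = NIL;

        if (ops->preprocess != NULL)
        {
            /* before the statement runs locally, e.g. PreprocessDefineAggregateStmt */
            ddlJobs = ops->preprocess(parsetree, queryString, context);
        }

        /* ... the statement executes locally via standard_ProcessUtility() ... */

        if (ops->postprocess != NULL)
        {
            /* after local execution, e.g. PostprocessAlterFunctionOwnerStmt */
            ddlJobs = list_concat(ddlJobs,
                                  ops->postprocess(parsetree, queryString));
        }

        /* the collected DDL jobs are then sent to the workers (elided) */
        (void) ddlJobs;
    }

Filling in .postprocess for the owner-change entries below is what lets the new owner's dependencies be re-ensured after the local ALTER ... OWNER succeeds.
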
@@ -269,7 +269,7 @@ static DistributeObjectOps Collation_AlterOwner = {
 	.deparse = DeparseAlterCollationOwnerStmt,
 	.qualify = QualifyAlterCollationOwnerStmt,
 	.preprocess = PreprocessAlterCollationOwnerStmt,
-	.postprocess = NULL,
+	.postprocess = PostprocessAlterCollationOwnerStmt,
 	.address = AlterCollationOwnerObjectAddress,
 	.markDistributed = false,
 };
@@ -373,7 +373,7 @@ static DistributeObjectOps Function_AlterOwner = {
 	.deparse = DeparseAlterFunctionOwnerStmt,
 	.qualify = QualifyAlterFunctionOwnerStmt,
 	.preprocess = PreprocessAlterFunctionOwnerStmt,
-	.postprocess = NULL,
+	.postprocess = PostprocessAlterFunctionOwnerStmt,
 	.address = AlterFunctionOwnerObjectAddress,
 	.markDistributed = false,
 };
@@ -437,7 +437,7 @@ static DistributeObjectOps Procedure_AlterOwner = {
 	.deparse = DeparseAlterFunctionOwnerStmt,
 	.qualify = QualifyAlterFunctionOwnerStmt,
 	.preprocess = PreprocessAlterFunctionOwnerStmt,
-	.postprocess = NULL,
+	.postprocess = PostprocessAlterFunctionOwnerStmt,
 	.address = AlterFunctionOwnerObjectAddress,
 	.markDistributed = false,
 };
@@ -538,7 +538,7 @@ static DistributeObjectOps TextSearchConfig_Comment = {
 	.markDistributed = false,
 };
 static DistributeObjectOps TextSearchConfig_Define = {
-	.deparse = DeparseCreateTextSearchStmt,
+	.deparse = DeparseCreateTextSearchConfigurationStmt,
 	.qualify = NULL,
 	.preprocess = NULL,
 	.postprocess = PostprocessCreateTextSearchConfigurationStmt,
@@ -561,6 +561,62 @@ static DistributeObjectOps TextSearchConfig_Rename = {
 	.address = RenameTextSearchConfigurationStmtObjectAddress,
 	.markDistributed = false,
 };
+static DistributeObjectOps TextSearchDict_Alter = {
+	.deparse = DeparseAlterTextSearchDictionaryStmt,
+	.qualify = QualifyAlterTextSearchDictionaryStmt,
+	.preprocess = PreprocessAlterTextSearchDictionaryStmt,
+	.postprocess = NULL,
+	.address = AlterTextSearchDictionaryStmtObjectAddress,
+	.markDistributed = false,
+};
+static DistributeObjectOps TextSearchDict_AlterObjectSchema = {
+	.deparse = DeparseAlterTextSearchDictionarySchemaStmt,
+	.qualify = QualifyAlterTextSearchDictionarySchemaStmt,
+	.preprocess = PreprocessAlterTextSearchDictionarySchemaStmt,
+	.postprocess = PostprocessAlterTextSearchDictionarySchemaStmt,
+	.address = AlterTextSearchDictionarySchemaStmtObjectAddress,
+	.markDistributed = false,
+};
+static DistributeObjectOps TextSearchDict_AlterOwner = {
+	.deparse = DeparseAlterTextSearchDictionaryOwnerStmt,
+	.qualify = QualifyAlterTextSearchDictionaryOwnerStmt,
+	.preprocess = PreprocessAlterTextSearchDictionaryOwnerStmt,
+	.postprocess = PostprocessAlterTextSearchDictionaryOwnerStmt,
+	.address = AlterTextSearchDictOwnerObjectAddress,
+	.markDistributed = false,
+};
+static DistributeObjectOps TextSearchDict_Comment = {
+	.deparse = DeparseTextSearchDictionaryCommentStmt,
+	.qualify = QualifyTextSearchDictionaryCommentStmt,
+	.preprocess = PreprocessTextSearchDictionaryCommentStmt,
+	.postprocess = NULL,
+	.address = TextSearchDictCommentObjectAddress,
+	.markDistributed = false,
+};
+static DistributeObjectOps TextSearchDict_Define = {
+	.deparse = DeparseCreateTextSearchDictionaryStmt,
+	.qualify = NULL,
+	.preprocess = NULL,
+	.postprocess = PostprocessCreateTextSearchDictionaryStmt,
+	.address = CreateTextSearchDictObjectAddress,
+	.markDistributed = true,
+};
+static DistributeObjectOps TextSearchDict_Drop = {
+	.deparse = DeparseDropTextSearchDictionaryStmt,
+	.qualify = QualifyDropTextSearchDictionaryStmt,
+	.preprocess = PreprocessDropTextSearchDictionaryStmt,
+	.postprocess = NULL,
+	.address = NULL,
+	.markDistributed = false,
+};
+static DistributeObjectOps TextSearchDict_Rename = {
+	.deparse = DeparseRenameTextSearchDictionaryStmt,
+	.qualify = QualifyRenameTextSearchDictionaryStmt,
+	.preprocess = PreprocessRenameTextSearchDictionaryStmt,
+	.postprocess = NULL,
+	.address = RenameTextSearchDictionaryStmtObjectAddress,
+	.markDistributed = false,
+};
 static DistributeObjectOps Trigger_AlterObjectDepends = {
 	.deparse = NULL,
 	.qualify = NULL,
@@ -581,7 +637,7 @@ static DistributeObjectOps Routine_AlterOwner = {
 	.deparse = DeparseAlterFunctionOwnerStmt,
 	.qualify = QualifyAlterFunctionOwnerStmt,
 	.preprocess = PreprocessAlterFunctionOwnerStmt,
-	.postprocess = NULL,
+	.postprocess = PostprocessAlterFunctionOwnerStmt,
 	.address = AlterFunctionOwnerObjectAddress,
 	.markDistributed = false,
 };
@@ -647,7 +703,7 @@ static DistributeObjectOps Statistics_AlterOwner = {
 	.deparse = DeparseAlterStatisticsOwnerStmt,
 	.qualify = QualifyAlterStatisticsOwnerStmt,
 	.preprocess = PreprocessAlterStatisticsOwnerStmt,
-	.postprocess = NULL,
+	.postprocess = PostprocessAlterStatisticsOwnerStmt,
 	.address = NULL,
 	.markDistributed = false,
 };
@@ -872,6 +928,11 @@ GetDistributeObjectOps(Node *node)
 			return &TextSearchConfig_AlterObjectSchema;
 		}
 
+		case OBJECT_TSDICTIONARY:
+		{
+			return &TextSearchDict_AlterObjectSchema;
+		}
+
 		case OBJECT_TYPE:
 		{
 			return &Type_AlterObjectSchema;
@@ -934,6 +995,11 @@ GetDistributeObjectOps(Node *node)
 			return &TextSearchConfig_AlterOwner;
 		}
 
+		case OBJECT_TSDICTIONARY:
+		{
+			return &TextSearchDict_AlterOwner;
+		}
+
 		case OBJECT_TYPE:
 		{
 			return &Type_AlterOwner;
@@ -1020,6 +1086,11 @@ GetDistributeObjectOps(Node *node)
 			return &TextSearchConfig_Alter;
 		}
 
+		case T_AlterTSDictionaryStmt:
+		{
+			return &TextSearchDict_Alter;
+		}
+
 		case T_ClusterStmt:
 		{
 			return &Any_Cluster;
@@ -1035,6 +1106,11 @@ GetDistributeObjectOps(Node *node)
 			return &TextSearchConfig_Comment;
 		}
 
+		case OBJECT_TSDICTIONARY:
+		{
+			return &TextSearchDict_Comment;
+		}
+
 		default:
 		{
 			return &NoDistributeOps;
@@ -1107,6 +1183,11 @@ GetDistributeObjectOps(Node *node)
 			return &TextSearchConfig_Define;
 		}
 
+		case OBJECT_TSDICTIONARY:
+		{
+			return &TextSearchDict_Define;
+		}
+
 		default:
 		{
 			return &NoDistributeOps;
@@ -1189,6 +1270,11 @@ GetDistributeObjectOps(Node *node)
 			return &TextSearchConfig_Drop;
 		}
 
+		case OBJECT_TSDICTIONARY:
+		{
+			return &TextSearchDict_Drop;
+		}
+
 		case OBJECT_TYPE:
 		{
 			return &Type_Drop;
@@ -1293,6 +1379,11 @@ GetDistributeObjectOps(Node *node)
 			return &TextSearchConfig_Rename;
 		}
 
+		case OBJECT_TSDICTIONARY:
+		{
+			return &TextSearchDict_Rename;
+		}
+
 		case OBJECT_TYPE:
 		{
 			return &Type_Rename;

@@ -769,13 +769,23 @@ RecreateExtensionStmt(Oid extensionOid)
 
 	/* make DefElem for extensionSchemaName */
 	Node *schemaNameArg = (Node *) makeString(extensionSchemaName);
 
 	DefElem *schemaDefElement = makeDefElem("schema", schemaNameArg, location);
 
 	/* append the schema name DefElem finally */
 	createExtensionStmt->options = lappend(createExtensionStmt->options,
 										   schemaDefElement);
 
+	char *extensionVersion = get_extension_version(extensionOid);
+	if (extensionVersion != NULL)
+	{
+		Node *extensionVersionArg = (Node *) makeString(extensionVersion);
+		DefElem *extensionVersionElement =
+			makeDefElem("new_version", extensionVersionArg, location);
+
+		createExtensionStmt->options = lappend(createExtensionStmt->options,
+											   extensionVersionElement);
+	}
+
 	return (Node *) createExtensionStmt;
 }
 
@@ -410,9 +410,8 @@ EnsureReferencingTableNotReplicated(Oid referencingTableId)
 	{
 		ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 						errmsg("cannot create foreign key constraint"),
-						errdetail("Citus Community Edition currently supports "
-								  "foreign key constraints only for "
-								  "\"citus.shard_replication_factor = 1\"."),
+						errdetail("Citus currently supports foreign key constraints "
+								  "only for \"citus.shard_replication_factor = 1\"."),
 						errhint("Please change \"citus.shard_replication_factor to "
 								"1\". To learn more about using foreign keys with "
 								"other replication factors, please contact us at "

@@ -966,6 +966,14 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
 		insertorderbyat = agg->aggnumdirectargs;
 	}
 
+	/*
+	 * For zero-argument aggregates, write * in place of the list of arguments
+	 */
+	if (numargs == 0)
+	{
+		appendStringInfo(&buf, "*");
+	}
+
 	for (i = 0; i < numargs; i++)
 	{
 		Oid argtype = argtypes[i];
@@ -1446,7 +1454,21 @@ DefineAggregateStmtObjectAddress(Node *node, bool missing_ok)
 	}
 	else
 	{
-		objectWithArgs->objargs = list_make1(makeTypeName("anyelement"));
+		DefElem *defItem = NULL;
+		foreach_ptr(defItem, stmt->definition)
+		{
+			/*
+			 * If no explicit args are given, pg includes basetype in the signature.
+			 * If the basetype given is a type, like int4, we should include it in the
+			 * signature. In that case, defItem->arg would be a TypeName.
+			 * If the basetype given is a string, like "ANY", we shouldn't include it.
+			 */
+			if (strcmp(defItem->defname, "basetype") == 0 && IsA(defItem->arg, TypeName))
+			{
+				objectWithArgs->objargs = lappend(objectWithArgs->objargs,
+												  defItem->arg);
+			}
+		}
 	}
 
 	return FunctionToObjectAddress(OBJECT_AGGREGATE, objectWithArgs, missing_ok);
@@ -1584,6 +1606,32 @@ PreprocessAlterFunctionOwnerStmt(Node *node, const char *queryString,
 }
 
 
+/*
+ * PostprocessAlterFunctionOwnerStmt is invoked after the owner has been changed locally.
+ * Since changing the owner could result in new dependencies being found for this object
+ * we re-ensure all the dependencies for the function do exist.
+ *
+ * This is solely to propagate the new owner (and all its dependencies) if it was not
+ * already distributed in the cluster.
+ */
+List *
+PostprocessAlterFunctionOwnerStmt(Node *node, const char *queryString)
+{
+	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
+	AssertObjectTypeIsFunctional(stmt->objectType);
+
+	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
+	if (!ShouldPropagateAlterFunction(&address))
+	{
+		return NIL;
+	}
+
+	EnsureDependenciesExistOnAllNodes(&address);
+
+	return NIL;
+}
+
+
 /*
 * PreprocessDropFunctionStmt gets called during the planning phase of a DROP FUNCTION statement
 * and returns a list of DDLJob's that will drop any distributed functions from the

@@ -44,6 +44,7 @@ static ObjectAddress GetObjectAddressBySchemaName(char *schemaName, bool missing
 static List * FilterDistributedSchemas(List *schemas);
 static bool SchemaHasDistributedTableWithFKey(char *schemaName);
 static bool ShouldPropagateCreateSchemaStmt(void);
+static List * GetGrantCommandsFromCreateSchemaStmt(Node *node);
 
 
 /*
@@ -63,13 +64,17 @@ PreprocessCreateSchemaStmt(Node *node, const char *queryString,
 
 	EnsureSequentialMode(OBJECT_SCHEMA);
 
+	/* to prevent recursion with mx we disable ddl propagation */
+	List *commands = list_make1(DISABLE_DDL_PROPAGATION);
+
 	/* deparse sql */
 	const char *sql = DeparseTreeNode(node);
 
-	/* to prevent recursion with mx we disable ddl propagation */
-	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-								(void *) sql,
-								ENABLE_DDL_PROPAGATION);
+	commands = lappend(commands, (void *) sql);
+
+	commands = list_concat(commands, GetGrantCommandsFromCreateSchemaStmt(node));
+
+	commands = lappend(commands, ENABLE_DDL_PROPAGATION);
 
 	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
 }
@@ -223,7 +228,24 @@ CreateSchemaStmtObjectAddress(Node *node, bool missing_ok)
 {
 	CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);
 
-	return GetObjectAddressBySchemaName(stmt->schemaname, missing_ok);
+	StringInfoData schemaName = { 0 };
+	initStringInfo(&schemaName);
+
+	if (stmt->schemaname == NULL)
+	{
+		/*
+		 * If the schema name is not provided, the schema will be created
+		 * with the name of the authorized user.
+		 */
+		Assert(stmt->authrole != NULL);
+		appendStringInfoString(&schemaName, RoleSpecString(stmt->authrole, true));
+	}
+	else
+	{
+		appendStringInfoString(&schemaName, stmt->schemaname);
+	}
+
+	return GetObjectAddressBySchemaName(schemaName.data, missing_ok);
 }
 
 
@@ -375,3 +397,44 @@ ShouldPropagateCreateSchemaStmt()
 
 	return true;
 }
+
+
+/*
+ * GetGrantCommandsFromCreateSchemaStmt takes a CreateSchemaStmt and returns the
+ * list of deparsed queries of the inner GRANT commands of the given statement.
+ * Ignores commands other than GRANT statements.
+ */
+static List *
+GetGrantCommandsFromCreateSchemaStmt(Node *node)
+{
+	List *commands = NIL;
+	CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);
+
+	Node *element = NULL;
+	foreach_ptr(element, stmt->schemaElts)
+	{
+		if (!IsA(element, GrantStmt))
+		{
+			continue;
+		}
+
+		GrantStmt *grantStmt = castNode(GrantStmt, element);
+
+		switch (grantStmt->objtype)
+		{
+			/* we only propagate GRANT ON SCHEMA in community */
+			case OBJECT_SCHEMA:
+			{
+				commands = lappend(commands, DeparseGrantOnSchemaStmt(element));
+				break;
+			}
+
+			default:
+			{
+				break;
+			}
+		}
+	}
+
+	return commands;
+}

@@ -427,6 +427,37 @@ PreprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString,
 }
 
 
+/*
+ * PostprocessAlterStatisticsOwnerStmt is invoked after the owner has been changed locally.
+ * Since changing the owner could result in new dependencies being found for this object
+ * we re-ensure all the dependencies for the statistics do exist.
+ *
+ * This is solely to propagate the new owner (and all its dependencies) if it was not
+ * already distributed in the cluster.
+ */
+List *
+PostprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString)
+{
+	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
+	Assert(stmt->objectType == OBJECT_STATISTIC_EXT);
+
+	Oid statsOid = get_statistics_object_oid((List *) stmt->object, false);
+	Oid relationId = GetRelIdByStatsOid(statsOid);
+
+	if (!IsCitusTable(relationId) || !ShouldPropagate())
+	{
+		return NIL;
+	}
+
+	ObjectAddress statisticsAddress = { 0 };
+	ObjectAddressSet(statisticsAddress, StatisticExtRelationId, statsOid);
+
+	EnsureDependenciesExistOnAllNodes(&statisticsAddress);
+
+	return NIL;
+}
+
+
 /*
 * GetExplicitStatisticsCommandList returns the list of DDL commands to create
 * or alter statistics that are explicitly created for the table with relationId.

@@ -3127,13 +3127,10 @@ InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId,
 
 	List *taskList = NIL;
 
-	ListCell *leftShardCell = NULL;
-	ListCell *rightShardCell = NULL;
-	forboth(leftShardCell, leftShardList, rightShardCell, rightShardList)
+	ShardInterval *leftShardInterval = NULL;
+	ShardInterval *rightShardInterval = NULL;
+	forboth_ptr(leftShardInterval, leftShardList, rightShardInterval, rightShardList)
 	{
-		ShardInterval *leftShardInterval = (ShardInterval *) lfirst(leftShardCell);
-		ShardInterval *rightShardInterval = (ShardInterval *) lfirst(rightShardCell);
-
 		uint64 leftShardId = leftShardInterval->shardId;
 		uint64 rightShardId = rightShardInterval->shardId;
 

@@ -18,7 +18,9 @@
 #include "catalog/pg_ts_config_map.h"
 #include "catalog/pg_ts_dict.h"
 #include "catalog/pg_ts_parser.h"
+#include "catalog/pg_ts_template.h"
 #include "commands/comment.h"
+#include "commands/defrem.h"
 #include "commands/extension.h"
 #include "fmgr.h"
 #include "nodes/makefuncs.h"
@@ -32,6 +34,7 @@
 #include "distributed/commands/utility_hook.h"
 #include "distributed/deparser.h"
 #include "distributed/listutils.h"
+#include "distributed/metadata/dependency.h"
 #include "distributed/metadata/distobject.h"
 #include "distributed/metadata_sync.h"
 #include "distributed/multi_executor.h"
@@ -40,13 +43,19 @@
 
 
 static List * GetDistributedTextSearchConfigurationNames(DropStmt *stmt);
+static List * GetDistributedTextSearchDictionaryNames(DropStmt *stmt);
 static DefineStmt * GetTextSearchConfigDefineStmt(Oid tsconfigOid);
+static DefineStmt * GetTextSearchDictionaryDefineStmt(Oid tsdictOid);
+static List * GetTextSearchDictionaryInitOptions(HeapTuple tup, Form_pg_ts_dict dict);
 static List * GetTextSearchConfigCommentStmt(Oid tsconfigOid);
+static List * GetTextSearchDictionaryCommentStmt(Oid tsconfigOid);
 static List * get_ts_parser_namelist(Oid tsparserOid);
 static List * GetTextSearchConfigMappingStmt(Oid tsconfigOid);
 static List * GetTextSearchConfigOwnerStmts(Oid tsconfigOid);
+static List * GetTextSearchDictionaryOwnerStmts(Oid tsdictOid);
 
 static List * get_ts_dict_namelist(Oid tsdictOid);
+static List * get_ts_template_namelist(Oid tstemplateOid);
 static Oid get_ts_config_parser_oid(Oid tsconfigOid);
 static char * get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype);
@@ -83,6 +92,14 @@ PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString
 	EnsureSequentialMode(OBJECT_TSCONFIGURATION);
 
 	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
+
+	DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&address);
+	if (errMsg != NULL)
+	{
+		RaiseDeferredError(errMsg, WARNING);
+		return NIL;
+	}
+
 	EnsureDependenciesExistOnAllNodes(&address);
 
 	/*
@@ -99,6 +116,56 @@ PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString
 }
 
 
+/*
+ * PostprocessCreateTextSearchDictionaryStmt is called after the TEXT SEARCH DICTIONARY has been
+ * created locally.
+ */
+List *
+PostprocessCreateTextSearchDictionaryStmt(Node *node, const char *queryString)
+{
+	DefineStmt *stmt = castNode(DefineStmt, node);
+	Assert(stmt->kind == OBJECT_TSDICTIONARY);
+
+	if (!ShouldPropagate())
+	{
+		return NIL;
+	}
+
+	/* check creation against multi-statement transaction policy */
+	if (!ShouldPropagateCreateInCoordinatedTransction())
+	{
+		return NIL;
+	}
+
+	EnsureCoordinator();
+	EnsureSequentialMode(OBJECT_TSDICTIONARY);
+
+	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
+
+	DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&address);
+	if (errMsg != NULL)
+	{
+		RaiseDeferredError(errMsg, WARNING);
+		return NIL;
+	}
+
+	EnsureDependenciesExistOnAllNodes(&address);
+
+	QualifyTreeNode(node);
+	const char *createTSDictionaryStmtSql = DeparseTreeNode(node);
+
+	/*
+	 * To prevent recursive propagation in mx architecture, we disable ddl
+	 * propagation before sending the command to workers.
+	 */
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) createTSDictionaryStmtSql,
+								ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
+
+
 List *
 GetCreateTextSearchConfigStatements(const ObjectAddress *address)
 {
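The DISABLE/ENABLE_DDL_PROPAGATION sandwich in the handler above recurs in every propagation path in this file. Distilled into one hypothetical helper for clarity; only the two constants and NodeDDLTaskList() come from the diff, the wrapper name is ours:

    static List *
    PropagateDeparsedSqlToWorkers(const char *sql)
    {
        /*
         * Wrapping the deparsed command between DISABLE_DDL_PROPAGATION and
         * ENABLE_DDL_PROPAGATION keeps metadata-syncing workers from trying
         * to re-propagate the command themselves (recursion in MX).
         */
        List *commands = list_make3(DISABLE_DDL_PROPAGATION,
                                    (void *) sql,
                                    ENABLE_DDL_PROPAGATION);

        /* one task per worker; the coordinator has already run the DDL locally */
        return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
    }
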
@@ -122,6 +189,25 @@ GetCreateTextSearchConfigStatements(const ObjectAddress *address)
 }
 
 
+List *
+GetCreateTextSearchDictionaryStatements(const ObjectAddress *address)
+{
+	Assert(address->classId == TSDictionaryRelationId);
+	List *stmts = NIL;
+
+	/* CREATE TEXT SEARCH DICTIONARY ... */
+	stmts = lappend(stmts, GetTextSearchDictionaryDefineStmt(address->objectId));
+
+	/* ALTER TEXT SEARCH DICTIONARY ... OWNER TO ... */
+	stmts = list_concat(stmts, GetTextSearchDictionaryOwnerStmts(address->objectId));
+
+	/* COMMENT ON TEXT SEARCH DICTIONARY ... */
+	stmts = list_concat(stmts, GetTextSearchDictionaryCommentStmt(address->objectId));
+
+	return stmts;
+}
+
+
 /*
  * CreateTextSearchConfigDDLCommandsIdempotent creates a list of ddl commands to recreate
  * a TEXT SEARCH CONFIGURATION object in an idempotent manner on workers.
@@ -135,9 +221,22 @@ CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address)
 }
 
 
+/*
+ * CreateTextSearchDictDDLCommandsIdempotent creates a list of ddl commands to recreate
+ * a TEXT SEARCH DICTIONARY object in an idempotent manner on workers.
+ */
+List *
+CreateTextSearchDictDDLCommandsIdempotent(const ObjectAddress *address)
+{
+	List *stmts = GetCreateTextSearchDictionaryStatements(address);
+	List *sqls = DeparseTreeNodes(stmts);
+	return list_make1(WrapCreateOrReplaceList(sqls));
+}
+
+
 /*
  * PreprocessDropTextSearchConfigurationStmt prepares the statements we need to send to
- * the workers. After we have dropped the schema's locally they also got removed from
+ * the workers. After we have dropped the configurations locally they also got removed from
  * pg_dist_object so it is important to do all distribution checks before the change is
  * made locally.
  */
@ -182,13 +281,64 @@ PreprocessDropTextSearchConfigurationStmt(Node *node, const char *queryString,
|
|||
(void *) dropStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* PreprocessDropTextSearchDictionaryStmt prepares the statements we need to send to
|
||||
* the workers. After we have dropped the dictionaries locally they also got removed from
|
||||
* pg_dist_object so it is important to do all distribution checks before the change is
|
||||
* made locally.
|
||||
*/
|
||||
List *
|
||||
PreprocessDropTextSearchDictionaryStmt(Node *node, const char *queryString,
|
||||
ProcessUtilityContext processUtilityContext)
|
||||
{
|
||||
DropStmt *stmt = castNode(DropStmt, node);
|
||||
Assert(stmt->removeType == OBJECT_TSDICTIONARY);
|
||||
|
||||
if (!ShouldPropagate())
|
||||
{
|
||||
return NIL;
|
||||
}
|
||||
|
||||
List *distributedObjects = GetDistributedTextSearchDictionaryNames(stmt);
|
||||
if (list_length(distributedObjects) == 0)
|
||||
{
|
||||
/* no distributed objects to remove */
|
||||
return NIL;
|
||||
}
|
||||
|
||||
EnsureCoordinator();
|
||||
EnsureSequentialMode(OBJECT_TSDICTIONARY);
|
||||
|
||||
/*
|
||||
* Temporarily replace the list of objects being dropped with only the list
|
||||
* containing the distributed objects. After we have created the sql statement we
|
||||
* restore the original list of objects to execute on locally.
|
||||
*
|
||||
* Because searchpaths on coordinator and workers might not be in sync we fully
|
||||
* qualify the list before deparsing. This is safe because qualification doesn't
|
||||
* change the original names in place, but insteads creates new ones.
|
||||
*/
|
||||
List *originalObjects = stmt->objects;
|
||||
stmt->objects = distributedObjects;
|
||||
QualifyTreeNode((Node *) stmt);
|
||||
const char *dropStmtSql = DeparseTreeNode((Node *) stmt);
|
||||
stmt->objects = originalObjects;
|
||||
|
||||
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
|
||||
(void *) dropStmtSql,
|
||||
ENABLE_DDL_PROPAGATION);
|
||||
|
||||
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
|
||||
}
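
A minimal sketch of the resulting task list, assuming the propagation macros expand to the usual SET commands:

/*
 * Sketch: for
 *     DROP TEXT SEARCH DICTIONARY my_dict, not_distributed_dict;
 * where only my_dict is distributed, each worker runs roughly
 *     SET citus.enable_ddl_propagation TO 'off';
 *     DROP TEXT SEARCH DICTIONARY public.my_dict;
 *     SET citus.enable_ddl_propagation TO 'on';
 * while the local execution on the coordinator still drops both dictionaries.
 */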


/*
 * GetDistributedTextSearchConfigurationNames iterates over all text search configurations
- * dropped, and create a list containign all configurations that are distributed.
+ * dropped, and creates a list containing all configurations that are distributed.
 */
static List *
GetDistributedTextSearchConfigurationNames(DropStmt *stmt)

@@ -200,7 +350,7 @@ GetDistributedTextSearchConfigurationNames(DropStmt *stmt)
		Oid tsconfigOid = get_ts_config_oid(objName, stmt->missing_ok);
		if (!OidIsValid(tsconfigOid))
		{
-			/* skip missing configuration names, they can't be dirstibuted */
+			/* skip missing configuration names, they can't be distributed */
			continue;
		}

@@ -216,6 +366,36 @@ GetDistributedTextSearchConfigurationNames(DropStmt *stmt)
}


/*
 * GetDistributedTextSearchDictionaryNames iterates over all text search dictionaries
 * dropped, and creates a list containing all dictionaries that are distributed.
 */
static List *
GetDistributedTextSearchDictionaryNames(DropStmt *stmt)
{
	List *objName = NULL;
	List *distributedObjects = NIL;
	foreach_ptr(objName, stmt->objects)
	{
		Oid tsdictOid = get_ts_dict_oid(objName, stmt->missing_ok);
		if (!OidIsValid(tsdictOid))
		{
			/* skip missing dictionary names, they can't be distributed */
			continue;
		}

		ObjectAddress address = { 0 };
		ObjectAddressSet(address, TSDictionaryRelationId, tsdictOid);
		if (!IsObjectDistributed(&address))
		{
			continue;
		}
		distributedObjects = lappend(distributedObjects, objName);
	}
	return distributedObjects;
}


/*
 * PreprocessAlterTextSearchConfigurationStmt verifies if the configuration being altered
 * is distributed in the cluster. If that is the case it will prepare the list of commands

@@ -243,7 +423,38 @@ PreprocessAlterTextSearchConfigurationStmt(Node *node, const char *queryString,
								(void *) alterStmtSql,
								ENABLE_DDL_PROPAGATION);

-	return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


/*
 * PreprocessAlterTextSearchDictionaryStmt verifies if the dictionary being altered is
 * distributed in the cluster. If that is the case it will prepare the list of commands to
 * send to the workers to apply the same changes remotely.
 */
List *
PreprocessAlterTextSearchDictionaryStmt(Node *node, const char *queryString,
										ProcessUtilityContext processUtilityContext)
{
	AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node);

	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
	if (!ShouldPropagateObject(&address))
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureSequentialMode(OBJECT_TSDICTIONARY);

	QualifyTreeNode((Node *) stmt);
	const char *alterStmtSql = DeparseTreeNode((Node *) stmt);

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) alterStmtSql,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


@@ -276,7 +487,40 @@ PreprocessRenameTextSearchConfigurationStmt(Node *node, const char *queryString,
								(void *) ddlCommand,
								ENABLE_DDL_PROPAGATION);

-	return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


/*
 * PreprocessRenameTextSearchDictionaryStmt verifies if the dictionary being altered
 * is distributed in the cluster. If that is the case it will prepare the list of commands
 * to send to the workers to apply the same changes remotely.
 */
List *
PreprocessRenameTextSearchDictionaryStmt(Node *node, const char *queryString,
										 ProcessUtilityContext processUtilityContext)
{
	RenameStmt *stmt = castNode(RenameStmt, node);
	Assert(stmt->renameType == OBJECT_TSDICTIONARY);

	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
	if (!ShouldPropagateObject(&address))
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureSequentialMode(OBJECT_TSDICTIONARY);

	QualifyTreeNode((Node *) stmt);

	char *ddlCommand = DeparseTreeNode((Node *) stmt);

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) ddlCommand,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


@@ -310,7 +554,41 @@ PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *querySt
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

-	return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


/*
 * PreprocessAlterTextSearchDictionarySchemaStmt verifies if the dictionary being
 * altered is distributed in the cluster. If that is the case it will prepare the list of
 * commands to send to the workers to apply the same changes remotely.
 */
List *
PreprocessAlterTextSearchDictionarySchemaStmt(Node *node, const char *queryString,
											  ProcessUtilityContext processUtilityContext)
{
	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt,
														  stmt->missing_ok);
	if (!ShouldPropagateObject(&address))
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureSequentialMode(OBJECT_TSDICTIONARY);

	QualifyTreeNode((Node *) stmt);
	const char *sql = DeparseTreeNode((Node *) stmt);

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


@@ -341,6 +619,33 @@ PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryS
}


/*
 * PostprocessAlterTextSearchDictionarySchemaStmt is invoked after the schema has been
 * changed locally. Since changing the schema could result in new dependencies being found
 * for this object we re-ensure all the dependencies for the dictionary do exist. This
 * is solely to propagate the new schema (and all its dependencies) if it was not already
 * distributed in the cluster.
 */
List *
PostprocessAlterTextSearchDictionarySchemaStmt(Node *node, const char *queryString)
{
	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt,
														  stmt->missing_ok);
	if (!ShouldPropagateObject(&address))
	{
		return NIL;
	}

	/* dependencies have changed (schema), let's ensure they exist */
	EnsureDependenciesExistOnAllNodes(&address);

	return NIL;
}


/*
 * PreprocessTextSearchConfigurationCommentStmt propagates any comment on a distributed
 * configuration to the workers. Since comments for configurations are prominently shown

@@ -370,7 +675,40 @@ PreprocessTextSearchConfigurationCommentStmt(Node *node, const char *queryString
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

-	return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands);
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


/*
 * PreprocessTextSearchDictionaryCommentStmt propagates any comment on a distributed
 * dictionary to the workers. Since comments for dictionaries are prominently shown
 * when listing all text search dictionaries this is purely a cosmetic thing when
 * running in MX.
 */
List *
PreprocessTextSearchDictionaryCommentStmt(Node *node, const char *queryString,
										  ProcessUtilityContext processUtilityContext)
{
	CommentStmt *stmt = castNode(CommentStmt, node);
	Assert(stmt->objtype == OBJECT_TSDICTIONARY);

	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
	if (!ShouldPropagateObject(&address))
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureSequentialMode(OBJECT_TSDICTIONARY);

	QualifyTreeNode((Node *) stmt);
	const char *sql = DeparseTreeNode((Node *) stmt);

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


@@ -407,6 +745,39 @@ PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryStr
}


/*
 * PreprocessAlterTextSearchDictionaryOwnerStmt verifies if the dictionary being
 * altered is distributed in the cluster. If that is the case it will prepare the list of
 * commands to send to the workers to apply the same changes remotely.
 */
List *
PreprocessAlterTextSearchDictionaryOwnerStmt(Node *node, const char *queryString,
											 ProcessUtilityContext processUtilityContext)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
	if (!ShouldPropagateObject(&address))
	{
		return NIL;
	}

	EnsureCoordinator();
	EnsureSequentialMode(OBJECT_TSDICTIONARY);

	QualifyTreeNode((Node *) stmt);
	char *sql = DeparseTreeNode((Node *) stmt);

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


/*
 * PostprocessAlterTextSearchConfigurationOwnerStmt is invoked after the owner has been
 * changed locally. Since changing the owner could result in new dependencies being found

@@ -433,6 +804,32 @@ PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *querySt
}


/*
 * PostprocessAlterTextSearchDictionaryOwnerStmt is invoked after the owner has been
 * changed locally. Since changing the owner could result in new dependencies being found
 * for this object we re-ensure all the dependencies for the dictionary do exist. This
 * is solely to propagate the new owner (and all its dependencies) if it was not already
 * distributed in the cluster.
 */
List *
PostprocessAlterTextSearchDictionaryOwnerStmt(Node *node, const char *queryString)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false);
	if (!ShouldPropagateObject(&address))
	{
		return NIL;
	}

	/* dependencies have changed (owner), let's ensure they exist */
	EnsureDependenciesExistOnAllNodes(&address);

	return NIL;
}


/*
 * GetTextSearchConfigDefineStmt returns the DefineStmt for a TEXT SEARCH CONFIGURATION
 * based on the configuration as defined in the catalog identified by tsconfigOid.

@@ -465,6 +862,65 @@ GetTextSearchConfigDefineStmt(Oid tsconfigOid)
}


/*
 * GetTextSearchDictionaryDefineStmt returns the DefineStmt for a TEXT SEARCH DICTIONARY
 * based on the dictionary as defined in the catalog identified by tsdictOid.
 *
 * This statement will contain the template along with all initialization options.
 */
static DefineStmt *
GetTextSearchDictionaryDefineStmt(Oid tsdictOid)
{
	HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid));
	if (!HeapTupleIsValid(tup)) /* should not happen */
	{
		elog(ERROR, "cache lookup failed for text search dictionary %u",
			 tsdictOid);
	}
	Form_pg_ts_dict dict = (Form_pg_ts_dict) GETSTRUCT(tup);

	DefineStmt *stmt = makeNode(DefineStmt);
	stmt->kind = OBJECT_TSDICTIONARY;
	stmt->defnames = get_ts_dict_namelist(tsdictOid);
	stmt->definition = GetTextSearchDictionaryInitOptions(tup, dict);

	ReleaseSysCache(tup);
	return stmt;
}


/*
 * GetTextSearchDictionaryInitOptions returns the list of DefElem for the initialization
 * options for a TEXT SEARCH DICTIONARY.
 *
 * The initialization options contain both the template name, and template specific key,
 * value pairs that were supplied when the dictionary was first created.
 */
static List *
GetTextSearchDictionaryInitOptions(HeapTuple tup, Form_pg_ts_dict dict)
{
	List *templateNameList = get_ts_template_namelist(dict->dicttemplate);
	TypeName *templateTypeName = makeTypeNameFromNameList(templateNameList);
	DefElem *templateDefElem = makeDefElem("template", (Node *) templateTypeName, -1);

	Relation TSDictionaryRelation = table_open(TSDictionaryRelationId, AccessShareLock);
	TupleDesc TSDictDescription = RelationGetDescr(TSDictionaryRelation);
	bool isnull = false;
	Datum dictinitoption = heap_getattr(tup, Anum_pg_ts_dict_dictinitoption,
										TSDictDescription, &isnull);

	List *initOptionDefElemList = NIL;
	if (!isnull)
	{
		initOptionDefElemList = deserialize_deflist(dictinitoption);
	}

	table_close(TSDictionaryRelation, AccessShareLock);

	return lcons(templateDefElem, initOptionDefElemList);
}
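
A worked example of the round trip; the exact serialization shown is a sketch, owned by postgres' serialize/deserialize_deflist:

/*
 * Sketch: for
 *     CREATE TEXT SEARCH DICTIONARY public.dutch_stem (TEMPLATE = snowball, language = 'dutch');
 * pg_ts_dict.dictinitoption holds roughly "language = 'dutch'", which
 * deserialize_deflist() turns back into a DefElem list, and lcons() prepends
 * the template so the recreated DefineStmt deparses with the template first.
 */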


/*
 * GetTextSearchConfigCommentStmt returns a list containing all entries to recreate a
 * comment on the configuration identified by tsconfigOid. The list could be empty if

@@ -492,6 +948,33 @@ GetTextSearchConfigCommentStmt(Oid tsconfigOid)
}


/*
 * GetTextSearchDictionaryCommentStmt returns a list containing all entries to recreate a
 * comment on the dictionary identified by tsdictOid. The list could be empty if
 * there is no comment on a dictionary.
 *
 * The reason for a list is for easy use when building a list of all statements to invoke
 * to recreate the text search dictionary. An empty list can easily be concatenated
 * without inspection, contrary to a NULL ptr if we would return the CommentStmt struct.
 */
static List *
GetTextSearchDictionaryCommentStmt(Oid tsdictOid)
{
	char *comment = GetComment(tsdictOid, TSDictionaryRelationId, 0);
	if (!comment)
	{
		return NIL;
	}

	CommentStmt *stmt = makeNode(CommentStmt);
	stmt->objtype = OBJECT_TSDICTIONARY;

	stmt->object = (Node *) get_ts_dict_namelist(tsdictOid);
	stmt->comment = comment;
	return list_make1(stmt);
}


/*
 * GetTextSearchConfigMappingStmt returns a list of all mappings from token_types to
 * dictionaries configured on a text search configuration identified by tsconfigOid.

@@ -581,7 +1064,7 @@ GetTextSearchConfigMappingStmt(Oid tsconfigOid)
 * GetTextSearchConfigOwnerStmts returns a potentially empty list of statements to change
 * the ownership of a TEXT SEARCH CONFIGURATION object.
 *
- * The list is for convenienve when building a full list of statements to recreate the
+ * The list is for convenience when building a full list of statements to recreate the
 * configuration.
 */
static List *

@@ -605,6 +1088,34 @@ GetTextSearchConfigOwnerStmts(Oid tsconfigOid)
}


/*
 * GetTextSearchDictionaryOwnerStmts returns a potentially empty list of statements to change
 * the ownership of a TEXT SEARCH DICTIONARY object.
 *
 * The list is for convenience when building a full list of statements to recreate the
 * dictionary.
 */
static List *
GetTextSearchDictionaryOwnerStmts(Oid tsdictOid)
{
	HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid));
	if (!HeapTupleIsValid(tup)) /* should not happen */
	{
		elog(ERROR, "cache lookup failed for text search dictionary %u",
			 tsdictOid);
	}
	Form_pg_ts_dict dict = (Form_pg_ts_dict) GETSTRUCT(tup);

	AlterOwnerStmt *stmt = makeNode(AlterOwnerStmt);
	stmt->objectType = OBJECT_TSDICTIONARY;
	stmt->object = (Node *) get_ts_dict_namelist(tsdictOid);
	stmt->newowner = GetRoleSpecObjectForUser(dict->dictowner);

	ReleaseSysCache(tup);
	return list_make1(stmt);
}


/*
 * get_ts_config_namelist based on the tsconfigOid this function creates the namelist that
 * identifies the configuration in a fully qualified manner, regardless of the schema

@@ -654,6 +1165,30 @@ get_ts_dict_namelist(Oid tsdictOid)
}


/*
 * get_ts_template_namelist based on the tstemplateOid this function creates the namelist
 * that identifies the template in a fully qualified manner, regardless of the schema
 * existing on the search_path.
 */
static List *
get_ts_template_namelist(Oid tstemplateOid)
{
	HeapTuple tup = SearchSysCache1(TSTEMPLATEOID, ObjectIdGetDatum(tstemplateOid));
	if (!HeapTupleIsValid(tup)) /* should not happen */
	{
		elog(ERROR, "cache lookup failed for text search template %u", tstemplateOid);
	}
	Form_pg_ts_template template = (Form_pg_ts_template) GETSTRUCT(tup);

	char *schema = get_namespace_name(template->tmplnamespace);
	char *templateName = pstrdup(NameStr(template->tmplname));
	List *names = list_make2(makeString(schema), makeString(templateName));

	ReleaseSysCache(tup);
	return names;
}


/*
 * get_ts_config_parser_oid based on the tsconfigOid this function returns the Oid of the
 * parser used in the configuration.

@@ -753,6 +1288,25 @@ CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok)
}


/*
 * CreateTextSearchDictObjectAddress resolves the ObjectAddress for the object
 * being created. If missing_ok is false the function will error, explaining to the user
 * the text search dictionary described in the statement doesn't exist.
 */
ObjectAddress
CreateTextSearchDictObjectAddress(Node *node, bool missing_ok)
{
	DefineStmt *stmt = castNode(DefineStmt, node);
	Assert(stmt->kind == OBJECT_TSDICTIONARY);

	Oid objid = get_ts_dict_oid(stmt->defnames, missing_ok);

	ObjectAddress address = { 0 };
	ObjectAddressSet(address, TSDictionaryRelationId, objid);
	return address;
}


/*
 * RenameTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT
 * SEARCH CONFIGURATION being renamed. Optionally errors if the configuration does not

@@ -772,6 +1326,25 @@ RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
}


/*
 * RenameTextSearchDictionaryStmtObjectAddress resolves the ObjectAddress for the TEXT
 * SEARCH DICTIONARY being renamed. Optionally errors if the dictionary does not
 * exist based on the missing_ok flag passed in by the caller.
 */
ObjectAddress
RenameTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok)
{
	RenameStmt *stmt = castNode(RenameStmt, node);
	Assert(stmt->renameType == OBJECT_TSDICTIONARY);

	Oid objid = get_ts_dict_oid(castNode(List, stmt->object), missing_ok);

	ObjectAddress address = { 0 };
	ObjectAddressSet(address, TSDictionaryRelationId, objid);
	return address;
}


/*
 * AlterTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT
 * SEARCH CONFIGURATION being altered. Optionally errors if the configuration does not

@@ -790,6 +1363,24 @@ AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok)
}


/*
 * AlterTextSearchDictionaryStmtObjectAddress resolves the ObjectAddress for the TEXT
 * SEARCH DICTIONARY being altered. Optionally errors if the dictionary does not
 * exist based on the missing_ok flag passed in by the caller.
 */
ObjectAddress
AlterTextSearchDictionaryStmtObjectAddress(Node *node, bool missing_ok)
{
	AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node);

	Oid objid = get_ts_dict_oid(stmt->dictname, missing_ok);

	ObjectAddress address = { 0 };
	ObjectAddressSet(address, TSDictionaryRelationId, objid);
	return address;
}


/*
 * AlterTextSearchConfigurationSchemaStmtObjectAddress resolves the ObjectAddress for the
 * TEXT SEARCH CONFIGURATION being moved to a different schema. Optionally errors if the

@@ -843,6 +1434,59 @@ AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok)
}


/*
 * AlterTextSearchDictionarySchemaStmtObjectAddress resolves the ObjectAddress for the
 * TEXT SEARCH DICTIONARY being moved to a different schema. Optionally errors if the
 * dictionary does not exist based on the missing_ok flag passed in by the caller.
 *
 * This can be called either before or after the move of schema has been executed, hence
 * the triple checking before the error might be thrown. Errors for non-existing schemas
 * in edge cases will be raised by postgres while executing the move.
 */
ObjectAddress
AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok)
{
	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	Oid objid = get_ts_dict_oid(castNode(List, stmt->object), true);

	if (!OidIsValid(objid))
	{
		/*
		 * couldn't find the text search dictionary, it might have already been moved
		 * to the new schema, so we construct a new dictionary name that uses the new
		 * schema to search in.
		 */
		char *schemaname = NULL;
		char *dict_name = NULL;
		DeconstructQualifiedName(castNode(List, stmt->object), &schemaname, &dict_name);

		char *newSchemaName = stmt->newschema;
		List *names = list_make2(makeString(newSchemaName), makeString(dict_name));
		objid = get_ts_dict_oid(names, true);

		if (!missing_ok && !OidIsValid(objid))
		{
			/*
			 * if the text search dictionary oid is still invalid we couldn't find it;
			 * error with the same message postgres would error with if missing_ok is
			 * false (not ok to miss)
			 */
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("text search dictionary \"%s\" does not exist",
							NameListToString(castNode(List, stmt->object)))));
		}
	}

	ObjectAddress address = { 0 };
	ObjectAddressSet(address, TSDictionaryRelationId, objid);
	return address;
}


/*
 * TextSearchConfigurationCommentObjectAddress resolves the ObjectAddress for the TEXT
 * SEARCH CONFIGURATION on which the comment is placed. Optionally errors if the

@@ -862,6 +1506,25 @@ TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok)
}


/*
 * TextSearchDictCommentObjectAddress resolves the ObjectAddress for the TEXT SEARCH
 * DICTIONARY on which the comment is placed. Optionally errors if the dictionary does not
 * exist based on the missing_ok flag passed in by the caller.
 */
ObjectAddress
TextSearchDictCommentObjectAddress(Node *node, bool missing_ok)
{
	CommentStmt *stmt = castNode(CommentStmt, node);
	Assert(stmt->objtype == OBJECT_TSDICTIONARY);

	Oid objid = get_ts_dict_oid(castNode(List, stmt->object), missing_ok);

	ObjectAddress address = { 0 };
	ObjectAddressSet(address, TSDictionaryRelationId, objid);
	return address;
}


/*
 * AlterTextSearchConfigurationOwnerObjectAddress resolves the ObjectAddress for the TEXT
 * SEARCH CONFIGURATION for which the owner is changed. Optionally errors if the

@@ -880,6 +1543,24 @@ AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok)
}


/*
 * AlterTextSearchDictOwnerObjectAddress resolves the ObjectAddress for the TEXT
 * SEARCH DICTIONARY for which the owner is changed. Optionally errors if the
 * dictionary does not exist based on the missing_ok flag passed in by the caller.
 */
ObjectAddress
AlterTextSearchDictOwnerObjectAddress(Node *node, bool missing_ok)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Relation relation = NULL;

	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	return get_object_address(stmt->objectType, stmt->object, &relation, AccessShareLock,
							  missing_ok);
}


/*
 * GenerateBackupNameForTextSearchConfiguration generates a safe name that is not in use
 * already that can be used to rename an existing TEXT SEARCH CONFIGURATION to allow the

@@ -277,7 +277,7 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement)
		{
			ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							errmsg("truncating foreign tables that are added to metadata "
-								   "can only be excuted on the coordinator")));
+								   "can only be executed on the coordinator")));
		}
	}
}

@@ -57,6 +57,7 @@
#include "distributed/commands/utility_hook.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
+#include "distributed/metadata/dependency.h"
#include "distributed/metadata/distobject.h"
#include "distributed/metadata_sync.h"
#include "distributed/multi_executor.h"

@@ -93,6 +94,9 @@ static List * FilterNameListForDistributedTypes(List *objects, bool missing_ok);
static List * TypeNameListToObjectAddresses(List *objects);
static TypeName * MakeTypeNameFromRangeVar(const RangeVar *relation);
static Oid GetTypeOwner(Oid typeOid);
+static Oid LookupNonAssociatedArrayTypeNameOid(ParseState *pstate,
+											   const TypeName *typeName,
+											   bool missing_ok);

/* recreate functions */
static CompositeTypeStmt * RecreateCompositeTypeStmt(Oid typeOid);
@@ -132,28 +136,7 @@ PreprocessCompositeTypeStmt(Node *node, const char *queryString,
	/* fully qualify before lookup and later deparsing */
	QualifyTreeNode(node);

-	/*
-	 * reconstruct creation statement in a portable fashion. The create_or_replace helper
-	 * function will be used to create the type in an idempotent manner on the workers.
-	 *
-	 * Types could exist on the worker prior to being created on the coordinator when the
-	 * type previously has been attempted to be created in a transaction which did not
-	 * commit on the coordinator.
-	 */
-	const char *compositeTypeStmtSql = DeparseCompositeTypeStmt(node);
-	compositeTypeStmtSql = WrapCreateOrReplace(compositeTypeStmtSql);
-
-	/*
-	 * when we allow propagation within a transaction block we should make sure to only
-	 * allow this in sequential mode
-	 */
-	EnsureSequentialMode(OBJECT_TYPE);
-
-	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-								(void *) compositeTypeStmtSql,
-								ENABLE_DDL_PROPAGATION);
-
-	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+	return NIL;
}


@@ -176,9 +159,39 @@ PostprocessCompositeTypeStmt(Node *node, const char *queryString)
	 * locally it can't be missing
	 */
	ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false);

+	/* If the type has any unsupported dependency, create it locally */
+	DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&typeAddress);
+	if (errMsg != NULL)
+	{
+		RaiseDeferredError(errMsg, WARNING);
+		return NIL;
+	}
+
+	/*
+	 * when we allow propagation within a transaction block we should make sure to only
+	 * allow this in sequential mode
+	 */
+	EnsureSequentialMode(OBJECT_TYPE);
+
	EnsureDependenciesExistOnAllNodes(&typeAddress);

-	return NIL;
+	/*
+	 * reconstruct creation statement in a portable fashion. The create_or_replace helper
+	 * function will be used to create the type in an idempotent manner on the workers.
+	 *
+	 * Types could exist on the worker prior to being created on the coordinator when the
+	 * type previously has been attempted to be created in a transaction which did not
+	 * commit on the coordinator.
+	 */
+	const char *compositeTypeStmtSql = DeparseCompositeTypeStmt(node);
+	compositeTypeStmtSql = WrapCreateOrReplace(compositeTypeStmtSql);
+
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) compositeTypeStmtSql,
+								ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
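
A sketch of the new ordering this hunk introduces; the create-or-replace helper named here is assumed from the surrounding code, not spelled out in the diff:

/*
 * Sketch: CREATE TYPE public.pair AS (a int, b int); now runs locally first,
 * postprocess ensures the dependencies (e.g. the schema) exist on all nodes,
 * and only then do the workers receive the deparsed statement wrapped in the
 * create-or-replace helper, making the remote create idempotent.
 */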


@@ -247,22 +260,7 @@ PreprocessCreateEnumStmt(Node *node, const char *queryString,
	/* enforce fully qualified typeName for correct deparsing and lookup */
	QualifyTreeNode(node);

-	/* reconstruct creation statement in a portable fashion */
-	const char *createEnumStmtSql = DeparseCreateEnumStmt(node);
-	createEnumStmtSql = WrapCreateOrReplace(createEnumStmtSql);
-
-	/*
-	 * when we allow propagation within a transaction block we should make sure to only
-	 * allow this in sequential mode
-	 */
-	EnsureSequentialMode(OBJECT_TYPE);
-
-	/* to prevent recursion with mx we disable ddl propagation */
-	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-								(void *) createEnumStmtSql,
-								ENABLE_DDL_PROPAGATION);
-
-	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+	return NIL;
}


@@ -284,9 +282,32 @@ PostprocessCreateEnumStmt(Node *node, const char *queryString)

	/* lookup type address of just created type */
	ObjectAddress typeAddress = GetObjectAddressFromParseTree(node, false);

+	DeferredErrorMessage *errMsg = DeferErrorIfHasUnsupportedDependency(&typeAddress);
+	if (errMsg != NULL)
+	{
+		RaiseDeferredError(errMsg, WARNING);
+		return NIL;
+	}
+
+	/*
+	 * when we allow propagation within a transaction block we should make sure to only
+	 * allow this in sequential mode
+	 */
+	EnsureSequentialMode(OBJECT_TYPE);
+
	EnsureDependenciesExistOnAllNodes(&typeAddress);

-	return NIL;
+	/* reconstruct creation statement in a portable fashion */
+	const char *createEnumStmtSql = DeparseCreateEnumStmt(node);
+	createEnumStmtSql = WrapCreateOrReplace(createEnumStmtSql);
+
+	/* to prevent recursion with mx we disable ddl propagation */
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) createEnumStmtSql,
+								ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}


@@ -732,7 +753,7 @@ CompositeTypeStmtObjectAddress(Node *node, bool missing_ok)
{
	CompositeTypeStmt *stmt = castNode(CompositeTypeStmt, node);
	TypeName *typeName = MakeTypeNameFromRangeVar(stmt->typevar);
-	Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
+	Oid typeOid = LookupNonAssociatedArrayTypeNameOid(NULL, typeName, missing_ok);
	ObjectAddress address = { 0 };
	ObjectAddressSet(address, TypeRelationId, typeOid);

@@ -753,7 +774,7 @@ CreateEnumStmtObjectAddress(Node *node, bool missing_ok)
{
	CreateEnumStmt *stmt = castNode(CreateEnumStmt, node);
	TypeName *typeName = makeTypeNameFromNameList(stmt->typeName);
-	Oid typeOid = LookupTypeNameOid(NULL, typeName, missing_ok);
+	Oid typeOid = LookupNonAssociatedArrayTypeNameOid(NULL, typeName, missing_ok);
	ObjectAddress address = { 0 };
	ObjectAddressSet(address, TypeRelationId, typeOid);

@@ -1158,3 +1179,32 @@ ShouldPropagateTypeCreate()

	return true;
}


/*
 * LookupNonAssociatedArrayTypeNameOid returns the oid of the type with the given type
 * name, as long as it is not an array type that is associated with another user defined
 * type.
 */
static Oid
LookupNonAssociatedArrayTypeNameOid(ParseState *pstate, const TypeName *typeName,
									bool missing_ok)
{
	Type tup = LookupTypeName(NULL, typeName, NULL, missing_ok);
	Oid typeOid = InvalidOid;
	if (tup != NULL)
	{
		if (((Form_pg_type) GETSTRUCT(tup))->typelem == 0)
		{
			typeOid = ((Form_pg_type) GETSTRUCT(tup))->oid;
		}
		ReleaseSysCache(tup);
	}

	if (!missing_ok && typeOid == InvalidOid)
	{
		elog(ERROR, "type \"%s\" that is not an array type associated with "
					"another type does not exist", TypeNameToString(typeName));
	}

	return typeOid;
}
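
A worked example of why the typelem check matters; the type names are illustrative:

/*
 * Sketch: CREATE TYPE public.point2d AS (x int, y int); also creates the
 * implicit array type public._point2d, whose pg_type row has a non-zero
 * typelem pointing back at point2d. A lookup for a user-defined type must not
 * resolve to such an implicit array type, so rows with typelem != 0 are
 * skipped here.
 */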


@@ -78,7 +78,7 @@
#include "utils/syscache.h"

bool EnableDDLPropagation = true; /* ddl propagation is enabled */
-int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_DEFERRED;
+int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE;
PropSetCmdBehavior PropagateSetCommands = PROPSETCMD_NONE; /* SET prop off */
static bool shouldInvalidateForeignKeyGraph = false;
static int activeAlterTables = 0;
@@ -870,7 +870,19 @@ WaitEventSetFromMultiConnectionStates(List *connections, int *waitCount)

		int eventMask = MultiConnectionStateEventMask(connectionState);

-		AddWaitEventToSet(waitEventSet, eventMask, sock, NULL, connectionState);
+		int waitEventSetIndex =
+			CitusAddWaitEventSetToSet(waitEventSet, eventMask, sock,
+									  NULL, (void *) connectionState);
+		if (waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED)
+		{
+			ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),
+							errmsg("connection establishment for node %s:%d failed",
+								   connectionState->connection->hostname,
+								   connectionState->connection->port),
+							errhint("Check both the local and remote server logs for the "
+									"connection establishment errors.")));
+		}
+
		numEventsAdded++;

		if (waitCount)
@@ -1020,7 +1032,19 @@ FinishConnectionListEstablishment(List *multiConnectionList)
			{
				/* connection state changed, reset the event mask */
				uint32 eventMask = MultiConnectionStateEventMask(connectionState);
-				ModifyWaitEvent(waitEventSet, event->pos, eventMask, NULL);
+				bool success =
+					CitusModifyWaitEvent(waitEventSet, event->pos,
+										 eventMask, NULL);
+				if (!success)
+				{
+					ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),
+									errmsg("connection establishment for node %s:%d "
+										   "failed", connection->hostname,
+										   connection->port),
+									errhint("Check both the local and remote server "
+											"logs for the connection establishment "
+											"errors.")));
+				}
			}

			/*
@@ -1521,3 +1545,95 @@ MarkConnectionConnected(MultiConnection *connection)
		INSTR_TIME_SET_CURRENT(connection->connectionEstablishmentEnd);
	}
}


/*
 * CitusAddWaitEventSetToSet is a wrapper around Postgres' AddWaitEventToSet().
 *
 * AddWaitEventToSet() may throw hard errors. For example, when the
 * underlying socket for a connection is closed by the remote server
 * and already reflected by the OS, but Citus hasn't had a chance
 * to get this information. In that case, if replication factor is >1,
 * Citus can failover to other nodes for executing the query. Even if
 * replication factor = 1, Citus can give much nicer errors.
 *
 * So CitusAddWaitEventSetToSet simply puts AddWaitEventToSet into a
 * PG_TRY/PG_CATCH block in order to catch any hard errors, and
 * returns this information to the caller.
 */
int
CitusAddWaitEventSetToSet(WaitEventSet *set, uint32 events, pgsocket fd,
						  Latch *latch, void *user_data)
{
	volatile int waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED;
	MemoryContext savedContext = CurrentMemoryContext;

	PG_TRY();
	{
		waitEventSetIndex =
			AddWaitEventToSet(set, events, fd, latch, (void *) user_data);
	}
	PG_CATCH();
	{
		/*
		 * We might be in an arbitrary memory context when the
		 * error is thrown and we should get back to one we had
		 * at PG_TRY() time, especially because we are not
		 * re-throwing the error.
		 */
		MemoryContextSwitchTo(savedContext);

		FlushErrorState();

		/* let the callers know about the failure */
		waitEventSetIndex = WAIT_EVENT_SET_INDEX_FAILED;
	}
	PG_END_TRY();

	return waitEventSetIndex;
}


/*
 * CitusModifyWaitEvent is a wrapper around Postgres' ModifyWaitEvent().
 *
 * ModifyWaitEvent may throw hard errors. For example, when the underlying
 * socket for a connection is closed by the remote server and already
 * reflected by the OS, but Citus hasn't had a chance to get this
 * information. In that case, if replication factor is >1, Citus can
 * failover to other nodes for executing the query. Even if replication
 * factor = 1, Citus can give much nicer errors.
 *
 * So CitusModifyWaitEvent simply puts ModifyWaitEvent into a PG_TRY/PG_CATCH
 * block in order to catch any hard errors, and returns this information to the
 * caller.
 */
bool
CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
{
	volatile bool success = true;
	MemoryContext savedContext = CurrentMemoryContext;

	PG_TRY();
	{
		ModifyWaitEvent(set, pos, events, latch);
	}
	PG_CATCH();
	{
		/*
		 * We might be in an arbitrary memory context when the
		 * error is thrown and we should get back to one we had
		 * at PG_TRY() time, especially because we are not
		 * re-throwing the error.
		 */
		MemoryContextSwitchTo(savedContext);

		FlushErrorState();

		/* let the callers know about the failure */
		success = false;
	}
	PG_END_TRY();

	return success;
}


@@ -906,8 +906,20 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts)
				else if (sendStatus == 0)
				{
					/* done writing, only wait for read events */
-					ModifyWaitEvent(waitEventSet, event->pos, WL_SOCKET_READABLE,
-									NULL);
+					bool success =
+						CitusModifyWaitEvent(waitEventSet, event->pos,
+											 WL_SOCKET_READABLE, NULL);
+					if (!success)
+					{
+						ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),
+										errmsg("connection establishment for "
+											   "node %s:%d failed",
+											   connection->hostname,
+											   connection->port),
+										errhint("Check both the local and remote "
												"server logs for the connection "
												"establishment errors.")));
+					}
				}
			}

@@ -1052,8 +1064,17 @@ BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount,
		 * and writeability (server is ready to receive bytes).
		 */
		int eventMask = WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE;

-		AddWaitEventToSet(waitEventSet, eventMask, sock, NULL, (void *) connection);
+		int waitEventSetIndex =
+			CitusAddWaitEventSetToSet(waitEventSet, eventMask, sock,
+									  NULL, (void *) connection);
+		if (waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED)
+		{
+			ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE),
+							errmsg("connection establishment for node %s:%d failed",
+								   connection->hostname, connection->port),
+							errhint("Check both the local and remote server logs for the "
+									"connection establishment errors.")));
+		}
	}

	/*


@@ -124,6 +124,48 @@ pg_get_extensiondef_string(Oid tableRelationId)
}


/*
 * get_extension_version - given an extension OID, fetch its extversion,
 * or NULL if not found.
 */
char *
get_extension_version(Oid extensionId)
{
	char *versionName = NULL;

	Relation relation = table_open(ExtensionRelationId, AccessShareLock);

	ScanKeyData entry[1];
	ScanKeyInit(&entry[0],
				Anum_pg_extension_oid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(extensionId));

	SysScanDesc scanDesc = systable_beginscan(relation, ExtensionOidIndexId, true,
											  NULL, 1, entry);

	HeapTuple tuple = systable_getnext(scanDesc);

	/* We assume that there can be at most one matching tuple */
	if (HeapTupleIsValid(tuple))
	{
		bool isNull = false;
		Datum versionDatum = heap_getattr(tuple, Anum_pg_extension_extversion,
										  RelationGetDescr(relation), &isNull);
		if (!isNull)
		{
			versionName = text_to_cstring(DatumGetTextPP(versionDatum));
		}
	}

	systable_endscan(scanDesc);

	table_close(relation, AccessShareLock);

	return versionName;
}
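
A usage sketch; the caller shown is hypothetical, not part of the diff:

/*
 * Callers are expected to treat a NULL result as "extension row not found or
 * extversion is null", e.g.:
 *
 *     char *version = get_extension_version(extensionOid);
 *     if (version == NULL)
 *     {
 *         ereport(ERROR, (errmsg("could not find version for extension %u",
 *                                extensionOid)));
 *     }
 */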


/*
 * get_extension_schema - given an extension OID, fetch its extnamespace
 *

@@ -59,6 +59,7 @@ static void AppendDefElemParallel(StringInfo buf, DefElem *def);
static void AppendDefElemCost(StringInfo buf, DefElem *def);
static void AppendDefElemRows(StringInfo buf, DefElem *def);
static void AppendDefElemSet(StringInfo buf, DefElem *def);
+static void AppendDefElemSupport(StringInfo buf, DefElem *def);

static void AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt);
static void AppendRenameFunctionStmt(StringInfo buf, RenameStmt *stmt);

@@ -179,6 +180,10 @@ AppendDefElem(StringInfo buf, DefElem *def)
	{
		AppendDefElemSet(buf, def);
	}
+	else if (strcmp(def->defname, "support") == 0)
+	{
+		AppendDefElemSupport(buf, def);
+	}
}


@@ -282,6 +287,16 @@ AppendDefElemSet(StringInfo buf, DefElem *def)
}


/*
 * AppendDefElemSupport appends a string representing the DefElem to a buffer
 */
static void
AppendDefElemSupport(StringInfo buf, DefElem *def)
{
	appendStringInfo(buf, " SUPPORT %s", defGetString(def));
}


/*
 * AppendVariableSet appends a string representing the VariableSetStmt to a buffer
 */

@@ -87,16 +87,6 @@ DeparseAlterSchemaRenameStmt(Node *node)
static void
AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt)
{
-	if (stmt->schemaElts != NIL)
-	{
-		elog(ERROR, "schema creating is not supported with other create commands");
-	}
-
-	if (stmt->schemaname == NULL)
-	{
-		elog(ERROR, "schema name should be specified");
-	}
-
	appendStringInfoString(buf, "CREATE SCHEMA ");

	if (stmt->if_not_exists)

@@ -104,7 +94,18 @@ AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt)
	{
		appendStringInfoString(buf, "IF NOT EXISTS ");
	}

-	appendStringInfo(buf, "%s ", quote_identifier(stmt->schemaname));
+	if (stmt->schemaname != NULL)
+	{
+		appendStringInfo(buf, "%s ", quote_identifier(stmt->schemaname));
+	}
+	else
+	{
+		/*
+		 * If the schema name is not provided, the schema will be created
+		 * with the name of the authorized user.
+		 */
+		Assert(stmt->authrole != NULL);
+	}

	if (stmt->authrole != NULL)
	{


@@ -12,27 +12,28 @@
#include "postgres.h"

#include "catalog/namespace.h"
+#include "commands/defrem.h"
#include "utils/builtins.h"

#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"

-static void AppendDefElemList(StringInfo buf, List *defelms);
+static void AppendDefElemList(StringInfo buf, List *defelems, char *objectName);

static void AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes);
static void AppendStringInfoDictnames(StringInfo buf, List *dicts);


/*
- * DeparseCreateTextSearchStmt returns the sql for a DefineStmt defining a TEXT SEARCH
- * CONFIGURATION
+ * DeparseCreateTextSearchConfigurationStmt returns the sql for a DefineStmt defining a
+ * TEXT SEARCH CONFIGURATION
 *
 * Although the syntax is mutually exclusive on the two arguments that can be passed in,
 * the deparser will produce syntactically correct output even if multiple definitions
 * are provided.
 */
char *
-DeparseCreateTextSearchStmt(Node *node)
+DeparseCreateTextSearchConfigurationStmt(Node *node)
{
	DefineStmt *stmt = castNode(DefineStmt, node);

@@ -42,7 +43,7 @@ DeparseCreateTextSearchStmt(Node *node)
	const char *identifier = NameListToQuotedString(stmt->defnames);
	appendStringInfo(&buf, "CREATE TEXT SEARCH CONFIGURATION %s ", identifier);
	appendStringInfoString(&buf, "(");
-	AppendDefElemList(&buf, stmt->definition);
+	AppendDefElemList(&buf, stmt->definition, "CONFIGURATION");
	appendStringInfoString(&buf, ");");

	return buf.data;
@ -50,13 +51,38 @@ DeparseCreateTextSearchStmt(Node *node)
|
|||
|
||||
|
||||
/*
|
||||
* AppendDefElemList specialization to append a comma separated list of definitions to a
|
||||
* DeparseCreateTextSearchDictionaryStmt returns the sql for a DefineStmt defining a
|
||||
* TEXT SEARCH DICTIONARY
|
||||
*
|
||||
* Although the syntax is mutually exclusive on the two arguments that can be passed in
|
||||
* the deparser will syntactically correct multiple definitions if provided. *
|
||||
*/
|
||||
char *
|
||||
DeparseCreateTextSearchDictionaryStmt(Node *node)
|
||||
{
|
||||
DefineStmt *stmt = castNode(DefineStmt, node);
|
||||
|
||||
StringInfoData buf = { 0 };
|
||||
initStringInfo(&buf);
|
||||
|
||||
const char *identifier = NameListToQuotedString(stmt->defnames);
|
||||
appendStringInfo(&buf, "CREATE TEXT SEARCH DICTIONARY %s ", identifier);
|
||||
appendStringInfoString(&buf, "(");
|
||||
AppendDefElemList(&buf, stmt->definition, "DICTIONARY");
|
||||
appendStringInfoString(&buf, ");");
|
||||
|
||||
return buf.data;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* AppendDefElemList is a helper to append a comma separated list of definitions to a
|
||||
* define statement.
|
||||
*
|
||||
* Currently only supports String and TypeName entries. Will error on others.
|
||||
* The extra objectName parameter is used to create meaningful error messages.
|
||||
*/
|
||||
static void
|
||||
AppendDefElemList(StringInfo buf, List *defelems)
|
||||
AppendDefElemList(StringInfo buf, List *defelems, char *objectName)
|
||||
{
|
||||
DefElem *defelem = NULL;
|
||||
bool first = true;
|
||||
|
@ -68,32 +94,25 @@ AppendDefElemList(StringInfo buf, List *defelems)
|
|||
}
|
||||
first = false;
|
||||
|
||||
/* extract identifier from defelem */
|
||||
const char *identifier = NULL;
|
||||
switch (nodeTag(defelem->arg))
|
||||
/*
|
||||
* There are some operations that can omit the argument. In that case, we only use
|
||||
* the defname.
|
||||
*
|
||||
* For example, omitting [ = value ] in the next query results in resetting the
|
||||
* option to defaults:
|
||||
* ALTER TEXT SEARCH DICTIONARY name ( option [ = value ] );
|
||||
*/
|
||||
if (defelem->arg == NULL)
|
||||
{
|
||||
case T_String:
|
||||
{
|
||||
identifier = quote_identifier(strVal(defelem->arg));
|
||||
break;
|
||||
}
|
||||
|
||||
case T_TypeName:
|
||||
{
|
||||
TypeName *typeName = castNode(TypeName, defelem->arg);
|
||||
identifier = NameListToQuotedString(typeName->names);
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
{
|
||||
ereport(ERROR, (errmsg("unexpected argument during deparsing of "
|
||||
"TEXT SEARCH CONFIGURATION definition")));
|
||||
}
|
||||
appendStringInfo(buf, "%s", defelem->defname);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* extract value from defelem */
|
||||
const char *value = defGetString(defelem);
|
||||
|
||||
/* stringify */
|
||||
appendStringInfo(buf, "%s = %s", defelem->defname, identifier);
|
||||
appendStringInfo(buf, "%s = %s", defelem->defname, value);
|
||||
}
|
||||
}
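
A worked example of the NULL-arg branch; the dictionary and option names are illustrative:

/*
 * Sketch: ALTER TEXT SEARCH DICTIONARY public.my_dict ( stopwords );
 * carries a DefElem with a NULL arg and deparses back to just "stopwords",
 * resetting the option to its default, while stopwords = dutch goes through
 * defGetString() and deparses as "stopwords = dutch".
 */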


@@ -136,6 +155,44 @@ DeparseDropTextSearchConfigurationStmt(Node *node)
}


/*
 * DeparseDropTextSearchDictionaryStmt returns the sql representation for a DROP TEXT SEARCH
 * DICTIONARY ... statement. Supports dropping multiple dictionaries at once.
 */
char *
DeparseDropTextSearchDictionaryStmt(Node *node)
{
	DropStmt *stmt = castNode(DropStmt, node);
	Assert(stmt->removeType == OBJECT_TSDICTIONARY);

	StringInfoData buf = { 0 };
	initStringInfo(&buf);

	appendStringInfoString(&buf, "DROP TEXT SEARCH DICTIONARY ");
	List *nameList = NIL;
	bool first = true;
	foreach_ptr(nameList, stmt->objects)
	{
		if (!first)
		{
			appendStringInfoString(&buf, ", ");
		}
		first = false;

		appendStringInfoString(&buf, NameListToQuotedString(nameList));
	}

	if (stmt->behavior == DROP_CASCADE)
	{
		appendStringInfoString(&buf, " CASCADE");
	}

	appendStringInfoString(&buf, ";");

	return buf.data;
}
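
A worked example; the dictionary names are illustrative:

/*
 * Sketch: a DropStmt for public.my_dict and other.their_dict with behavior
 * DROP_CASCADE deparses to
 *     DROP TEXT SEARCH DICTIONARY public.my_dict, other.their_dict CASCADE;
 */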


/*
 * DeparseRenameTextSearchConfigurationStmt returns the sql representation of an ALTER TEXT
 * SEARCH CONFIGURATION ... RENAME TO ... statement.

@@ -158,7 +215,28 @@ DeparseRenameTextSearchConfigurationStmt(Node *node)


/*
- * DeparseAlterTextSearchConfigurationStmt returns the ql representation of any generic
+ * DeparseRenameTextSearchDictionaryStmt returns the sql representation of an ALTER TEXT SEARCH
+ * DICTIONARY ... RENAME TO ... statement.
+ */
+char *
+DeparseRenameTextSearchDictionaryStmt(Node *node)
+{
+	RenameStmt *stmt = castNode(RenameStmt, node);
+	Assert(stmt->renameType == OBJECT_TSDICTIONARY);
+
+	StringInfoData buf = { 0 };
+	initStringInfo(&buf);
+
+	char *identifier = NameListToQuotedString(castNode(List, stmt->object));
+	appendStringInfo(&buf, "ALTER TEXT SEARCH DICTIONARY %s RENAME TO %s;",
+					 identifier, quote_identifier(stmt->newname));
+
+	return buf.data;
+}
+
+
+/*
+ * DeparseAlterTextSearchConfigurationStmt returns the sql representation of any generic
 * ALTER TEXT SEARCH CONFIGURATION .... statement. The statements supported include:
 * - ALTER TEXT SEARCH CONFIGURATIONS ... ADD MAPPING FOR [, ...] WITH [, ...]
 * - ALTER TEXT SEARCH CONFIGURATIONS ... ALTER MAPPING FOR [, ...] WITH [, ...]
@@ -253,6 +331,28 @@ DeparseAlterTextSearchConfigurationStmt(Node *node)
}


/*
 * DeparseAlterTextSearchDictionaryStmt returns the sql representation of any generic
 * ALTER TEXT SEARCH DICTIONARY .... statement. The statements supported include:
 * - ALTER TEXT SEARCH DICTIONARY name ( option [ = value ] [, ... ] )
 */
char *
DeparseAlterTextSearchDictionaryStmt(Node *node)
{
	AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node);

	StringInfoData buf = { 0 };
	initStringInfo(&buf);

	char *identifier = NameListToQuotedString(castNode(List, stmt->dictname));
	appendStringInfo(&buf, "ALTER TEXT SEARCH DICTIONARY %s ( ", identifier);

	AppendDefElemList(&buf, stmt->options, "DICTIONARY");
	appendStringInfoString(&buf, " );");
	return buf.data;
}


/*
 * DeparseAlterTextSearchConfigurationSchemaStmt returns the sql statement representing
 * ALTER TEXT SEARCH CONFIGURATION ... SET SCHEMA ... statements.
@@ -274,6 +374,27 @@ DeparseAlterTextSearchConfigurationSchemaStmt(Node *node)
}


/*
 * DeparseAlterTextSearchDictionarySchemaStmt returns the sql statement representing ALTER TEXT
 * SEARCH DICTIONARY ... SET SCHEMA ... statements.
 */
char *
DeparseAlterTextSearchDictionarySchemaStmt(Node *node)
{
	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	StringInfoData buf = { 0 };
	initStringInfo(&buf);

	appendStringInfo(&buf, "ALTER TEXT SEARCH DICTIONARY %s SET SCHEMA %s;",
					 NameListToQuotedString(castNode(List, stmt->object)),
					 quote_identifier(stmt->newschema));

	return buf.data;
}


/*
 * DeparseTextSearchConfigurationCommentStmt returns the sql statement representing
 * COMMENT ON TEXT SEARCH CONFIGURATION ... IS ...
@@ -305,6 +426,37 @@ DeparseTextSearchConfigurationCommentStmt(Node *node)
}


/*
 * DeparseTextSearchDictionaryCommentStmt returns the sql statement representing
 * COMMENT ON TEXT SEARCH DICTIONARY ... IS ...
 */
char *
DeparseTextSearchDictionaryCommentStmt(Node *node)
{
	CommentStmt *stmt = castNode(CommentStmt, node);
	Assert(stmt->objtype == OBJECT_TSDICTIONARY);

	StringInfoData buf = { 0 };
	initStringInfo(&buf);

	appendStringInfo(&buf, "COMMENT ON TEXT SEARCH DICTIONARY %s IS ",
					 NameListToQuotedString(castNode(List, stmt->object)));

	if (stmt->comment == NULL)
	{
		appendStringInfoString(&buf, "NULL");
	}
	else
	{
		appendStringInfoString(&buf, quote_literal_cstr(stmt->comment));
	}

	appendStringInfoString(&buf, ";");

	return buf.data;
}
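
A minimal sketch of both branches; the dictionary name is illustrative:

/*
 * Sketch: COMMENT ON TEXT SEARCH DICTIONARY public.my_dict IS 'a comment';
 * deparses the literal via quote_literal_cstr(), while IS NULL keeps the bare
 * NULL keyword, which removes the comment.
 */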


/*
 * AppendStringInfoTokentypeList specializes in adding a comma separated list of
 * token_type's to TEXT SEARCH CONFIGURATION commands

@@ -375,3 +527,24 @@ DeparseAlterTextSearchConfigurationOwnerStmt(Node *node)

	return buf.data;
}


/*
 * DeparseAlterTextSearchDictionaryOwnerStmt returns the sql statement representing ALTER TEXT
 * SEARCH DICTIONARY ... OWNER TO ... commands.
 */
char *
DeparseAlterTextSearchDictionaryOwnerStmt(Node *node)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	StringInfoData buf = { 0 };
	initStringInfo(&buf);

	appendStringInfo(&buf, "ALTER TEXT SEARCH DICTIONARY %s OWNER TO %s;",
					 NameListToQuotedString(castNode(List, stmt->object)),
					 RoleSpecString(stmt->newowner, true));

	return buf.data;
}
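
Given the format string above, the emitted SQL is fully determined once the quoted name list and role string are known; with hypothetical names:

/*
 * ALTER TEXT SEARCH DICTIONARY public.my_dict OWNER TO dict_owner;
 */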

@@ -124,7 +124,6 @@ QualifyCollationName(List *name)
			(Form_pg_collation) GETSTRUCT(colltup);

		schemaName = get_namespace_name(collationForm->collnamespace);
		collationName = NameStr(collationForm->collname);
		name = list_make2(makeString(schemaName), makeString(collationName));
		ReleaseSysCache(colltup);
	}
@@ -69,6 +69,44 @@ QualifyDropTextSearchConfigurationStmt(Node *node)
}


/*
 * QualifyDropTextSearchDictionaryStmt adds any missing schema names to text search
 * dictionaries being dropped. All dictionaries are expected to exist before fully
 * qualifying the statement. Errors will be raised for objects that do not exist.
 * Non-existing objects are expected to not be distributed.
 */
void
QualifyDropTextSearchDictionaryStmt(Node *node)
{
	DropStmt *stmt = castNode(DropStmt, node);
	Assert(stmt->removeType == OBJECT_TSDICTIONARY);

	List *qualifiedObjects = NIL;
	List *objName = NIL;

	foreach_ptr(objName, stmt->objects)
	{
		char *schemaName = NULL;
		char *tsdictName = NULL;
		DeconstructQualifiedName(objName, &schemaName, &tsdictName);

		if (!schemaName)
		{
			Oid tsdictOid = get_ts_dict_oid(objName, false);
			Oid namespaceOid = get_ts_dict_namespace(tsdictOid);
			schemaName = get_namespace_name(namespaceOid);

			objName = list_make2(makeString(schemaName),
								 makeString(tsdictName));
		}

		qualifiedObjects = lappend(qualifiedObjects, objName);
	}

	stmt->objects = qualifiedObjects;
}
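
Concretely, assuming a dictionary my_dict that lives in schema public (names hypothetical), qualification rewrites the statement's object list roughly like this:

/*
 * Before: DROP TEXT SEARCH DICTIONARY my_dict;
 *   stmt->objects = [ ["my_dict"] ]
 * After:  DROP TEXT SEARCH DICTIONARY public.my_dict;
 *   stmt->objects = [ ["public", "my_dict"] ]
 */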


/*
 * QualifyAlterTextSearchConfigurationStmt adds the schema name (if missing) to the name
 * of the text search configurations, as well as the dictionaries referenced.

@@ -128,6 +166,32 @@ QualifyAlterTextSearchConfigurationStmt(Node *node)
}


/*
 * QualifyAlterTextSearchDictionaryStmt adds the schema name (if missing) to the name
 * of the text search dictionary.
 */
void
QualifyAlterTextSearchDictionaryStmt(Node *node)
{
	AlterTSDictionaryStmt *stmt = castNode(AlterTSDictionaryStmt, node);

	char *schemaName = NULL;
	char *objName = NULL;
	DeconstructQualifiedName(stmt->dictname, &schemaName, &objName);

	/* fully qualify the dictname being altered */
	if (!schemaName)
	{
		Oid tsdictOid = get_ts_dict_oid(stmt->dictname, false);
		Oid namespaceOid = get_ts_dict_namespace(tsdictOid);
		schemaName = get_namespace_name(namespaceOid);

		stmt->dictname = list_make2(makeString(schemaName),
									makeString(objName));
	}
}


/*
 * QualifyRenameTextSearchConfigurationStmt adds the schema name (if missing) to the
 * configuration being renamed. The new name will be kept without a schema name since this

@@ -156,9 +220,37 @@ QualifyRenameTextSearchConfigurationStmt(Node *node)
}


/*
 * QualifyRenameTextSearchDictionaryStmt adds the schema name (if missing) to the
 * dictionary being renamed. The new name will be kept without a schema name since this
 * command cannot be used to change the schema of a dictionary.
 */
void
QualifyRenameTextSearchDictionaryStmt(Node *node)
{
	RenameStmt *stmt = castNode(RenameStmt, node);
	Assert(stmt->renameType == OBJECT_TSDICTIONARY);

	char *schemaName = NULL;
	char *objName = NULL;
	DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);

	/* fully qualify the dictname being altered */
	if (!schemaName)
	{
		Oid tsdictOid = get_ts_dict_oid(castNode(List, stmt->object), false);
		Oid namespaceOid = get_ts_dict_namespace(tsdictOid);
		schemaName = get_namespace_name(namespaceOid);

		stmt->object = (Node *) list_make2(makeString(schemaName),
										   makeString(objName));
	}
}


/*
 * QualifyAlterTextSearchConfigurationSchemaStmt adds the schema name (if missing) for the
 * text search being moved to a new schema.
 * text search config being moved to a new schema.
 */
void
QualifyAlterTextSearchConfigurationSchemaStmt(Node *node)

@@ -182,6 +274,32 @@ QualifyAlterTextSearchConfigurationSchemaStmt(Node *node)
}


/*
 * QualifyAlterTextSearchDictionarySchemaStmt adds the schema name (if missing) for the
 * text search dictionary being moved to a new schema.
 */
void
QualifyAlterTextSearchDictionarySchemaStmt(Node *node)
{
	AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	char *schemaName = NULL;
	char *objName = NULL;
	DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);

	if (!schemaName)
	{
		Oid tsdictOid = get_ts_dict_oid(castNode(List, stmt->object), false);
		Oid namespaceOid = get_ts_dict_namespace(tsdictOid);
		schemaName = get_namespace_name(namespaceOid);

		stmt->object = (Node *) list_make2(makeString(schemaName),
										   makeString(objName));
	}
}


/*
 * QualifyTextSearchConfigurationCommentStmt adds the schema name (if missing) to the
 * configuration name on which the comment is created.

@@ -208,6 +326,32 @@ QualifyTextSearchConfigurationCommentStmt(Node *node)
}


/*
 * QualifyTextSearchDictionaryCommentStmt adds the schema name (if missing) to the
 * dictionary name on which the comment is created.
 */
void
QualifyTextSearchDictionaryCommentStmt(Node *node)
{
	CommentStmt *stmt = castNode(CommentStmt, node);
	Assert(stmt->objtype == OBJECT_TSDICTIONARY);

	char *schemaName = NULL;
	char *objName = NULL;
	DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);

	if (!schemaName)
	{
		Oid tsdictOid = get_ts_dict_oid(castNode(List, stmt->object), false);
		Oid namespaceOid = get_ts_dict_namespace(tsdictOid);
		schemaName = get_namespace_name(namespaceOid);

		stmt->object = (Node *) list_make2(makeString(schemaName),
										   makeString(objName));
	}
}


/*
 * QualifyAlterTextSearchConfigurationOwnerStmt adds the schema name (if missing) to the
 * configuration for which the owner is changing.

@@ -234,6 +378,32 @@ QualifyAlterTextSearchConfigurationOwnerStmt(Node *node)
}


/*
 * QualifyAlterTextSearchDictionaryOwnerStmt adds the schema name (if missing) to the
 * dictionary for which the owner is changing.
 */
void
QualifyAlterTextSearchDictionaryOwnerStmt(Node *node)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Assert(stmt->objectType == OBJECT_TSDICTIONARY);

	char *schemaName = NULL;
	char *objName = NULL;
	DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName);

	if (!schemaName)
	{
		Oid tsdictOid = get_ts_dict_oid(castNode(List, stmt->object), false);
		Oid namespaceOid = get_ts_dict_namespace(tsdictOid);
		schemaName = get_namespace_name(namespaceOid);

		stmt->object = (Node *) list_make2(makeString(schemaName),
										   makeString(objName));
	}
}


/*
 * get_ts_config_namespace returns the oid of the namespace which is housing the text
 * search configuration identified by tsconfigOid.
@@ -178,8 +178,6 @@
#include "utils/timestamp.h"

#define SLOW_START_DISABLED 0
#define WAIT_EVENT_SET_INDEX_NOT_INITIALIZED -1
#define WAIT_EVENT_SET_INDEX_FAILED -2


/*

@@ -678,10 +676,6 @@ static int UsableConnectionCount(WorkerPool *workerPool);
static long NextEventTimeout(DistributedExecution *execution);
static WaitEventSet * BuildWaitEventSet(List *sessionList);
static void RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList);
static int CitusAddWaitEventSetToSet(WaitEventSet *set, uint32 events, pgsocket fd,
									 Latch *latch, void *user_data);
static bool CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events,
								 Latch *latch);
static TaskPlacementExecution * PopPlacementExecution(WorkerSession *session);
static TaskPlacementExecution * PopAssignedPlacementExecution(WorkerSession *session);
static TaskPlacementExecution * PopUnassignedPlacementExecution(WorkerPool *workerPool);

@@ -1442,6 +1436,15 @@ DistributedExecutionRequiresRollback(List *taskList)
		return true;
	}

	if (task->queryCount > 1)
	{
		/*
		 * When there are multiple sequential queries in a task
		 * we need to run those as a transaction.
		 */
		return true;
	}

	return false;
}

@@ -5367,6 +5370,19 @@ BuildWaitEventSet(List *sessionList)
		CitusAddWaitEventSetToSet(waitEventSet, connection->waitFlags, sock,
								  NULL, (void *) session);
		session->waitEventSetIndex = waitEventSetIndex;

		/*
		 * Report failures to add to the wait event set with a debug message,
		 * as this is too detailed information for users.
		 */
		if (session->waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED)
		{
			ereport(DEBUG1, (errcode(ERRCODE_CONNECTION_FAILURE),
							 errmsg("Adding wait event for node %s:%d failed. "
									"The socket was: %d",
									session->workerPool->nodeName,
									session->workerPool->nodePort, sock)));
		}
	}

	CitusAddWaitEventSetToSet(waitEventSet, WL_POSTMASTER_DEATH, PGINVALID_SOCKET, NULL,

@@ -5378,64 +5394,6 @@ BuildWaitEventSet(List *sessionList)
}


/*
 * CitusAddWaitEventSetToSet is a wrapper around Postgres' AddWaitEventToSet().
 *
 * AddWaitEventToSet() may throw hard errors. For example, when the
 * underlying socket for a connection is closed by the remote server
 * and already reflected by the OS, however Citus hasn't had a chance
 * to get this information. In that case, if replication factor is >1,
 * Citus can failover to other nodes for executing the query. Even if
 * replication factor = 1, Citus can give much nicer errors.
 *
 * So CitusAddWaitEventSetToSet simply puts AddWaitEventToSet into a
 * PG_TRY/PG_CATCH block in order to catch any hard errors, and
 * returns this information to the caller.
 */
static int
CitusAddWaitEventSetToSet(WaitEventSet *set, uint32 events, pgsocket fd,
						  Latch *latch, void *user_data)
{
	volatile int waitEventSetIndex = WAIT_EVENT_SET_INDEX_NOT_INITIALIZED;
	MemoryContext savedContext = CurrentMemoryContext;

	PG_TRY();
	{
		waitEventSetIndex =
			AddWaitEventToSet(set, events, fd, latch, (void *) user_data);
	}
	PG_CATCH();
	{
		/*
		 * We might be in an arbitrary memory context when the
		 * error is thrown and we should get back to one we had
		 * at PG_TRY() time, especially because we are not
		 * re-throwing the error.
		 */
		MemoryContextSwitchTo(savedContext);

		FlushErrorState();

		if (user_data != NULL)
		{
			WorkerSession *workerSession = (WorkerSession *) user_data;

			ereport(DEBUG1, (errcode(ERRCODE_CONNECTION_FAILURE),
							 errmsg("Adding wait event for node %s:%d failed. "
									"The socket was: %d",
									workerSession->workerPool->nodeName,
									workerSession->workerPool->nodePort, fd)));
		}

		/* let the callers know about the failure */
		waitEventSetIndex = WAIT_EVENT_SET_INDEX_FAILED;
	}
	PG_END_TRY();

	return waitEventSetIndex;
}


/*
 * GetEventSetSize returns the event set size for a list of sessions.
 */

@@ -5485,7 +5443,7 @@ RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList)
		if (!success)
		{
			ereport(DEBUG1, (errcode(ERRCODE_CONNECTION_FAILURE),
							 errmsg("Modifying wait event for node %s:%d failed. "
							 errmsg("modifying wait event for node %s:%d failed. "
									"The wait event index was: %d",
									connection->hostname, connection->port,
									waitEventSetIndex)));

@@ -5496,51 +5454,6 @@ RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList)
}


/*
 * CitusModifyWaitEvent is a wrapper around Postgres' ModifyWaitEvent().
 *
 * ModifyWaitEvent may throw hard errors. For example, when the underlying
 * socket for a connection is closed by the remote server and already
 * reflected by the OS, however Citus hasn't had a chance to get this
 * information. In that case, if replication factor is >1, Citus can
 * failover to other nodes for executing the query. Even if replication
 * factor = 1, Citus can give much nicer errors.
 *
 * So CitusModifyWaitEvent simply puts ModifyWaitEvent into a PG_TRY/PG_CATCH
 * block in order to catch any hard errors, and returns this information to the
 * caller.
 */
static bool
CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
{
	volatile bool success = true;
	MemoryContext savedContext = CurrentMemoryContext;

	PG_TRY();
	{
		ModifyWaitEvent(set, pos, events, latch);
	}
	PG_CATCH();
	{
		/*
		 * We might be in an arbitrary memory context when the
		 * error is thrown and we should get back to one we had
		 * at PG_TRY() time, especially because we are not
		 * re-throwing the error.
		 */
		MemoryContextSwitchTo(savedContext);

		FlushErrorState();

		/* let the callers know about the failure */
		success = false;
	}
	PG_END_TRY();

	return success;
}
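
The two wrappers in this section instantiate the same soft-error shape: run one wait-event call, and downgrade any hard ERROR into a return value the caller can act on. A minimal generic sketch of that pattern, with a hypothetical helper name that is not part of this commit:

/* Sketch: run a callback, downgrading any ERROR to a boolean failure. */
static bool
CallTolerantly(void (*callback) (void))
{
	volatile bool ok = true;
	MemoryContext savedContext = CurrentMemoryContext;

	PG_TRY();
	{
		callback();
	}
	PG_CATCH();
	{
		/* restore the memory context of PG_TRY() time, then drop the error */
		MemoryContextSwitchTo(savedContext);
		FlushErrorState();
		ok = false;
	}
	PG_END_TRY();

	return ok;
}

The MemoryContextSwitchTo() before FlushErrorState() matters: the error may have been thrown in an arbitrary context, and since the error is not re-thrown, execution continues in whatever context was current.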


/*
 * SetLocalForceMaxQueryParallelization is simply a C interface for setting
 * the following:

@@ -788,6 +788,11 @@ GetObjectTypeString(ObjectType objType)
			return "text search configuration";
		}

		case OBJECT_TSDICTIONARY:
		{
			return "text search dictionary";
		}

		case OBJECT_TYPE:
		{
			return "type";
@@ -16,6 +16,7 @@
#include "miscadmin.h"
#include "port.h"

#include "access/hash.h"
#include "access/nbtree.h"
#include "catalog/pg_am.h"
#include "catalog/pg_type.h"

@@ -349,6 +350,12 @@ QueryTupleShardSearchInfo(ArrayType *minValuesArray, ArrayType *maxValuesArray,

		hashFunction = palloc0(sizeof(FmgrInfo));
		fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), CurrentMemoryContext);

		if (!OidIsValid(hashFunction->fn_oid))
		{
			ereport(ERROR, (errmsg("no hash function defined for type %s",
								   format_type_be(partitionColumn->vartype))));
		}
	}

	ShardInterval **shardIntervalArray = palloc0(partitionCount *
@@ -600,7 +600,7 @@ SupportedDependencyByCitus(const ObjectAddress *address)
		{
			case OCLASS_SCHEMA:
			{
				return true;
				return !isTempNamespace(address->objectId);
			}

			default:

@@ -631,11 +631,15 @@ SupportedDependencyByCitus(const ObjectAddress *address)
		}

		case OCLASS_COLLATION:
		case OCLASS_SCHEMA:
		{
			return true;
		}

		case OCLASS_SCHEMA:
		{
			return !isTempNamespace(address->objectId);
		}

		case OCLASS_PROC:
		{
			return true;

@@ -676,6 +680,11 @@ SupportedDependencyByCitus(const ObjectAddress *address)
			return true;
		}

		case OCLASS_TSDICT:
		{
			return true;
		}

		case OCLASS_TYPE:
		{
			switch (get_typtype(address->objectId))
|
|||
#endif
|
||||
|
||||
/*
|
||||
* If the given object is a procedure, we want to create it locally,
|
||||
* so provide that information in the error detail.
|
||||
* We expect callers to interpret the error returned from this function
|
||||
* as a warning if the object itself is just being created. In that case,
|
||||
* we expect them to report below error detail as well to indicate that
|
||||
* object itself will not be propagated but will still be created locally.
|
||||
*
|
||||
* Otherwise, callers are expected to throw the error returned from this
|
||||
* function as a hard one by ignoring the detail part.
|
||||
*/
|
||||
if (getObjectClass(objectAddress) == OCLASS_PROC)
|
||||
{
|
||||
appendStringInfo(detailInfo, "\"%s\" will be created only locally",
|
||||
objectDescription);
|
||||
}
|
||||
appendStringInfo(detailInfo, "\"%s\" will be created only locally",
|
||||
objectDescription);
|
||||
|
||||
if (SupportedDependencyByCitus(undistributableDependency))
|
||||
{
|
||||
|
@ -794,9 +805,7 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress)
|
|||
objectDescription);
|
||||
|
||||
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
|
||||
errorInfo->data,
|
||||
strlen(detailInfo->data) == 0 ? NULL : detailInfo->data,
|
||||
hintInfo->data);
|
||||
errorInfo->data, detailInfo->data, hintInfo->data);
|
||||
}
|
||||
|
||||
appendStringInfo(errorInfo, "\"%s\" has dependency on unsupported "
|
||||
|
@ -804,9 +813,7 @@ DeferErrorIfHasUnsupportedDependency(const ObjectAddress *objectAddress)
|
|||
dependencyDescription);
|
||||
|
||||
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
|
||||
errorInfo->data,
|
||||
strlen(detailInfo->data) == 0 ? NULL : detailInfo->data,
|
||||
NULL);
|
||||
errorInfo->data, detailInfo->data, NULL);
|
||||
}
|
||||
|
||||
|
||||
|


@@ -857,9 +864,13 @@ GetUndistributableDependency(const ObjectAddress *objectAddress)
		if (!SupportedDependencyByCitus(dependency))
		{
			/*
			 * Since roles should be handled manually with Citus community, skip them.
			 * Skip roles and text search templates.
			 *
			 * Roles should be handled manually with Citus community whereas text search
			 * templates should be handled manually in both community and enterprise
			 */
			if (getObjectClass(dependency) != OCLASS_ROLE)
			if (getObjectClass(dependency) != OCLASS_ROLE &&
				getObjectClass(dependency) != OCLASS_TSTEMPLATE)
			{
				return dependency;
			}

@@ -1259,7 +1270,7 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe

	/*
	 * Tables could have indexes. Indexes themselves could have dependencies that
	 * need to be propagated. eg. TEXT SEARCH CONFIGRUATIONS. Here we add the
	 * need to be propagated. eg. TEXT SEARCH CONFIGURATIONS. Here we add the
	 * addresses of all indices to the list of objects to visit, as to make sure we
	 * create all objects required by the indices before we create the table
	 * including indices.
@@ -169,6 +169,7 @@ typedef struct MetadataCacheData
	Oid citusTableIsVisibleFuncId;
	Oid relationIsAKnownShardFuncId;
	Oid jsonbExtractPathFuncId;
	Oid jsonbExtractPathTextFuncId;
	bool databaseNameValid;
	char databaseName[NAMEDATALEN];
} MetadataCacheData;

@@ -2726,6 +2727,24 @@ JsonbExtractPathFuncId(void)
}


/*
 * JsonbExtractPathTextFuncId returns oid of the jsonb_extract_path_text function.
 */
Oid
JsonbExtractPathTextFuncId(void)
{
	if (MetadataCache.jsonbExtractPathTextFuncId == InvalidOid)
	{
		const int argCount = 2;

		MetadataCache.jsonbExtractPathTextFuncId =
			FunctionOid("pg_catalog", "jsonb_extract_path_text", argCount);
	}

	return MetadataCache.jsonbExtractPathTextFuncId;
}


/*
 * CurrentDatabaseName gets the name of the current database and caches
 * the result.
@@ -1487,13 +1487,10 @@ GetDependentSequencesWithRelation(Oid relationId, List **attnumList,

	table_close(depRel, AccessShareLock);

	ListCell *attrdefOidCell = NULL;
	ListCell *attrdefAttnumCell = NULL;
	forboth(attrdefOidCell, attrdefResult, attrdefAttnumCell, attrdefAttnumResult)
	AttrNumber attrdefAttnum = InvalidAttrNumber;
	Oid attrdefOid = InvalidOid;
	forboth_int_oid(attrdefAttnum, attrdefAttnumResult, attrdefOid, attrdefResult)
	{
		Oid attrdefOid = lfirst_oid(attrdefOidCell);
		AttrNumber attrdefAttnum = lfirst_int(attrdefAttnumCell);

		List *sequencesFromAttrDef = GetSequencesFromAttrDef(attrdefOid);

		/* to simplify and eliminate cases like "DEFAULT nextval('..') - nextval('..')" */

@@ -1689,14 +1686,10 @@ SequenceDependencyCommandList(Oid relationId)

	ExtractDefaultColumnsAndOwnedSequences(relationId, &columnNameList, &sequenceIdList);

	ListCell *columnNameCell = NULL;
	ListCell *sequenceIdCell = NULL;

	forboth(columnNameCell, columnNameList, sequenceIdCell, sequenceIdList)
	char *columnName = NULL;
	Oid sequenceId = InvalidOid;
	forboth_ptr_oid(columnName, columnNameList, sequenceId, sequenceIdList)
	{
		char *columnName = lfirst(columnNameCell);
		Oid sequenceId = lfirst_oid(sequenceIdCell);

		if (!OidIsValid(sequenceId))
		{
			/*
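
The two hunks above replace hand-rolled ListCell iteration with citus's typed pair-iteration macros (forboth_int_oid, forboth_ptr_oid, and friends from listutils.h). As a rough sketch of the idea, such a macro can be written like this; note this is an illustration only, and the definition actually shipped in listutils.h name-mangles its cell variables to avoid shadowing and may differ in other details:

/* iterate an int list and an Oid list in lockstep (sketch) */
#define forboth_int_oid(intVar, intList, oidVar, oidList) \
	for (ListCell *intCell = list_head(intList), \
		 *oidCell = list_head(oidList); \
		 intCell != NULL && oidCell != NULL && \
		 (((intVar) = lfirst_int(intCell)) || true) && \
		 (((oidVar) = lfirst_oid(oidCell)) || true); \
		 intCell = lnext(intList, intCell), oidCell = lnext(oidList, oidCell))

The payoff is visible in the hunks themselves: loop-variable declarations shrink to two typed locals, and the lfirst_int()/lfirst_oid() calls disappear from the loop bodies.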
@@ -126,6 +126,7 @@ static void ErrorIfCoordinatorMetadataSetFalse(WorkerNode *workerNode, Datum val
											   char *field);
static WorkerNode * SetShouldHaveShards(WorkerNode *workerNode, bool shouldHaveShards);
static void RemoveOldShardPlacementForNodeGroup(int groupId);
static int FindCoordinatorNodeId(void);

/* declarations for dynamic loading */
PG_FUNCTION_INFO_V1(citus_set_coordinator_host);

@@ -148,6 +149,7 @@ PG_FUNCTION_INFO_V1(master_update_node);
PG_FUNCTION_INFO_V1(get_shard_id_for_distribution_column);
PG_FUNCTION_INFO_V1(citus_nodename_for_nodeid);
PG_FUNCTION_INFO_V1(citus_nodeport_for_nodeid);
PG_FUNCTION_INFO_V1(citus_coordinator_nodeid);


/*

@@ -275,6 +277,24 @@ citus_add_node(PG_FUNCTION_ARGS)
	 */
	if (!nodeAlreadyExists)
	{
		WorkerNode *workerNode = FindWorkerNodeAnyCluster(nodeNameString, nodePort);

		/*
		 * If the worker is not marked as a coordinator, check that
		 * the node is not trying to add itself
		 */
		if (workerNode != NULL &&
			workerNode->groupId != COORDINATOR_GROUP_ID &&
			workerNode->nodeRole != SecondaryNodeRoleId() &&
			IsWorkerTheCurrentNode(workerNode))
		{
			ereport(ERROR, (errmsg("Node cannot add itself as a worker."),
							errhint(
								"Add the node as a coordinator by using: "
								"SELECT citus_set_coordinator_host('%s', %d);",
								nodeNameString, nodePort)));
		}

		ActivateNode(nodeNameString, nodePort);
	}
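
The guard above makes a node refuse to add itself as a non-coordinator worker. Illustratively, with a hypothetical hostname and port:

/*
 * Run on node10 itself:
 *   SELECT citus_add_node('node10', 5432);
 *   ERROR:  Node cannot add itself as a worker.
 *   HINT:  Add the node as a coordinator by using:
 *          SELECT citus_set_coordinator_host('node10', 5432);
 */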

@@ -1519,6 +1539,25 @@ citus_nodeport_for_nodeid(PG_FUNCTION_ARGS)
}


/*
 * citus_coordinator_nodeid returns the node id of the coordinator node
 */
Datum
citus_coordinator_nodeid(PG_FUNCTION_ARGS)
{
	CheckCitusVersion(ERROR);

	int coordinatorNodeId = FindCoordinatorNodeId();

	if (coordinatorNodeId == -1)
	{
		PG_RETURN_INT32(0);
	}

	PG_RETURN_INT32(coordinatorNodeId);
}
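
Usage sketch: on any node with metadata,

/*
 * SELECT citus_coordinator_nodeid();
 *
 * returns the coordinator's node id from pg_dist_node, or 0 when no
 * coordinator row exists (i.e. FindCoordinatorNodeId() returned -1).
 */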


/*
 * FindWorkerNode searches over the worker nodes and returns the workerNode
 * if it already exists. Else, the function returns NULL.

@@ -1617,6 +1656,28 @@ FindNodeWithNodeId(int nodeId, bool missingOk)
}


/*
 * FindCoordinatorNodeId returns the node id of the coordinator node
 */
static int
FindCoordinatorNodeId()
{
	bool includeNodesFromOtherClusters = false;
	List *nodeList = ReadDistNode(includeNodesFromOtherClusters);
	WorkerNode *node = NULL;

	foreach_ptr(node, nodeList)
	{
		if (NodeIsCoordinator(node))
		{
			return node->nodeId;
		}
	}

	return -1;
}


/*
 * ReadDistNode iterates over pg_dist_node table, converts each row
 * into its memory representation (i.e., WorkerNode) and adds them into
@@ -411,6 +411,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
		case OBJECT_PROCEDURE:
		case OBJECT_AGGREGATE:
		case OBJECT_TSCONFIGURATION:
		case OBJECT_TSDICTIONARY:
		case OBJECT_TYPE:
		case OBJECT_FOREIGN_SERVER:
		case OBJECT_SEQUENCE:
@@ -48,6 +48,7 @@
#include "distributed/metadata_sync.h"
#include "distributed/namespace_utils.h"
#include "distributed/pg_dist_shard.h"
#include "distributed/shared_library_init.h"
#include "distributed/version_compat.h"
#include "distributed/worker_manager.h"
#include "foreign/foreign.h"

@@ -613,7 +614,7 @@ GetPreLoadTableCreationCommands(Oid relationId,


	/* add columnar options for cstore tables */
	if (accessMethod == NULL && IsColumnarTableAmTable(relationId))
	if (accessMethod == NULL && extern_IsColumnarTableAmTable(relationId))
	{
		TableDDLCommand *cstoreOptionsDDL = ColumnarGetTableOptionsDDL(relationId);
		if (cstoreOptionsDDL != NULL)

@@ -1047,7 +1048,8 @@ CitusCreateAlterColumnarTableSet(char *qualifiedRelationName,
					 options->chunkRowCount,
					 options->stripeRowCount,
					 options->compressionLevel,
					 quote_literal_cstr(CompressionTypeStr(options->compressionType)));
					 quote_literal_cstr(extern_CompressionTypeStr(
											options->compressionType)));

	return buf.data;
}

@@ -1136,7 +1138,7 @@ ColumnarGetTableOptionsDDL(Oid relationId)
	char *relationName = get_rel_name(relationId);

	ColumnarOptions options = { 0 };
	ReadColumnarOptions(relationId, &options);
	extern_ReadColumnarOptions(relationId, &options);

	return ColumnarGetCustomTableOptionsDDL(schemaName, relationName, options);
}
@@ -45,9 +45,6 @@ static void InlineCTEsInQueryTree(Query *query);
static bool QueryTreeContainsInlinableCteWalker(Node *node);


/* controlled via GUC */
bool EnableCTEInlining = true;

/*
 * RecursivelyInlineCtesInQueryTree gets a query and recursively traverses the
 * tree from top to bottom. On each level, the CTEs that are eligible for

@@ -752,19 +752,6 @@ static PlannedStmt *
InlineCtesAndCreateDistributedPlannedStmt(uint64 planId,
										  DistributedPlanningContext *planContext)
{
	if (!EnableCTEInlining)
	{
		/*
		 * In Postgres 12+, users can adjust whether to inline/not inline CTEs
		 * by [NOT] MATERIALIZED keywords. However, in PG 11, that's not possible.
		 * So, with this we provide a way to prevent CTE inlining on Postgres 11.
		 *
		 * The main use-case for this is not to have divergent test outputs between
		 * PG 11 vs PG 12, so not very much intended for users.
		 */
		return NULL;
	}

	/*
	 * We'll inline the CTEs and try distributed planning, preserve the original
	 * query in case the planning fails and we fallback to recursive planning of
@@ -1513,14 +1513,10 @@ InsertSelectResultIdPrefix(uint64 planId)
static void
RelabelTargetEntryList(List *selectTargetList, List *insertTargetList)
{
	ListCell *selectTargetCell = NULL;
	ListCell *insertTargetCell = NULL;

	forboth(selectTargetCell, selectTargetList, insertTargetCell, insertTargetList)
	TargetEntry *selectTargetEntry = NULL;
	TargetEntry *insertTargetEntry = NULL;
	forboth_ptr(selectTargetEntry, selectTargetList, insertTargetEntry, insertTargetList)
	{
		TargetEntry *selectTargetEntry = lfirst(selectTargetCell);
		TargetEntry *insertTargetEntry = lfirst(insertTargetCell);

		selectTargetEntry->resname = insertTargetEntry->resname;
	}
}

@@ -1537,8 +1533,6 @@ static List *
AddInsertSelectCasts(List *insertTargetList, List *selectTargetList,
					 Oid targetRelationId)
{
	ListCell *insertEntryCell = NULL;
	ListCell *selectEntryCell = NULL;
	List *projectedEntries = NIL;
	List *nonProjectedEntries = NIL;

@@ -1553,10 +1547,10 @@ AddInsertSelectCasts(List *insertTargetList, List *selectTargetList,
	TupleDesc destTupleDescriptor = RelationGetDescr(distributedRelation);

	int targetEntryIndex = 0;
	forboth(insertEntryCell, insertTargetList, selectEntryCell, selectTargetList)
	TargetEntry *insertEntry = NULL;
	TargetEntry *selectEntry = NULL;
	forboth_ptr(insertEntry, insertTargetList, selectEntry, selectTargetList)
	{
		TargetEntry *insertEntry = (TargetEntry *) lfirst(insertEntryCell);
		TargetEntry *selectEntry = (TargetEntry *) lfirst(selectEntryCell);
		Var *insertColumn = (Var *) insertEntry->expr;
		Form_pg_attribute attr = TupleDescAttr(destTupleDescriptor,
											   insertEntry->resno - 1);
@@ -46,9 +46,9 @@
#include "distributed/placement_connection.h"
#include "distributed/tuple_destination.h"
#include "distributed/tuplestore.h"
#include "distributed/listutils.h"
#include "distributed/worker_protocol.h"
#include "distributed/version_compat.h"
#include "distributed/jsonbutils.h"
#include "executor/tstoreReceiver.h"
#include "fmgr.h"
#include "lib/stringinfo.h"

@@ -143,10 +143,8 @@ static void ExplainWorkerPlan(PlannedStmt *plannedStmt, DestReceiver *dest,
							  QueryEnvironment *queryEnv,
							  const instr_time *planduration,
							  double *executionDurationMillisec);
static bool ExtractFieldBoolean(Datum jsonbDoc, const char *fieldName, bool defaultValue);
static ExplainFormat ExtractFieldExplainFormat(Datum jsonbDoc, const char *fieldName,
											   ExplainFormat defaultValue);
static bool ExtractFieldJsonbDatum(Datum jsonbDoc, const char *fieldName, Datum *result);
static TupleDestination * CreateExplainAnlyzeDestination(Task *task,
														 TupleDestination *taskDest);
static void ExplainAnalyzeDestPutTuple(TupleDestination *self, Task *task,

@@ -577,8 +575,6 @@ static void
ExplainTaskList(CitusScanState *scanState, List *taskList, ExplainState *es,
				ParamListInfo params)
{
	ListCell *taskCell = NULL;
	ListCell *remoteExplainCell = NULL;
	List *remoteExplainList = NIL;

	/* if tasks are executed, we sort them by time; unless we are on a test env */

@@ -593,10 +589,9 @@ ExplainTaskList(CitusScanState *scanState, List *taskList, ExplainState *es,
		taskList = SortList(taskList, CompareTasksByTaskId);
	}

	foreach(taskCell, taskList)
	Task *task = NULL;
	foreach_ptr(task, taskList)
	{
		Task *task = (Task *) lfirst(taskCell);

		RemoteExplainPlan *remoteExplain = RemoteExplain(task, es, params);
		remoteExplainList = lappend(remoteExplainList, remoteExplain);

@@ -606,12 +601,9 @@ ExplainTaskList(CitusScanState *scanState, List *taskList, ExplainState *es,
		}
	}

	forboth(taskCell, taskList, remoteExplainCell, remoteExplainList)
	RemoteExplainPlan *remoteExplain = NULL;
	forboth_ptr(task, taskList, remoteExplain, remoteExplainList)
	{
		Task *task = (Task *) lfirst(taskCell);
		RemoteExplainPlan *remoteExplain =
			(RemoteExplainPlan *) lfirst(remoteExplainCell);

		ExplainTask(scanState, task, remoteExplain->placementIndex,
					remoteExplain->explainOutputList, es);
	}
@@ -1112,25 +1104,6 @@ FreeSavedExplainPlan(void)
}


/*
 * ExtractFieldBoolean gets value of fieldName from jsonbDoc, or returns
 * defaultValue if it doesn't exist.
 */
static bool
ExtractFieldBoolean(Datum jsonbDoc, const char *fieldName, bool defaultValue)
{
	Datum jsonbDatum = 0;
	bool found = ExtractFieldJsonbDatum(jsonbDoc, fieldName, &jsonbDatum);
	if (!found)
	{
		return defaultValue;
	}

	Datum boolDatum = DirectFunctionCall1(jsonb_bool, jsonbDatum);
	return DatumGetBool(boolDatum);
}


/*
 * ExtractFieldExplainFormat gets value of fieldName from jsonbDoc, or returns
 * defaultValue if it doesn't exist.

@@ -1169,50 +1142,6 @@ ExtractFieldExplainFormat(Datum jsonbDoc, const char *fieldName, ExplainFormat
}


/*
 * ExtractFieldJsonbDatum gets value of fieldName from jsonbDoc and puts it
 * into result. If not found, returns false. Otherwise, returns true.
 */
static bool
ExtractFieldJsonbDatum(Datum jsonbDoc, const char *fieldName, Datum *result)
{
	Datum pathArray[1] = { CStringGetTextDatum(fieldName) };
	bool pathNulls[1] = { false };
	bool typeByValue = false;
	char typeAlignment = 0;
	int16 typeLength = 0;
	int dimensions[1] = { 1 };
	int lowerbounds[1] = { 1 };

	get_typlenbyvalalign(TEXTOID, &typeLength, &typeByValue, &typeAlignment);

	ArrayType *pathArrayObject = construct_md_array(pathArray, pathNulls, 1, dimensions,
													lowerbounds, TEXTOID, typeLength,
													typeByValue, typeAlignment);
	Datum pathDatum = PointerGetDatum(pathArrayObject);

	/*
	 * We need to check whether the result of jsonb_extract_path is NULL or not, so use
	 * FunctionCallInvoke() instead of other function call api.
	 *
	 * We cannot use jsonb_path_exists to ensure not-null since it is not available in
	 * postgres 11.
	 */
	FmgrInfo fmgrInfo;
	fmgr_info(JsonbExtractPathFuncId(), &fmgrInfo);

	LOCAL_FCINFO(functionCallInfo, 2);
	InitFunctionCallInfoData(*functionCallInfo, &fmgrInfo, 2, DEFAULT_COLLATION_OID, NULL,
							 NULL);

	fcSetArg(functionCallInfo, 0, jsonbDoc);
	fcSetArg(functionCallInfo, 1, pathDatum);

	*result = FunctionCallInvoke(functionCallInfo);
	return !functionCallInfo->isnull;
}


/*
 * CitusExplainOneQuery is the executor hook that is called when
 * postgres wants to explain a query.

@@ -1483,7 +1412,9 @@ WrapQueryForExplainAnalyze(const char *queryString, TupleDesc tupleDesc)
		}

		Form_pg_attribute attr = &tupleDesc->attrs[columnIndex];
		char *attrType = format_type_with_typemod(attr->atttypid, attr->atttypmod);
		char *attrType = format_type_extended(attr->atttypid, attr->atttypmod,
											  FORMAT_TYPE_TYPEMOD_GIVEN |
											  FORMAT_TYPE_FORCE_QUALIFY);

		appendStringInfo(columnDef, "field_%d %s", columnIndex, attrType);
	}
@@ -2194,11 +2194,9 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId,
						 DeferredErrorMessage **planningError)
{
	List *sqlTaskList = NIL;
	ListCell *restrictionCell = NULL;
	uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */
	int shardCount = 0;
	bool *taskRequiredForShardIndex = NULL;
	ListCell *prunedRelationShardCell = NULL;

	/* error if shards are not co-partitioned */
	ErrorIfUnsupportedShardDistribution(query);

@@ -2216,14 +2214,13 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId,
	int minShardOffset = 0;
	int maxShardOffset = 0;

	forboth(prunedRelationShardCell, prunedRelationShardList,
			restrictionCell, relationRestrictionContext->relationRestrictionList)
	RelationRestriction *relationRestriction = NULL;
	List *prunedShardList = NULL;

	forboth_ptr(prunedShardList, prunedRelationShardList,
				relationRestriction, relationRestrictionContext->relationRestrictionList)
	{
		RelationRestriction *relationRestriction =
			(RelationRestriction *) lfirst(restrictionCell);
		Oid relationId = relationRestriction->relationId;
		List *prunedShardList = (List *) lfirst(prunedRelationShardCell);
		ListCell *shardIntervalCell = NULL;

		CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId);
		if (IsCitusTableTypeCacheEntry(cacheEntry, CITUS_TABLE_WITH_NO_DIST_KEY))

@@ -2266,9 +2263,9 @@ QueryPushdownSqlTaskList(Query *query, uint64 jobId,
			continue;
		}

		foreach(shardIntervalCell, prunedShardList)
		ShardInterval *shardInterval = NULL;
		foreach_ptr(shardInterval, prunedShardList)
		{
			ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
			int shardIndex = shardInterval->shardIndex;

			taskRequiredForShardIndex[shardIndex] = true;
@@ -45,6 +45,8 @@
#include "parser/parsetree.h"


#define INVALID_RELID -1

/*
 * RecurringTuplesType is used to distinguish different types of expressions
 * that always produce the same set of tuples when a shard is queried. We make

@@ -61,6 +63,17 @@ typedef enum RecurringTuplesType
	RECURRING_TUPLES_VALUES
} RecurringTuplesType;

/*
 * RelidsReferenceWalkerContext is used to find Vars in a (sub)query that
 * refer to certain relids from the upper query.
 */
typedef struct RelidsReferenceWalkerContext
{
	int level;
	Relids relids;
	int foundRelid;
} RelidsReferenceWalkerContext;


/* Config variable managed via guc.c */
bool SubqueryPushdown = false; /* is subquery pushdown enabled */

@@ -76,7 +89,9 @@ static RecurringTuplesType FromClauseRecurringTupleType(Query *queryTree);
static DeferredErrorMessage * DeferredErrorIfUnsupportedRecurringTuplesJoin(
	PlannerRestrictionContext *plannerRestrictionContext);
static DeferredErrorMessage * DeferErrorIfUnsupportedTableCombination(Query *queryTree);
static DeferredErrorMessage * DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree);
static DeferredErrorMessage * DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree, bool
																lateral,
																char *referencedThing);
static bool ExtractSetOperationStatementWalker(Node *node, List **setOperationList);
static RecurringTuplesType FetchFirstRecurType(PlannerInfo *plannerInfo,
											   Relids relids);

@@ -90,7 +105,12 @@ static List * CreateSubqueryTargetListAndAdjustVars(List *columnList);
static AttrNumber FindResnoForVarInTargetList(List *targetList, int varno, int varattno);
static bool RelationInfoContainsOnlyRecurringTuples(PlannerInfo *plannerInfo,
													Relids relids);
static DeferredErrorMessage * DeferredErrorIfUnsupportedLateralSubquery(
	PlannerInfo *plannerInfo, Relids recurringRelIds, Relids nonRecurringRelIds);
static Var * PartitionColumnForPushedDownSubquery(Query *query);
static bool ContainsReferencesToRelids(Query *query, Relids relids, int *foundRelid);
static bool ContainsReferencesToRelidsWalker(Node *node,
											 RelidsReferenceWalkerContext *context);


/*
|
@ -844,6 +864,49 @@ DeferredErrorIfUnsupportedRecurringTuplesJoin(
|
|||
break;
|
||||
}
|
||||
}
|
||||
else if (joinType == JOIN_INNER && plannerInfo->hasLateralRTEs)
|
||||
{
|
||||
/*
|
||||
* Sometimes we cannot push down INNER JOINS when they have only
|
||||
* recurring tuples on one side and a lateral on the other side.
|
||||
* See comment on DeferredErrorIfUnsupportedLateralSubquery for
|
||||
* details.
|
||||
*
|
||||
* When planning inner joins postgres can move RTEs from left to
|
||||
* right and from right to left. So we don't know on which side the
|
||||
* lateral join wil appear. Thus we try to find a side of the join
|
||||
* that only contains recurring tuples. And then we check the other
|
||||
* side to see if it contains an unsupported lateral join.
|
||||
*
|
||||
*/
|
||||
if (RelationInfoContainsOnlyRecurringTuples(plannerInfo, innerrelRelids))
|
||||
{
|
||||
DeferredErrorMessage *deferredError =
|
||||
DeferredErrorIfUnsupportedLateralSubquery(plannerInfo,
|
||||
innerrelRelids,
|
||||
outerrelRelids);
|
||||
if (deferredError)
|
||||
{
|
||||
return deferredError;
|
||||
}
|
||||
}
|
||||
else if (RelationInfoContainsOnlyRecurringTuples(plannerInfo, outerrelRelids))
|
||||
{
|
||||
/*
|
||||
* This branch uses "else if" instead of "if", because if both
|
||||
* sides contain only recurring tuples there will never be an
|
||||
* unsupported lateral subquery.
|
||||
*/
|
||||
DeferredErrorMessage *deferredError =
|
||||
DeferredErrorIfUnsupportedLateralSubquery(plannerInfo,
|
||||
outerrelRelids,
|
||||
innerrelRelids);
|
||||
if (deferredError)
|
||||
{
|
||||
return deferredError;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (recurType == RECURRING_TUPLES_REFERENCE_TABLE)
|
||||
|
@@ -950,7 +1013,8 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi
	 */
	if (!ContainsReferencesToOuterQuery(subqueryTree))
	{
		deferredError = DeferErrorIfSubqueryRequiresMerge(subqueryTree);
		deferredError = DeferErrorIfSubqueryRequiresMerge(subqueryTree, false,
														  "another query");
		if (deferredError)
		{
			return deferredError;

@@ -1028,24 +1092,29 @@ DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLi
 * column, etc.).
 */
static DeferredErrorMessage *
DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree)
DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree, bool lateral,
								  char *referencedThing)
{
	bool preconditionsSatisfied = true;
	char *errorDetail = NULL;

	char *lateralString = lateral ? "lateral " : "";

	if (subqueryTree->limitOffset)
	{
		preconditionsSatisfied = false;
		errorDetail = "Offset clause is currently unsupported when a subquery "
					  "references a column from another query";
		errorDetail = psprintf("Offset clause is currently unsupported when a %ssubquery "
							   "references a column from %s", lateralString,
							   referencedThing);
	}

	/* limit is not supported when SubqueryPushdown is not set */
	if (subqueryTree->limitCount && !SubqueryPushdown)
	{
		preconditionsSatisfied = false;
		errorDetail = "Limit in subquery is currently unsupported when a "
					  "subquery references a column from another query";
		errorDetail = psprintf("Limit clause is currently unsupported when a "
							   "%ssubquery references a column from %s", lateralString,
							   referencedThing);
	}

	/* group clause list must include partition column */

@@ -1060,9 +1129,9 @@ DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree)
		if (!groupOnPartitionColumn)
		{
			preconditionsSatisfied = false;
			errorDetail = "Group by list without partition column is currently "
						  "unsupported when a subquery references a column "
						  "from another query";
			errorDetail = psprintf("Group by list without partition column is currently "
								   "unsupported when a %ssubquery references a column "
								   "from %s", lateralString, referencedThing);
		}
	}

@@ -1070,17 +1139,18 @@ DeferErrorIfSubqueryRequiresMerge(Query *subqueryTree)
	if (subqueryTree->hasAggs && (subqueryTree->groupClause == NULL))
	{
		preconditionsSatisfied = false;
		errorDetail = "Aggregates without group by are currently unsupported "
					  "when a subquery references a column from another query";
		errorDetail = psprintf("Aggregates without group by are currently unsupported "
							   "when a %ssubquery references a column from %s",
							   lateralString, referencedThing);
	}

	/* having clause without group by on partition column is not supported */
	if (subqueryTree->havingQual && (subqueryTree->groupClause == NULL))
	{
		preconditionsSatisfied = false;
		errorDetail = "Having qual without group by on partition column is "
					  "currently unsupported when a subquery references "
					  "a column from another query";
		errorDetail = psprintf("Having qual without group by on partition column is "
							   "currently unsupported when a %ssubquery references "
							   "a column from %s", lateralString, referencedThing);
	}

	/*
@@ -1397,6 +1467,259 @@ RelationInfoContainsOnlyRecurringTuples(PlannerInfo *plannerInfo, Relids relids)
}


/*
 * RecurringTypeDescription returns a descriptive string for the given
 * recurType. This string can be used in error messages to help the users
 * understand why a query cannot be planned.
 */
static char *
RecurringTypeDescription(RecurringTuplesType recurType)
{
	switch (recurType)
	{
		case RECURRING_TUPLES_REFERENCE_TABLE:
		{
			return "a reference table";
		}

		case RECURRING_TUPLES_FUNCTION:
		{
			return "a table function";
		}

		case RECURRING_TUPLES_EMPTY_JOIN_TREE:
		{
			return "a subquery without FROM";
		}

		case RECURRING_TUPLES_RESULT_FUNCTION:
		{
			return "complex subqueries, CTEs or local tables";
		}

		case RECURRING_TUPLES_VALUES:
		{
			return "a VALUES clause";
		}

		case RECURRING_TUPLES_INVALID:
		{
			/*
			 * This branch should never be hit, but it's here just in case it
			 * happens.
			 */
			return "an unknown recurring tuple";
		}
	}

	/*
	 * This should never be hit, but is needed to fix compiler warnings.
	 */
	return "an unknown recurring tuple";
}


/*
 * ContainsReferencesToRelids determines whether the given query contains
 * any references that point to columns of the given relids. The given relids
 * should be from exactly one query level above the given query.
 *
 * If the function returns true, then foundRelid is set to the first relid that
 * was referenced.
 *
 * There are some queries where it cannot easily be determined if the relids
 * are used, e.g. because the query contains placeholder vars. In those cases
 * this function returns true, because it's better to error out than to return
 * wrong results. But in these cases foundRelid is set to INVALID_RELID.
 */
static bool
ContainsReferencesToRelids(Query *query, Relids relids, int *foundRelid)
{
	RelidsReferenceWalkerContext context = { 0 };
	context.level = 1;
	context.relids = relids;
	context.foundRelid = INVALID_RELID;
	int flags = 0;

	if (query_tree_walker(query, ContainsReferencesToRelidsWalker,
						  &context, flags))
	{
		*foundRelid = context.foundRelid;
		return true;
	}
	return false;
}


/*
 * ContainsReferencesToRelidsWalker determines whether the given query
 * contains any Vars that reference the relids in the context.
 *
 * ContainsReferencesToRelidsWalker recursively descends into subqueries
 * and increases the level by 1 before recursing.
 */
static bool
ContainsReferencesToRelidsWalker(Node *node, RelidsReferenceWalkerContext *context)
{
	if (node == NULL)
	{
		return false;
	}

	if (IsA(node, Var))
	{
		Var *var = (Var *) node;
		if (var->varlevelsup == context->level && bms_is_member(var->varno,
																context->relids))
		{
			context->foundRelid = var->varno;
			return true;
		}

		return false;
	}
	else if (IsA(node, Aggref))
	{
		if (((Aggref *) node)->agglevelsup > context->level)
		{
			/*
			 * TODO: Only return true when aggref points to an aggregate that
			 * uses vars from a recurring tuple.
			 */
			return true;
		}
	}
	else if (IsA(node, GroupingFunc))
	{
		if (((GroupingFunc *) node)->agglevelsup > context->level)
		{
			/*
			 * TODO: Only return true when groupingfunc points to a grouping
			 * func that uses vars from a recurring tuple.
			 */
			return true;
		}

		return false;
	}
	else if (IsA(node, PlaceHolderVar))
	{
		if (((PlaceHolderVar *) node)->phlevelsup > context->level)
		{
			/*
			 * TODO: Only return true when the placeholder var
			 * uses vars from a recurring tuple.
			 */
			return true;
		}
	}
	else if (IsA(node, Query))
	{
		Query *query = (Query *) node;
		int flags = 0;

		context->level += 1;
		bool found = query_tree_walker(query, ContainsReferencesToRelidsWalker,
									   context, flags);
		context->level -= 1;

		return found;
	}

	return expression_tree_walker(node, ContainsReferencesToRelidsWalker,
								  context);
}

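To ground the varlevelsup bookkeeping in an example, consider the hypothetical query below; the table names are illustrative only:

/*
 * SELECT *
 * FROM ref_table r,
 *      LATERAL (SELECT * FROM dist_table d
 *               WHERE d.category = r.category LIMIT 1) sub;
 *
 * Inside sub, the Var for r.category has varlevelsup = 1 (one query level
 * up). Since r's RTE index is in context->relids, the walker above sets
 * context->foundRelid to r's relid and returns true, which is exactly the
 * "property number 2" check used by the lateral-subquery logic below.
 */
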
/*
 * DeferredErrorIfUnsupportedLateralSubquery returns a deferred error if
 * notFullyRecurringRelids contains a lateral subquery that we do not support.
 *
 * If there is an inner join with a lateral subquery we cannot
 * push it down when the following properties all hold:
 * 1. The lateral subquery contains some non recurring tuples
 * 2. The lateral subquery references a recurring tuple from
 *    outside of the subquery (recurringRelids)
 * 3. The lateral subquery requires a merge step (e.g. a LIMIT)
 * 4. The reference to the recurring tuple should be something else than an
 *    equality check on the distribution column, e.g. equality on a non
 *    distribution column.
 *
 * Property number four is considered both hard to detect and
 * probably not used very often, so we only check for 1, 2 and 3.
 */
static DeferredErrorMessage *
DeferredErrorIfUnsupportedLateralSubquery(PlannerInfo *plannerInfo,
										  Relids recurringRelids,
										  Relids notFullyRecurringRelids)
{
	int relationId = -1;
	while ((relationId = bms_next_member(notFullyRecurringRelids, relationId)) >= 0)
	{
		RangeTblEntry *rangeTableEntry = plannerInfo->simple_rte_array[relationId];

		if (!rangeTableEntry->lateral)
		{
			continue;
		}

		/* TODO: What about other kinds? */
		if (rangeTableEntry->rtekind == RTE_SUBQUERY)
		{
			/* property number 1, contains non-recurring tuples */
			if (!FindNodeMatchingCheckFunctionInRangeTableList(
					list_make1(rangeTableEntry), IsDistributedTableRTE))
			{
				continue;
			}

			/* property number 2, references recurring tuple */
			int recurringRelid = INVALID_RELID;
			if (!ContainsReferencesToRelids(rangeTableEntry->subquery, recurringRelids,
											&recurringRelid))
			{
				continue;
			}

			char *recurTypeDescription =
				"an aggregate, grouping func or placeholder var coming from the outer query";
			if (recurringRelid != INVALID_RELID)
			{
				RangeTblEntry *recurringRangeTableEntry =
					plannerInfo->simple_rte_array[recurringRelid];
				RecurringTuplesType recurType = RECURRING_TUPLES_INVALID;
				ContainsRecurringRTE(recurringRangeTableEntry, &recurType);
				recurTypeDescription = RecurringTypeDescription(recurType);

				/*
				 * Add the alias for all recurring tuples where it is useful to
				 * see them. We don't add it for VALUES and intermediate
				 * results, because there the aliases are currently hardcoded
				 * strings anyway.
				 */
				if (recurType != RECURRING_TUPLES_VALUES &&
					recurType != RECURRING_TUPLES_RESULT_FUNCTION)
				{
					recurTypeDescription = psprintf("%s (%s)", recurTypeDescription,
													recurringRangeTableEntry->eref->
													aliasname);
				}
			}

			/* property number 3, has a merge step */
			DeferredErrorMessage *deferredError = DeferErrorIfSubqueryRequiresMerge(
				rangeTableEntry->subquery, true, recurTypeDescription);
			if (deferredError)
			{
				return deferredError;
			}
		}
	}

	return NULL;
}


/*
 * FetchFirstRecurType checks whether the relationInfo
 * contains any recurring table expression, namely a reference table,
@@ -95,11 +95,42 @@
#include "utils/syscache.h"
#include "utils/varlena.h"

#include "columnar/mod.h"
#include "columnar/columnar.h"

/* marks shared object as one loadable by the postgres version compiled against */
PG_MODULE_MAGIC;

ColumnarSupportsIndexAM_type extern_ColumnarSupportsIndexAM = NULL;
CompressionTypeStr_type extern_CompressionTypeStr = NULL;
IsColumnarTableAmTable_type extern_IsColumnarTableAmTable = NULL;
ReadColumnarOptions_type extern_ReadColumnarOptions = NULL;

/*
 * Define "pass-through" functions so that a SQL function defined as one of
 * these symbols in the citus module can use the definition in the columnar
 * module.
 */
#define DEFINE_COLUMNAR_PASSTHROUGH_FUNC(funcname) \
	static PGFunction CppConcat(extern_, funcname); \
	PG_FUNCTION_INFO_V1(funcname); \
	Datum funcname(PG_FUNCTION_ARGS) \
	{ \
		return CppConcat(extern_, funcname)(fcinfo); \
	}
#define INIT_COLUMNAR_SYMBOL(typename, funcname) \
	CppConcat(extern_, funcname) = \
		(typename) (void *) lookup_external_function(handle, # funcname)

DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_handler)
DEFINE_COLUMNAR_PASSTHROUGH_FUNC(alter_columnar_table_set)
DEFINE_COLUMNAR_PASSTHROUGH_FUNC(alter_columnar_table_reset)
DEFINE_COLUMNAR_PASSTHROUGH_FUNC(upgrade_columnar_storage)
DEFINE_COLUMNAR_PASSTHROUGH_FUNC(downgrade_columnar_storage)
DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_relation_storageid)
DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_storage_info)
DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_store_memory_stats)
DEFINE_COLUMNAR_PASSTHROUGH_FUNC(test_columnar_storage_write_new_page)
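
For reference, DEFINE_COLUMNAR_PASSTHROUGH_FUNC(columnar_handler) expands (modulo whitespace) to the following, so each SQL-callable symbol in the citus module simply forwards to a function pointer resolved from the columnar module at load time:

static PGFunction extern_columnar_handler;
PG_FUNCTION_INFO_V1(columnar_handler);
Datum columnar_handler(PG_FUNCTION_ARGS)
{
	return extern_columnar_handler(fcinfo);
}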
||||
#define DUMMY_REAL_TIME_EXECUTOR_ENUM_VALUE 9999999
|
||||
static char *CitusVersion = CITUS_VERSION;
|
||||
|
||||
|
@ -323,12 +354,6 @@ _PG_init(void)
	original_client_auth_hook = ClientAuthentication_hook;
	ClientAuthentication_hook = CitusAuthHook;

	/*
	 * When the options change on a columnar table, we may need to propagate
	 * the changes to shards.
	 */
	ColumnarTableSetOptions_hook = ColumnarTableSetOptionsHook;

	InitializeMaintenanceDaemon();

	/* initialize coordinated transaction management */
@ -357,7 +382,50 @@ _PG_init(void)
	{
		DoInitialCleanup();
	}
	columnar_init();

	/* ensure columnar module is loaded at the right time */
	load_file(COLUMNAR_MODULE_NAME, false);

	/*
	 * Now, acquire symbols from columnar module. First, acquire
	 * the address of the set options hook, and set it so that we
	 * can propagate options changes.
	 */
	ColumnarTableSetOptions_hook_type **ColumnarTableSetOptions_hook_ptr =
		(ColumnarTableSetOptions_hook_type **) find_rendezvous_variable(
			COLUMNAR_SETOPTIONS_HOOK_SYM);

	/* rendezvous variable registered during columnar initialization */
	Assert(ColumnarTableSetOptions_hook_ptr != NULL);
	Assert(*ColumnarTableSetOptions_hook_ptr != NULL);

	**ColumnarTableSetOptions_hook_ptr = ColumnarTableSetOptionsHook;

	/*
	 * Acquire symbols for columnar functions that citus calls.
	 */
	void *handle = NULL;

	/* use load_external_function() the first time to initialize the handle */
	extern_ColumnarSupportsIndexAM = (ColumnarSupportsIndexAM_type) (void *)
		load_external_function(COLUMNAR_MODULE_NAME,
							   "ColumnarSupportsIndexAM",
							   true, &handle);

	INIT_COLUMNAR_SYMBOL(CompressionTypeStr_type, CompressionTypeStr);
	INIT_COLUMNAR_SYMBOL(IsColumnarTableAmTable_type, IsColumnarTableAmTable);
	INIT_COLUMNAR_SYMBOL(ReadColumnarOptions_type, ReadColumnarOptions);

	/* initialize symbols for "pass-through" functions */
	INIT_COLUMNAR_SYMBOL(PGFunction, columnar_handler);
	INIT_COLUMNAR_SYMBOL(PGFunction, alter_columnar_table_set);
	INIT_COLUMNAR_SYMBOL(PGFunction, alter_columnar_table_reset);
	INIT_COLUMNAR_SYMBOL(PGFunction, upgrade_columnar_storage);
	INIT_COLUMNAR_SYMBOL(PGFunction, downgrade_columnar_storage);
	INIT_COLUMNAR_SYMBOL(PGFunction, columnar_relation_storageid);
	INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info);
	INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats);
	INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page);
}
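The rendezvous variable used above is Postgres' standard way to share a pointer between two shared libraries without a link-time dependency. A minimal sketch of the publishing side, assuming the citus_columnar module registers the slot during its own _PG_init (the real citus_columnar code is not part of this diff):

#include "postgres.h"
#include "fmgr.h"                    /* find_rendezvous_variable() */
#include "columnar/columnar.h"       /* COLUMNAR_SETOPTIONS_HOOK_SYM, hook typedef */

ColumnarTableSetOptions_hook_type ColumnarTableSetOptions_hook = NULL;

static void
PublishSetOptionsHook(void)
{
	/* the slot holds the *address* of our module-local hook variable */
	ColumnarTableSetOptions_hook_type **hookSlot =
		(ColumnarTableSetOptions_hook_type **) find_rendezvous_variable(
			COLUMNAR_SETOPTIONS_HOOK_SYM);

	*hookSlot = &ColumnarTableSetOptions_hook;
}

With that in place, the `**ColumnarTableSetOptions_hook_ptr = ColumnarTableSetOptionsHook;` assignment in _PG_init above writes the citus callback through the shared slot into the columnar module's hook variable.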
@ -693,7 +761,7 @@ RegisterCitusConfigVariables(void)
			"off performance for full transactional consistency on the creation "
			"of new objects."),
		&CreateObjectPropagationMode,
		CREATE_OBJECT_PROPAGATION_DEFERRED, create_object_propagation_options,
		CREATE_OBJECT_PROPAGATION_IMMEDIATE, create_object_propagation_options,
		PGC_USERSET,
		GUC_NO_SHOW_ALL,
		NULL, NULL, NULL);
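The hunk above is only the tail of a DefineCustomEnumVariable() call, with the boot value flipped from DEFERRED to IMMEDIATE. For orientation, a minimal self-contained sketch of the same registration pattern; the entry names and values here are illustrative assumptions, not copied from the hunk:

#include "postgres.h"
#include "utils/guc.h"

static int CreateObjectPropagationMode = 0;

/* hypothetical option table, for illustration only */
static const struct config_enum_entry create_object_propagation_options[] = {
	{ "deferred", 0, false },
	{ "automatic", 1, false },
	{ "immediate", 2, false },
	{ NULL, 0, false }
};

static void
RegisterExampleEnumGuc(void)
{
	DefineCustomEnumVariable(
		"citus.create_object_propagation",                        /* GUC name */
		gettext_noop("Controls how object creation is propagated."),
		NULL,                                   /* no long description */
		&CreateObjectPropagationMode,           /* variable the GUC writes to */
		2,                                      /* boot value, e.g. "immediate" */
		create_object_propagation_options,
		PGC_USERSET,                            /* settable by any user */
		GUC_NO_SHOW_ALL,                        /* hidden from SHOW ALL */
		NULL, NULL, NULL);                      /* check/assign/show hooks */
}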
@ -828,25 +896,6 @@ RegisterCitusConfigVariables(void)
		GUC_NO_SHOW_ALL,
		NULL, NULL, NULL);

	/*
	 * We shouldn't need this variable after we drop support to PostgreSQL 11 and
	 * below. So, noting it here with PG_VERSION_NUM < PG_VERSION_12
	 */
	DefineCustomBoolVariable(
		"citus.enable_cte_inlining",
		gettext_noop("When set to false, CTE inlining feature is disabled."),
		gettext_noop(
			"This feature is not intended for users and it is deprecated. It is developed "
			"to get consistent regression test outputs between Postgres 11"
			"and Postgres 12. In Postgres 12+, the user can control the behaviour"
			"by [NOT] MATERIALIZED keyword on CTEs. However, in PG 11, we cannot do "
			"that."),
		&EnableCTEInlining,
		true,
		PGC_SUSET,
		GUC_NO_SHOW_ALL,
		NULL, NULL, NULL);

	DefineCustomBoolVariable(
		"citus.enable_ddl_propagation",
		gettext_noop("Enables propagating DDL statements to worker shards"),
@ -103,3 +103,5 @@ GRANT SELECT ON pg_catalog.pg_dist_object TO public;

#include "udfs/citus_nodeid_for_gpid/11.0-1.sql"
#include "udfs/citus_pid_for_gpid/11.0-1.sql"

#include "udfs/citus_coordinator_nodeid/11.0-1.sql"
@ -370,3 +370,5 @@ DROP FUNCTION pg_catalog.citus_nodeport_for_nodeid(integer);

DROP FUNCTION pg_catalog.citus_nodeid_for_gpid(bigint);
DROP FUNCTION pg_catalog.citus_pid_for_gpid(bigint);

DROP FUNCTION pg_catalog.citus_coordinator_nodeid();
@ -0,0 +1,7 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_coordinator_nodeid()
    RETURNS integer
    LANGUAGE C STABLE STRICT
    AS 'MODULE_PATHNAME', $$citus_coordinator_nodeid$$;

COMMENT ON FUNCTION pg_catalog.citus_coordinator_nodeid()
    IS 'returns node id of the coordinator node';
@ -0,0 +1,7 @@
CREATE OR REPLACE FUNCTION pg_catalog.citus_coordinator_nodeid()
    RETURNS integer
    LANGUAGE C STABLE STRICT
    AS 'MODULE_PATHNAME', $$citus_coordinator_nodeid$$;

COMMENT ON FUNCTION pg_catalog.citus_coordinator_nodeid()
    IS 'returns node id of the coordinator node';
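The two identical SQL files above (the versioned script and latest.sql) bind the UDF to a C symbol in the Citus module. The C side is not part of this diff; purely as a hedged sketch of the fmgr plumbing the stub expects, with LookupCoordinatorNodeId() as a hypothetical stand-in for the real metadata lookup:

#include "postgres.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(citus_coordinator_nodeid);

/* hypothetical stand-in; the real code would consult pg_dist_node */
static int32
LookupCoordinatorNodeId(void)
{
	return 0;
}

Datum
citus_coordinator_nodeid(PG_FUNCTION_ARGS)
{
	int32 coordinatorNodeId = LookupCoordinatorNodeId();

	PG_RETURN_INT32(coordinatorNodeId);
}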
@ -117,13 +117,12 @@ END;

    -- first, check if all nodes have the same versions
    SELECT
        count(*) INTO worker_node_version_count
        count(distinct result) INTO worker_node_version_count
    FROM
        run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'';')
        GROUP BY result;
        run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus''');
    IF enforce_version_check AND worker_node_version_count != 1 THEN
        RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently '
                        'the some of the workers has version different versions';
                        'some of the workers have different versions.';
    ELSE
        RAISE DEBUG 'All worker nodes have the same Citus version';
    END IF;
@ -117,13 +117,12 @@ END;

    -- first, check if all nodes have the same versions
    SELECT
        count(*) INTO worker_node_version_count
        count(distinct result) INTO worker_node_version_count
    FROM
        run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'';')
        GROUP BY result;
        run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus''');
    IF enforce_version_check AND worker_node_version_count != 1 THEN
        RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently '
                        'the some of the workers has version different versions';
                        'some of the workers have different versions.';
    ELSE
        RAISE DEBUG 'All worker nodes have the same Citus version';
    END IF;
@ -0,0 +1,40 @@
/*-------------------------------------------------------------------------
 *
 * hide_shards.c
 *
 * This file contains functions to provide helper UDFs for hiding
 * shards from the applications.
 *
 * Copyright (c) Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "pgstat.h"

#include "distributed/metadata_utility.h"
#include "distributed/worker_shard_visibility.h"


PG_FUNCTION_INFO_V1(set_backend_type);

/*
 * set_backend_type is an external API to set MyBackendType and
 * re-check the shard visibility.
 */
Datum
set_backend_type(PG_FUNCTION_ARGS)
{
	EnsureSuperUser();

	MyBackendType = PG_GETARG_INT32(0);

	elog(NOTICE, "backend type switched to: %s",
		 GetBackendTypeDesc(MyBackendType));

	ResetHideShardsDecision();

	PG_RETURN_VOID();
}
@ -245,7 +245,7 @@ InitializeTransactionManagement(void)
 * transaction independent connection management.
 *
 * NB: There should only ever be a single transaction callback in citus, the
 * ordering between the callbacks and thee actions within those callbacks
 * ordering between the callbacks and the actions within those callbacks
 * otherwise becomes too undeterministic / hard to reason about.
 */
static void
@ -30,8 +30,9 @@
#include "distributed/transaction_recovery.h"
#include "distributed/worker_manager.h"
#include "distributed/worker_transaction.h"
#include "distributed/jsonbutils.h"
#include "utils/memutils.h"

#include "utils/builtins.h"

static void SendCommandToMetadataWorkersParams(const char *command,
                                               const char *user, int parameterCount,
@ -71,7 +72,7 @@ void
SendCommandToWorkersAsUser(TargetWorkerSet targetWorkerSet, const char *nodeUser,
                           const char *command)
{
	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock);

	/* run commands serially */
	WorkerNode *workerNode = NULL;
@ -184,7 +185,7 @@ void
SendBareCommandListToMetadataWorkers(List *commandList)
{
	TargetWorkerSet targetWorkerSet = NON_COORDINATOR_METADATA_NODES;
	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock);
	char *nodeUser = CurrentUserName();

	ErrorIfAnyMetadataNodeOutOfSync(workerNodeList);
@ -225,7 +226,7 @@ SendCommandToMetadataWorkersParams(const char *command,
                                   const char *const *parameterValues)
{
	List *workerNodeList = TargetWorkerSetNodeList(NON_COORDINATOR_METADATA_NODES,
	                                               ShareLock);
	                                               RowShareLock);

	ErrorIfAnyMetadataNodeOutOfSync(workerNodeList);

@ -304,7 +305,7 @@ OpenConnectionsToWorkersInParallel(TargetWorkerSet targetWorkerSet, const char *
{
	List *connectionList = NIL;

	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock);

	WorkerNode *workerNode = NULL;
	foreach_ptr(workerNode, workerNodeList)
@ -373,7 +374,7 @@ SendCommandToWorkersParamsInternal(TargetWorkerSet targetWorkerSet, const char *
                                   const char *const *parameterValues)
{
	List *connectionList = NIL;
	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, ShareLock);
	List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock);

	UseCoordinatedTransaction();
	Use2PCForCoordinatedTransaction();
@ -639,3 +640,65 @@ ErrorIfAnyMetadataNodeOutOfSync(List *metadataNodeList)
		}
	}
}


/*
 * IsWorkerTheCurrentNode checks if the given worker refers to the
 * current node by comparing the server id of the worker and of the
 * current node from pg_dist_node_metadata.
 */
bool
IsWorkerTheCurrentNode(WorkerNode *workerNode)
{
	int connectionFlags = REQUIRE_METADATA_CONNECTION;

	MultiConnection *workerConnection =
		GetNodeUserDatabaseConnection(connectionFlags,
		                              workerNode->workerName,
		                              workerNode->workerPort,
		                              CurrentUserName(),
		                              NULL);

	const char *command =
		"SELECT metadata ->> 'server_id' AS server_id FROM pg_dist_node_metadata";

	int resultCode = SendRemoteCommand(workerConnection, command);

	if (resultCode == 0)
	{
		CloseConnection(workerConnection);
		return false;
	}

	PGresult *result = GetRemoteCommandResult(workerConnection, true);

	if (result == NULL)
	{
		return false;
	}

	List *commandResult = ReadFirstColumnAsText(result);

	PQclear(result);
	ForgetResults(workerConnection);

	if ((list_length(commandResult) != 1))
	{
		return false;
	}

	StringInfo resultInfo = (StringInfo) linitial(commandResult);
	char *workerServerId = resultInfo->data;

	Datum metadata = DistNodeMetadata();
	text *currentServerIdTextP = ExtractFieldTextP(metadata, "server_id");

	if (currentServerIdTextP == NULL)
	{
		return false;
	}

	char *currentServerId = text_to_cstring(currentServerIdTextP);

	return strcmp(workerServerId, currentServerId) == 0;
}
@ -303,9 +303,6 @@ MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId)
void
ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId)
{
	ListCell *leftShardIntervalCell = NULL;
	ListCell *rightShardIntervalCell = NULL;

	/* get sorted shard interval lists for both tables */
	List *leftShardIntervalList = LoadShardIntervalList(leftRelationId);
	List *rightShardIntervalList = LoadShardIntervalList(rightRelationId);
@ -329,15 +326,11 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId)
	}

	/* compare shard intervals one by one */
	forboth(leftShardIntervalCell, leftShardIntervalList,
			rightShardIntervalCell, rightShardIntervalList)
	ShardInterval *leftInterval = NULL;
	ShardInterval *rightInterval = NULL;
	forboth_ptr(leftInterval, leftShardIntervalList,
				rightInterval, rightShardIntervalList)
	{
		ShardInterval *leftInterval = (ShardInterval *) lfirst(leftShardIntervalCell);
		ShardInterval *rightInterval = (ShardInterval *) lfirst(rightShardIntervalCell);

		ListCell *leftPlacementCell = NULL;
		ListCell *rightPlacementCell = NULL;

		uint64 leftShardId = leftInterval->shardId;
		uint64 rightShardId = rightInterval->shardId;

@ -373,14 +366,11 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId)
		                  CompareShardPlacementsByNode);

		/* compare shard placements one by one */
		forboth(leftPlacementCell, sortedLeftPlacementList,
				rightPlacementCell, sortedRightPlacementList)
		ShardPlacement *leftPlacement = NULL;
		ShardPlacement *rightPlacement = NULL;
		forboth_ptr(leftPlacement, sortedLeftPlacementList,
					rightPlacement, sortedRightPlacementList)
		{
			ShardPlacement *leftPlacement =
				(ShardPlacement *) lfirst(leftPlacementCell);
			ShardPlacement *rightPlacement =
				(ShardPlacement *) lfirst(rightPlacementCell);

			/*
			 * If shard placements are on different nodes, these shard
			 * placements are not colocated.
@ -0,0 +1,113 @@
#include "postgres.h"

#include "pg_version_compat.h"

#include "catalog/namespace.h"
#include "catalog/pg_class.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_type.h"

#include "utils/array.h"
#include "utils/json.h"
#include "distributed/jsonbutils.h"
#include "distributed/metadata_cache.h"

#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "fmgr.h"


/*
 * ExtractFieldJsonb gets value of fieldName from jsonbDoc and puts it
 * into result. If not found, returns false. Otherwise, returns true.
 * The field is returned as a Text* Datum if as_text is true, or a Jsonb*
 * Datum if as_text is false.
 */
static bool
ExtractFieldJsonb(Datum jsonbDoc, const char *fieldName, Datum *result, bool as_text)
{
	Datum pathArray[1] = { CStringGetTextDatum(fieldName) };
	bool pathNulls[1] = { false };
	bool typeByValue = false;
	char typeAlignment = 0;
	int16 typeLength = 0;
	int dimensions[1] = { 1 };
	int lowerbounds[1] = { 1 };

	get_typlenbyvalalign(TEXTOID, &typeLength, &typeByValue, &typeAlignment);

	ArrayType *pathArrayObject = construct_md_array(pathArray, pathNulls, 1, dimensions,
	                                                lowerbounds, TEXTOID, typeLength,
	                                                typeByValue, typeAlignment);
	Datum pathDatum = PointerGetDatum(pathArrayObject);

	FmgrInfo fmgrInfo;

	if (as_text)
	{
		fmgr_info(JsonbExtractPathTextFuncId(), &fmgrInfo);
	}
	else
	{
		fmgr_info(JsonbExtractPathFuncId(), &fmgrInfo);
	}

	LOCAL_FCINFO(functionCallInfo, 2);
	InitFunctionCallInfoData(*functionCallInfo, &fmgrInfo, 2, DEFAULT_COLLATION_OID, NULL,
	                         NULL);

	fcSetArg(functionCallInfo, 0, jsonbDoc);
	fcSetArg(functionCallInfo, 1, pathDatum);

	*result = FunctionCallInvoke(functionCallInfo);
	return !functionCallInfo->isnull;
}


/*
 * ExtractFieldBoolean gets value of fieldName from jsonbDoc, or returns
 * defaultValue if it doesn't exist.
 */
bool
ExtractFieldBoolean(Datum jsonbDoc, const char *fieldName, bool defaultValue)
{
	Datum jsonbDatum = 0;
	bool found = ExtractFieldJsonb(jsonbDoc, fieldName, &jsonbDatum, false);
	if (!found)
	{
		return defaultValue;
	}

	Datum boolDatum = DirectFunctionCall1(jsonb_bool, jsonbDatum);
	return DatumGetBool(boolDatum);
}


/*
 * ExtractFieldTextP gets value of fieldName as text* from jsonbDoc, or
 * returns NULL if it doesn't exist.
 */
text *
ExtractFieldTextP(Datum jsonbDoc, const char *fieldName)
{
	Datum jsonbDatum = 0;

	bool found = ExtractFieldJsonb(jsonbDoc, fieldName, &jsonbDatum, true);
	if (!found)
	{
		return NULL;
	}

	return DatumGetTextP(jsonbDatum);
}


/*
 * ExtractFieldJsonbDatum gets value of fieldName from jsonbDoc and puts it
 * into result. If not found, returns false. Otherwise, returns true.
 */
bool
ExtractFieldJsonbDatum(Datum jsonbDoc, const char *fieldName, Datum *result)
{
	return ExtractFieldJsonb(jsonbDoc, fieldName, result, false);
}
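A quick usage sketch of the new helpers, assuming `metadata` is any jsonb Datum such as the one DistNodeMetadata() yields in IsWorkerTheCurrentNode() above; "example_flag" is a hypothetical key used only for illustration:

static void
InspectMetadata(Datum metadata)
{
	/* returns the supplied default (false) when the key is absent */
	bool exampleFlag = ExtractFieldBoolean(metadata, "example_flag", false);

	/* returns NULL when the key is absent */
	text *serverIdText = ExtractFieldTextP(metadata, "server_id");
	if (serverIdText != NULL)
	{
		char *serverId = text_to_cstring(serverIdText);
		elog(DEBUG1, "server_id=%s example_flag=%d", serverId, (int) exampleFlag);
	}
}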
@ -244,13 +244,10 @@ CompareStringList(List *list1, List *list2)
		return false;
	}

	ListCell *cell1 = NULL;
	ListCell *cell2 = NULL;
	forboth(cell1, list1, cell2, list2)
	const char *str1 = NULL;
	const char *str2 = NULL;
	forboth_ptr(str1, list1, str2, list2)
	{
		const char *str1 = lfirst(cell1);
		const char *str2 = lfirst(cell2);

		if (strcmp(str1, str2) != 0)
		{
			return false;
@ -286,18 +283,16 @@ CreateStmtListByObjectAddress(const ObjectAddress *address)

		case OCLASS_TSCONFIG:
		{
			/*
			 * We do support TEXT SEARCH CONFIGURATION, however, we can't recreate the
			 * object in 1 command. Since the returned text is compared to the create
			 * statement sql we always want the sql to be different compared to the
			 * canonical creation sql we return here, hence we return an empty string, as
			 * that should never match the sql we have passed in for the creation.
			 */

			List *stmts = GetCreateTextSearchConfigStatements(address);
			return DeparseTreeNodes(stmts);
		}

		case OCLASS_TSDICT:
		{
			List *stmts = GetCreateTextSearchDictionaryStatements(address);
			return DeparseTreeNodes(stmts);
		}

		case OCLASS_TYPE:
		{
			return list_make1(DeparseTreeNode(CreateTypeStmtByObjectAddress(address)));
@ -27,12 +27,16 @@
#include "distributed/listutils.h"
#include "distributed/metadata_utility.h"
#include "distributed/coordinator_protocol.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/metadata_cache.h"
#include "distributed/metadata/distobject.h"
#include "distributed/multi_partitioning_utils.h"
#include "distributed/worker_protocol.h"
#include "foreign/foreign.h"
#include "tcop/utility.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"

PG_FUNCTION_INFO_V1(worker_drop_distributed_table);
PG_FUNCTION_INFO_V1(worker_drop_shell_table);
@ -142,20 +146,11 @@ WorkerDropDistributedTable(Oid relationId)

	UnmarkObjectDistributed(&distributedTableObject);

	if (!IsObjectAddressOwnedByExtension(&distributedTableObject, NULL))
	{
		/*
		 * If the table is owned by an extension, we cannot drop it, nor should we
		 * until the user runs DROP EXTENSION. Therefore, we skip dropping the
		 * table and only delete the metadata.
		 *
		 * We drop the table with cascade since other tables may be referring to it.
		 */
		performDeletion(&distributedTableObject, DROP_CASCADE,
		                PERFORM_DELETION_INTERNAL);
	}

	/* iterate over shardList to delete the corresponding rows */
	/*
	 * Remove metadata before the object itself to make functions no-op within
	 * the drop event trigger for undistributed objects on worker nodes, except
	 * removing pg_dist_object entries.
	 */
	List *shardList = LoadShardList(relationId);
	uint64 *shardIdPointer = NULL;
	foreach_ptr(shardIdPointer, shardList)
@ -176,6 +171,33 @@ WorkerDropDistributedTable(Oid relationId)

	/* delete the row from pg_dist_partition */
	DeletePartitionRow(relationId);

	/*
	 * If the table is owned by an extension, we cannot drop it, nor should we
	 * until the user runs DROP EXTENSION. Therefore, we skip dropping the
	 * table.
	 */
	if (!IsObjectAddressOwnedByExtension(&distributedTableObject, NULL))
	{
		char *relName = get_rel_name(relationId);
		Oid schemaId = get_rel_namespace(relationId);
		char *schemaName = get_namespace_name(schemaId);

		StringInfo dropCommand = makeStringInfo();
		appendStringInfo(dropCommand, "DROP%sTABLE %s CASCADE",
		                 IsForeignTable(relationId) ? " FOREIGN " : " ",
		                 quote_qualified_identifier(schemaName, relName));

		Node *dropCommandNode = ParseTreeNode(dropCommand->data);

		/*
		 * We use ProcessUtilityParseTree (instead of performDeletion) to make sure that
		 * we also drop objects that depend on the table and call the drop event trigger
		 * which removes them from pg_dist_object.
		 */
		ProcessUtilityParseTree(dropCommandNode, dropCommand->data,
		                        PROCESS_UTILITY_QUERY, NULL, None_Receiver, NULL);
	}
}

@ -29,6 +29,7 @@
#include "commands/copy.h"
#include "commands/tablecmds.h"
#include "common/string.h"
#include "distributed/listutils.h"
#include "distributed/metadata_cache.h"
#include "distributed/worker_protocol.h"
#include "distributed/version_compat.h"
@ -436,14 +437,11 @@ List *
ColumnDefinitionList(List *columnNameList, List *columnTypeList)
{
	List *columnDefinitionList = NIL;
	ListCell *columnNameCell = NULL;
	ListCell *columnTypeCell = NULL;

	forboth(columnNameCell, columnNameList, columnTypeCell, columnTypeList)
	const char *columnName = NULL;
	const char *columnType = NULL;
	forboth_ptr(columnName, columnNameList, columnType, columnTypeList)
	{
		const char *columnName = (const char *) lfirst(columnNameCell);
		const char *columnType = (const char *) lfirst(columnTypeCell);

		/*
		 * We should have a SQL compatible column type declaration; we first
		 * convert this type to PostgreSQL's type identifiers and modifiers.
@ -8,6 +8,7 @@
 */

#include "postgres.h"
#include "miscadmin.h"

#include "catalog/index.h"
#include "catalog/namespace.h"
@ -47,6 +48,7 @@ static HideShardsMode HideShards = CHECK_APPLICATION_NAME;

static bool ShouldHideShards(void);
static bool ShouldHideShardsInternal(void);
static bool IsPgBgWorker(void);
static bool FilterShardsFromPgclass(Node *node, void *context);
static Node * CreateRelationIsAKnownShardFilter(int pgClassVarno);

@ -202,12 +204,15 @@ RelationIsAKnownShard(Oid shardRelationId)
		}
	}

	Relation relation = try_relation_open(shardRelationId, AccessShareLock);
	if (relation == NULL)
	/*
	 * We do not take locks here, because that might block a query on pg_class.
	 */

	if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(shardRelationId)))
	{
		/* relation does not exist */
		return false;
	}
	relation_close(relation, NoLock);

	/*
	 * If the input relation is an index we simply replace the
@ -331,6 +336,28 @@ ResetHideShardsDecision(void)
static bool
ShouldHideShardsInternal(void)
{
	if (MyBackendType == B_BG_WORKER)
	{
		if (IsPgBgWorker())
		{
			/*
			 * If a background worker belongs to Postgres, we should
			 * never hide shards. For other background workers, enforce
			 * the application_name check below.
			 */
			return false;
		}
	}
	else if (MyBackendType != B_BACKEND)
	{
		/*
		 * We are aiming only to hide shards from client
		 * backends or certain background workers (see above),
		 * not backends like walsender or checkpointer.
		 */
		return false;
	}

	if (IsCitusInternalBackend() || IsRebalancerInternalBackend())
	{
		/* we never hide shards from Citus */
@ -369,6 +396,24 @@ ShouldHideShardsInternal(void)
}


/*
 * IsPgBgWorker returns true if the current background worker
 * belongs to Postgres.
 */
static bool
IsPgBgWorker(void)
{
	Assert(MyBackendType == B_BG_WORKER);

	if (MyBgworkerEntry)
	{
		return strcmp(MyBgworkerEntry->bgw_library_name, "postgres") == 0;
	}

	return false;
}


/*
 * FilterShardsFromPgclass adds a NOT relation_is_a_known_shard(oid) filter
 * to the security quals of pg_class RTEs.
@ -25,6 +25,10 @@
#include "columnar/columnar_compression.h"
#include "columnar/columnar_metadata.h"

#define COLUMNAR_MODULE_NAME "citus_columnar"

#define COLUMNAR_SETOPTIONS_HOOK_SYM "ColumnarTableSetOptions_hook"

/* Defines for valid option names */
#define OPTION_NAME_COMPRESSION_TYPE "compression"
#define OPTION_NAME_STRIPE_ROW_COUNT "stripe_row_limit"
@ -187,6 +191,10 @@ typedef enum StripeWriteStateEnum
	STRIPE_WRITE_IN_PROGRESS
} StripeWriteStateEnum;

typedef bool (*ColumnarSupportsIndexAM_type)(char *);
typedef const char *(*CompressionTypeStr_type)(CompressionType);
typedef bool (*IsColumnarTableAmTable_type)(Oid);
typedef bool (*ReadColumnarOptions_type)(Oid, ColumnarOptions *);

/* ColumnarReadState represents state of a columnar scan. */
struct ColumnarReadState;
@ -205,8 +213,8 @@ extern int columnar_compression_level;

/* called when the user changes options on the given relation */
typedef void (*ColumnarTableSetOptions_hook_type)(Oid relid, ColumnarOptions options);
extern ColumnarTableSetOptions_hook_type ColumnarTableSetOptions_hook;

extern void columnar_init(void);
extern void columnar_init_gucs(void);

extern CompressionType ParseCompressionType(const char *compressionTypeString);
@ -315,5 +323,4 @@ extern bool PendingWritesInUpperTransactions(Oid relfilenode,
                                             SubTransactionId currentSubXid);
extern MemoryContext GetWriteContextForDebug(void);


#endif /* COLUMNAR_H */
@ -1,19 +0,0 @@
/*-------------------------------------------------------------------------
 *
 * mod.h
 *
 * Type and function declarations for columnar
 *
 * Copyright (c) Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */

#ifndef MOD_H
#define MOD_H

/* Function declarations for extension loading and unloading */
extern void columnar_init(void);
extern void columnar_fini(void);

#endif /* MOD_H */
@ -29,6 +29,7 @@
/* Function declarations for version independent Citus ruleutils wrapper functions */
extern char * pg_get_extensiondef_string(Oid tableRelationId);
extern Oid get_extension_schema(Oid ext_oid);
extern char * get_extension_version(Oid extensionId);
extern char * pg_get_serverdef_string(Oid tableRelationId);
extern char * pg_get_sequencedef_string(Oid sequenceRelid);
extern Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId);
@ -123,6 +123,8 @@ typedef enum SearchForeignKeyColumnFlags


/* aggregate.c - forward declarations */
extern List * PreprocessDefineAggregateStmt(Node *node, const char *queryString,
                                            ProcessUtilityContext processUtilityContext);
extern List * PostprocessDefineAggregateStmt(Node *node, const char *queryString);

/* cluster.c - forward declarations */
@ -146,6 +148,7 @@ extern List * PreprocessDropCollationStmt(Node *stmt, const char *queryString,
extern List * PreprocessAlterCollationOwnerStmt(Node *stmt, const char *queryString,
                                                ProcessUtilityContext
                                                processUtilityContext);
extern List * PostprocessAlterCollationOwnerStmt(Node *node, const char *queryString);
extern List * PreprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString,
                                                 ProcessUtilityContext
                                                 processUtilityContext);
@ -281,6 +284,7 @@ extern ObjectAddress RenameFunctionStmtObjectAddress(Node *stmt,
                                                     bool missing_ok);
extern List * PreprocessAlterFunctionOwnerStmt(Node *stmt, const char *queryString,
                                               ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterFunctionOwnerStmt(Node *stmt, const char *queryString);
extern ObjectAddress AlterFunctionOwnerObjectAddress(Node *stmt,
                                                     bool missing_ok);
extern List * PreprocessAlterFunctionSchemaStmt(Node *stmt, const char *queryString,
@ -432,6 +436,7 @@ extern List * PreprocessAlterStatisticsStmt(Node *node, const char *queryString,
extern List * PreprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString,
                                                 ProcessUtilityContext
                                                 processUtilityContext);
extern List * PostprocessAlterStatisticsOwnerStmt(Node *node, const char *queryString);
extern List * GetExplicitStatisticsCommandList(Oid relationId);
extern List * GetExplicitStatisticsSchemaIdList(Oid relationId);
extern List * GetAlterIndexStatisticsCommands(Oid indexOid);
@ -476,49 +481,94 @@ extern bool ConstrTypeUsesIndex(ConstrType constrType);
/* text_search.c - forward declarations */
extern List * PostprocessCreateTextSearchConfigurationStmt(Node *node,
                                                           const char *queryString);
extern List * PostprocessCreateTextSearchDictionaryStmt(Node *node,
                                                        const char *queryString);
extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address);
extern List * GetCreateTextSearchDictionaryStatements(const ObjectAddress *address);
extern List * CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address);
extern List * CreateTextSearchDictDDLCommandsIdempotent(const ObjectAddress *address);
extern List * PreprocessDropTextSearchConfigurationStmt(Node *node,
                                                        const char *queryString,
                                                        ProcessUtilityContext
                                                        processUtilityContext);
extern List * PreprocessDropTextSearchDictionaryStmt(Node *node,
                                                     const char *queryString,
                                                     ProcessUtilityContext
                                                     processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationStmt(Node *node,
                                                         const char *queryString,
                                                         ProcessUtilityContext
                                                         processUtilityContext);
extern List * PreprocessAlterTextSearchDictionaryStmt(Node *node,
                                                      const char *queryString,
                                                      ProcessUtilityContext
                                                      processUtilityContext);
extern List * PreprocessRenameTextSearchConfigurationStmt(Node *node,
                                                          const char *queryString,
                                                          ProcessUtilityContext
                                                          processUtilityContext);
extern List * PreprocessRenameTextSearchDictionaryStmt(Node *node,
                                                       const char *queryString,
                                                       ProcessUtilityContext
                                                       processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node,
                                                               const char *queryString,
                                                               ProcessUtilityContext
                                                               processUtilityContext);
extern List * PreprocessAlterTextSearchDictionarySchemaStmt(Node *node,
                                                            const char *queryString,
                                                            ProcessUtilityContext
                                                            processUtilityContext);
extern List * PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node,
                                                                const char *queryString);
extern List * PostprocessAlterTextSearchDictionarySchemaStmt(Node *node,
                                                             const char *queryString);
extern List * PreprocessTextSearchConfigurationCommentStmt(Node *node,
                                                           const char *queryString,
                                                           ProcessUtilityContext
                                                           processUtilityContext);
extern List * PreprocessTextSearchDictionaryCommentStmt(Node *node,
                                                        const char *queryString,
                                                        ProcessUtilityContext
                                                        processUtilityContext);
extern List * PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node,
                                                              const char *queryString,
                                                              ProcessUtilityContext
                                                              processUtilityContext);
extern List * PreprocessAlterTextSearchDictionaryOwnerStmt(Node *node,
                                                           const char *queryString,
                                                           ProcessUtilityContext
                                                           processUtilityContext);
extern List * PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node,
                                                               const char *queryString);
extern List * PostprocessAlterTextSearchDictionaryOwnerStmt(Node *node,
                                                            const char *queryString);
extern ObjectAddress CreateTextSearchConfigurationObjectAddress(Node *node,
                                                                bool missing_ok);
extern ObjectAddress CreateTextSearchDictObjectAddress(Node *node,
                                                       bool missing_ok);
extern ObjectAddress RenameTextSearchConfigurationStmtObjectAddress(Node *node,
                                                                    bool missing_ok);
extern ObjectAddress RenameTextSearchDictionaryStmtObjectAddress(Node *node,
                                                                 bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationStmtObjectAddress(Node *node,
                                                                   bool missing_ok);
extern ObjectAddress AlterTextSearchDictionaryStmtObjectAddress(Node *node,
                                                                bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node,
                                                                         bool missing_ok);
extern ObjectAddress AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node,
                                                                      bool missing_ok);
extern ObjectAddress TextSearchConfigurationCommentObjectAddress(Node *node,
                                                                 bool missing_ok);
extern ObjectAddress TextSearchDictCommentObjectAddress(Node *node,
                                                        bool missing_ok);
extern ObjectAddress AlterTextSearchConfigurationOwnerObjectAddress(Node *node,
                                                                    bool missing_ok);
extern ObjectAddress AlterTextSearchDictOwnerObjectAddress(Node *node,
                                                           bool missing_ok);
extern char * GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address);
extern char * GenerateBackupNameForTextSearchDict(const ObjectAddress *address);
extern List * get_ts_config_namelist(Oid tsconfigOid);

/* truncate.c - forward declarations */
@ -18,6 +18,7 @@
#include "lib/ilist.h"
#include "pg_config.h"
#include "portability/instr_time.h"
#include "storage/latch.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
#include "utils/timestamp.h"
@ -34,6 +35,10 @@
/* application name used for internal connections in rebalancer */
#define CITUS_REBALANCER_NAME "citus_rebalancer"

/* deal with waiteventset errors */
#define WAIT_EVENT_SET_INDEX_NOT_INITIALIZED -1
#define WAIT_EVENT_SET_INDEX_FAILED -2

/* forward declare, to avoid forcing large headers on everyone */
struct pg_conn; /* target of the PGconn typedef */
struct MemoryContextData;
@ -284,6 +289,13 @@ extern bool IsCitusInternalBackend(void);
extern bool IsRebalancerInternalBackend(void);
extern void MarkConnectionConnected(MultiConnection *connection);

/* waiteventset utilities */
extern int CitusAddWaitEventSetToSet(WaitEventSet *set, uint32 events, pgsocket fd,
                                     Latch *latch, void *user_data);

extern bool CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events,
                                 Latch *latch);

/* time utilities */
extern double MillisecondsPassedSince(instr_time moment);
extern long MillisecondsToTimeout(instr_time start, long msAfterStart);
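These wrappers, together with the WAIT_EVENT_SET_INDEX_* sentinels above, exist to cope with wait-event-set failures. A hedged sketch of the expected calling pattern; the sentinel-returning behavior is an assumption, since the wrapper bodies are not part of this diff:

/* sketch: assumes CitusAddWaitEventSetToSet returns WAIT_EVENT_SET_INDEX_FAILED
 * instead of throwing when the event cannot be added */
static void
WaitForConnectionSocket(WaitEventSet *waitEventSet, pgsocket sock)
{
	int waitEventSetIndex =
		CitusAddWaitEventSetToSet(waitEventSet, WL_SOCKET_READABLE, sock,
		                          NULL /* latch */, NULL /* user_data */);

	if (waitEventSetIndex == WAIT_EVENT_SET_INDEX_FAILED)
	{
		ereport(WARNING, (errmsg("could not add socket to wait event set")));
		return;
	}

	/* later, re-arm the same position for writes as well */
	if (!CitusModifyWaitEvent(waitEventSet, waitEventSetIndex,
	                          WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE, NULL))
	{
		ereport(WARNING, (errmsg("could not modify wait event position")));
	}
}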
@ -13,8 +13,6 @@

#include "nodes/parsenodes.h"

extern bool EnableCTEInlining;

extern void RecursivelyInlineCtesInQueryTree(Query *query);
extern bool QueryTreeContainsInlinableCTE(Query *queryTree);

@ -63,14 +63,21 @@ extern char * DeparseAlterTableStmt(Node *node);

extern void QualifyAlterTableSchemaStmt(Node *stmt);

/* foward declarations fro deparse_text_search.c */
extern char * DeparseCreateTextSearchStmt(Node *node);
extern char * DeparseDropTextSearchConfigurationStmt(Node *node);
extern char * DeparseRenameTextSearchConfigurationStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationSchemaStmt(Node *node);
extern char * DeparseTextSearchConfigurationCommentStmt(Node *node);
/* forward declarations for deparse_text_search.c */
extern char * DeparseAlterTextSearchConfigurationOwnerStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationSchemaStmt(Node *node);
extern char * DeparseAlterTextSearchConfigurationStmt(Node *node);
extern char * DeparseAlterTextSearchDictionaryOwnerStmt(Node *node);
extern char * DeparseAlterTextSearchDictionarySchemaStmt(Node *node);
extern char * DeparseAlterTextSearchDictionaryStmt(Node *node);
extern char * DeparseCreateTextSearchConfigurationStmt(Node *node);
extern char * DeparseCreateTextSearchDictionaryStmt(Node *node);
extern char * DeparseDropTextSearchConfigurationStmt(Node *node);
extern char * DeparseDropTextSearchDictionaryStmt(Node *node);
extern char * DeparseRenameTextSearchConfigurationStmt(Node *node);
extern char * DeparseRenameTextSearchDictionaryStmt(Node *node);
extern char * DeparseTextSearchConfigurationCommentStmt(Node *node);
extern char * DeparseTextSearchDictionaryCommentStmt(Node *node);

/* forward declarations for deparse_schema_stmts.c */
extern char * DeparseCreateSchemaStmt(Node *node);

@ -153,13 +160,19 @@ extern char * DeparseAlterExtensionStmt(Node *stmt);
/* forward declarations for deparse_database_stmts.c */
extern char * DeparseAlterDatabaseOwnerStmt(Node *node);

/* forward declatations for depatse_text_search_stmts.c */
extern void QualifyDropTextSearchConfigurationStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationStmt(Node *node);
extern void QualifyRenameTextSearchConfigurationStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node);
extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
/* forward declarations for deparse_text_search_stmts.c */
extern void QualifyAlterTextSearchConfigurationOwnerStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node);
extern void QualifyAlterTextSearchConfigurationStmt(Node *node);
extern void QualifyAlterTextSearchDictionaryOwnerStmt(Node *node);
extern void QualifyAlterTextSearchDictionarySchemaStmt(Node *node);
extern void QualifyAlterTextSearchDictionaryStmt(Node *node);
extern void QualifyDropTextSearchConfigurationStmt(Node *node);
extern void QualifyDropTextSearchDictionaryStmt(Node *node);
extern void QualifyRenameTextSearchConfigurationStmt(Node *node);
extern void QualifyRenameTextSearchDictionaryStmt(Node *node);
extern void QualifyTextSearchConfigurationCommentStmt(Node *node);
extern void QualifyTextSearchDictionaryCommentStmt(Node *node);

/* forward declarations for deparse_sequence_stmts.c */
extern char * DeparseDropSequenceStmt(Node *node);
@ -0,0 +1,20 @@
/*-------------------------------------------------------------------------
 *
 * jsonbutils.h
 *
 * Declarations for public utility functions related to jsonb.
 *
 * Copyright (c) Citus Data, Inc.
 *
 *-------------------------------------------------------------------------
 */
#ifndef CITUS_JSONBUTILS_H
#define CITUS_JSONBUTILS_H

#include "postgres.h"

bool ExtractFieldJsonbDatum(Datum jsonbDoc, const char *fieldName, Datum *result);
text * ExtractFieldTextP(Datum jsonbDoc, const char *fieldName);
bool ExtractFieldBoolean(Datum jsonbDoc, const char *fieldName, bool defaultValue);

#endif /* CITUS_JSONBUTILS_H */
@ -80,6 +80,59 @@ typedef struct ListCellAndListWrapper
	 (((var) = lfirst_oid(var ## CellDoNotUse)) || true); \
	 var ## CellDoNotUse = lnext_compat(l, var ## CellDoNotUse))

/*
 * forboth_ptr -
 *    a convenience macro which loops through two lists of pointers at the same
 *    time, without needing a ListCell. It only needs two declared pointer
 *    variables to store the pointer of each of the two cells in.
 */
#define forboth_ptr(var1, l1, var2, l2) \
	for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
		 *(var2 ## CellDoNotUse) = list_head(l2); \
		 (var1 ## CellDoNotUse) != NULL && \
		 (var2 ## CellDoNotUse) != NULL && \
		 (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \
		 (((var2) = lfirst(var2 ## CellDoNotUse)) || true); \
		 var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \
		 var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \
		 )

/*
 * forboth_ptr_oid -
 *    a convenience macro which loops through two lists at the same time. The
 *    first list should contain pointers and the second list should contain
 *    Oids. It does not need a ListCell to do this. It only needs two declared
 *    variables to store the pointer and the Oid of each of the two cells in.
 */
#define forboth_ptr_oid(var1, l1, var2, l2) \
	for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
		 *(var2 ## CellDoNotUse) = list_head(l2); \
		 (var1 ## CellDoNotUse) != NULL && \
		 (var2 ## CellDoNotUse) != NULL && \
		 (((var1) = lfirst(var1 ## CellDoNotUse)) || true) && \
		 (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \
		 var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \
		 var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \
		 )

/*
 * forboth_int_oid -
 *    a convenience macro which loops through two lists at the same time. The
 *    first list should contain integers and the second list should contain
 *    Oids. It does not need a ListCell to do this. It only needs two declared
 *    variables to store the int and the Oid of each of the two cells in.
 */
#define forboth_int_oid(var1, l1, var2, l2) \
	for (ListCell *(var1 ## CellDoNotUse) = list_head(l1), \
		 *(var2 ## CellDoNotUse) = list_head(l2); \
		 (var1 ## CellDoNotUse) != NULL && \
		 (var2 ## CellDoNotUse) != NULL && \
		 (((var1) = lfirst_int(var1 ## CellDoNotUse)) || true) && \
		 (((var2) = lfirst_oid(var2 ## CellDoNotUse)) || true); \
		 var1 ## CellDoNotUse = lnext_compat(l1, var1 ## CellDoNotUse), \
		 var2 ## CellDoNotUse = lnext_compat(l2, var2 ## CellDoNotUse) \
		 )

/*
 * foreach_ptr_append -
 *    a convenience macro which loops through a pointer List and can append list
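A minimal usage sketch of forboth_ptr, mirroring the CompareStringList() conversion earlier in this commit (List comes from pg_list.h via listutils.h):

static bool
StringListsAreEqual(List *list1, List *list2)
{
	const char *str1 = NULL;
	const char *str2 = NULL;
	forboth_ptr(str1, list1, str2, list2)
	{
		/* both loop variables advance in lockstep, one element per list */
		if (strcmp(str1, str2) != 0)
		{
			return false;
		}
	}

	return true;
}

The payoff visible throughout this diff: callers declare two typed pointers instead of two ListCells plus two lfirst() casts inside the loop body.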
@ -256,6 +256,7 @@ extern Oid PgTableVisibleFuncId(void);
extern Oid CitusTableVisibleFuncId(void);
extern Oid RelationIsAKnownShardFuncId(void);
extern Oid JsonbExtractPathFuncId(void);
extern Oid JsonbExtractPathTextFuncId(void);

/* enum oids */
extern Oid PrimaryNodeRoleId(void);
@ -290,7 +290,8 @@ extern bool GetNodeDiskSpaceStatsForConnection(MultiConnection *connection,
                                               uint64 *availableBytes,
                                               uint64 *totalBytes);
extern void ExecuteQueryViaSPI(char *query, int SPIOK);
extern void EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId, Oid ownerRelationId);
extern void EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid
                                        ownerRelationId);
extern void AlterSequenceType(Oid seqOid, Oid typeOid);
extern void EnsureRelationHasCompatibleSequenceTypes(Oid relationId);
#endif /* METADATA_UTILITY_H */
@ -11,10 +11,17 @@
#ifndef SHARED_LIBRARY_INIT_H
#define SHARED_LIBRARY_INIT_H

#include "columnar/columnar.h"

#define GUC_STANDARD 0
#define MAX_SHARD_COUNT 64000
#define MAX_SHARD_REPLICATION_FACTOR 100

extern ColumnarSupportsIndexAM_type extern_ColumnarSupportsIndexAM;
extern CompressionTypeStr_type extern_CompressionTypeStr;
extern IsColumnarTableAmTable_type extern_IsColumnarTableAmTable;
extern ReadColumnarOptions_type extern_ReadColumnarOptions;

extern void StartupCitusBackend(void);

#endif /* SHARED_LIBRARY_INIT_H */
@ -70,4 +70,6 @@ extern void RemoveWorkerTransaction(const char *nodeName, int32 nodePort);
/* helper functions for worker transactions */
extern bool IsWorkerTransactionActive(void);

extern bool IsWorkerTheCurrentNode(WorkerNode *workerNode);

#endif /* WORKER_TRANSACTION_H */
@ -7,3 +7,4 @@ test: multi_test_catalog_views
test: multi_create_table multi_behavioral_analytics_create_table
test: multi_create_table_superuser multi_behavioral_analytics_create_table_superuser
test: multi_load_data multi_load_data_superuser tablespace
test: check_mx
@ -263,3 +263,23 @@ s/issuing SELECT pg_cancel_backend\([0-9]+::integer\)/issuing SELECT pg_cancel_b

# node id in run_command_on_all_nodes warning
s/Error on node with node id [0-9]+/Error on node with node id xxxxx/g

# Temp schema names in error messages regarding dependencies that we cannot distribute
#
# 1) Schema of the depending object in the error message:
#
# e.g.:
# WARNING: "function pg_temp_3.f(bigint)" has dependency on unsupported object "<foo>"
# will be replaced with
# WARNING: "function pg_temp_xxx.f(bigint)" has dependency on unsupported object "<foo>"
s/^(WARNING|ERROR)(: "[a-z\ ]+ )pg_temp_[0-9]+(\..*" has dependency on unsupported object ".*")$/\1\2pg_temp_xxx\3/g

# 2) Schema of the depending object in the error detail:
s/^(DETAIL: "[a-z\ ]+ )pg_temp_[0-9]+(\..*" will be created only locally)$/\1pg_temp_xxx\2/g

# 3) Schema that the object depends on, in the error message:
# e.g.:
# WARNING: "function func(bigint)" has dependency on unsupported object "schema pg_temp_3"
# will be replaced with
# WARNING: "function func(bigint)" has dependency on unsupported object "schema pg_temp_xxx"
s/^(WARNING|ERROR)(: "[a-z\ ]+ .*" has dependency on unsupported object) "schema pg_temp_[0-9]+"$/\1\2 "schema pg_temp_xxx"/g
@ -69,6 +69,10 @@ So the infrastructure tests:
When you want to add a new test, you can add the create statements to `create_schedule` and add the sql queries to `sql_schedule`.
If you are adding Citus UDFs that should be a NO-OP for Postgres, make sure to override the UDFs in `postgres.sql`.

If a test needs to be skipped in some configs, add the test names to the `skip_tests` array of
each such config. The test files associated with the skipped tests will be set to empty, so the
test will pass without actually being run.

## Adding a new config

You can add your new config to `config.py`. Make sure to extend either `CitusDefaultClusterConfig` or `CitusMXBaseClusterConfig`.
@ -55,7 +55,6 @@ def run_for_config(config, lock, sql_schedule_name):
    if config.user == cfg.REGULAR_USER_NAME:
        common.create_role(
            config.bindir,
            config.coordinator_port(),
            config.node_name_to_ports.values(),
            config.user,
        )
@ -129,13 +128,24 @@ def copy_test_files(config):
            colon_index = line.index(":")
            line = line[colon_index + 1 :].strip()
            test_names = line.split(" ")
            copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path)
            copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path, config)


def copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path):
def copy_test_files_with_names(test_names, sql_dir_path, expected_dir_path, config):
    for test_name in test_names:
        # make empty files for the skipped tests
        if test_name in config.skip_tests:
            expected_sql_file = os.path.join(sql_dir_path, test_name + ".sql")
            open(expected_sql_file, 'x').close()

            expected_out_file = os.path.join(expected_dir_path, test_name + ".out")
            open(expected_out_file, 'x').close()

            continue

        sql_name = os.path.join("./sql", test_name + ".sql")
        output_name = os.path.join("./expected", test_name + ".out")

        shutil.copy(sql_name, sql_dir_path)
        if os.path.isfile(output_name):
            # it might be the first time we run this test and the expected file
@ -3,6 +3,7 @@ import shutil
import sys
import subprocess
import atexit
import concurrent.futures

import utils
from utils import USER, cd
@ -24,9 +25,19 @@ def initialize_temp_dir_if_not_exists(temp_dir):
    os.chmod(temp_dir, 0o777)


def parallel_run(function, items, *args, **kwargs):
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = [
            executor.submit(function, item, *args, **kwargs)
            for item in items
        ]
        for future in futures:
            future.result()

def initialize_db_for_cluster(pg_path, rel_data_path, settings, node_names):
    subprocess.run(["mkdir", rel_data_path], check=True)
    for node_name in node_names:

    def initialize(node_name):
        abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name))
        command = [
            os.path.join(pg_path, "initdb"),
@ -34,10 +45,18 @@ def initialize_db_for_cluster(pg_path, rel_data_path, settings, node_names):
            abs_data_path,
            "--username",
            USER,
            "--no-sync",
            # --allow-group-access is used to ensure we set permissions on
            # private keys correctly
            "--allow-group-access",
            "--encoding",
            "UTF8"
        ]
        subprocess.run(command, check=True)
        add_settings(abs_data_path, settings)

    parallel_run(initialize, node_names)


def add_settings(abs_data_path, settings):
    conf_path = os.path.join(abs_data_path, "postgresql.conf")
@ -49,15 +68,17 @@ def add_settings(abs_data_path, settings):
        conf_file.write(setting)


def create_role(pg_path, port, node_ports, user_name):
    for port in node_ports:
        command = "SELECT worker_create_or_alter_role('{}', 'CREATE ROLE {} WITH LOGIN CREATEROLE CREATEDB;', NULL)".format(
def create_role(pg_path, node_ports, user_name):
    def create(port):
        command = "SET citus.enable_ddl_propagation TO OFF; SELECT worker_create_or_alter_role('{}', 'CREATE ROLE {} WITH LOGIN CREATEROLE CREATEDB;', NULL)".format(
            user_name, user_name
        )
        utils.psql(pg_path, port, command)
        command = "GRANT CREATE ON DATABASE postgres to {}".format(user_name)
        command = "SET citus.enable_ddl_propagation TO OFF; GRANT CREATE ON DATABASE postgres to {}".format(user_name)
        utils.psql(pg_path, port, command)

    parallel_run(create, node_ports)


def coordinator_should_haveshards(pg_path, port):
    command = "SELECT citus_set_node_property('localhost', {}, 'shouldhaveshards', true)".format(
@ -67,7 +88,7 @@ def coordinator_should_haveshards(pg_path, port):


def start_databases(pg_path, rel_data_path, node_name_to_ports, logfile_prefix, env_variables):
    for node_name in node_name_to_ports.keys():
    def start(node_name):
        abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name))
        node_port = node_name_to_ports[node_name]
        command = [
@ -89,6 +110,11 @@ def start_databases(pg_path, rel_data_path, node_name_to_ports, logfile_prefix,

        subprocess.run(command, check=True)

    parallel_run(start, node_name_to_ports.keys())

    # We don't want parallel shutdown here because that will fail when it's
    # tried in this atexit call with an error like:
    # cannot schedule new futures after interpreter shutdown
    atexit.register(
        stop_databases,
        pg_path,
@ -96,13 +122,16 @@ def start_databases(pg_path, rel_data_path, node_name_to_ports, logfile_prefix,
        node_name_to_ports,
        logfile_prefix,
        no_output=True,
        parallel=False,
    )


def create_citus_extension(pg_path, node_ports):
    for port in node_ports:
    def create(port):
        utils.psql(pg_path, port, "CREATE EXTENSION citus;")

    parallel_run(create, node_ports)


def run_pg_regress(pg_path, pg_srcdir, port, schedule):
    should_exit = True
@ -215,9 +244,9 @@ def logfile_name(logfile_prefix, node_name):


def stop_databases(
    pg_path, rel_data_path, node_name_to_ports, logfile_prefix, no_output=False
    pg_path, rel_data_path, node_name_to_ports, logfile_prefix, no_output=False, parallel=True
):
    for node_name in node_name_to_ports.keys():
    def stop(node_name):
        abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name))
        node_port = node_name_to_ports[node_name]
        command = [
@ -239,6 +268,12 @@ def stop_databases(
    else:
        subprocess.call(command)

    if parallel:
        parallel_run(stop, node_name_to_ports.keys())
    else:
        for node_name in node_name_to_ports.keys():
            stop(node_name)


def initialize_citus_cluster(bindir, datadir, settings, config):
    # In case there was a leftover from previous runs, stop the databases
@ -58,10 +58,7 @@ port_lock = threading.Lock()

def should_include_config(class_name):

    if inspect.isclass(class_name) and (
        issubclass(class_name, CitusMXBaseClusterConfig)
        or issubclass(class_name, CitusDefaultClusterConfig)
    ):
    if inspect.isclass(class_name) and issubclass(class_name, CitusDefaultClusterConfig):
        return True
    return False

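With CitusMXBaseClusterConfig folded away, a single issubclass check against CitusDefaultClusterConfig now selects every runnable configuration. A sketch of how a predicate like should_include_config is typically applied to collect the configs from a module (the discovery loop is illustrative; only the predicate appears in this diff):

    import inspect
    import sys

    def discover_configs(predicate):
        # Return every class defined or imported in this module that the
        # predicate accepts, e.g. discover_configs(should_include_config)
        # would yield each CitusDefaultClusterConfig subclass.
        module = sys.modules[__name__]
        return [cls for _, cls in inspect.getmembers(module, inspect.isclass)
                if predicate(cls)]
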
@ -97,12 +94,13 @@ class CitusBaseClusterConfig(object, metaclass=NewInitCaller):
        self.temp_dir = CITUS_ARBITRARY_TEST_DIR
        self.worker_amount = 2
        self.user = REGULAR_USER_NAME
        self.is_mx = False
        self.is_mx = True
        self.is_citus = True
        self.name = type(self).__name__
        self.settings = {
            "shared_preload_libraries": "citus",
            "log_error_verbosity": "terse",
            "fsync": False,
            "citus.node_conninfo": "sslmode=prefer",
            "citus.enable_repartition_joins": True,
            "citus.repartition_join_bucket_count_per_node": 2,

@ -111,6 +109,7 @@ class CitusBaseClusterConfig(object, metaclass=NewInitCaller):
        self.new_settings = {}
        self.add_coordinator_to_metadata = False
        self.env_variables = {}
        self.skip_tests = []

    def post_init(self):
        self._init_node_name_ports()

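Subclasses customize a cluster by filling new_settings rather than rewriting the shared settings dict, so the overrides presumably get merged over the base GUCs before postgresql.conf is written. A sketch of that merge under this assumption:

    def effective_settings(base_settings, new_settings):
        # Later keys win: a subclass override shadows the base GUC of the
        # same name while everything else is inherited unchanged.
        merged = dict(base_settings)
        merged.update(new_settings)
        return merged

For example, CitusSingleShardClusterConfig below would end up with the shared defaults plus "citus.shard_count" forced to 1.
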
@ -167,12 +166,6 @@ class CitusDefaultClusterConfig(CitusBaseClusterConfig):
        self.add_coordinator_to_metadata = True


class CitusMXBaseClusterConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.is_mx = True


class CitusUpgradeConfig(CitusBaseClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)

@ -183,6 +176,7 @@ class CitusUpgradeConfig(CitusBaseClusterConfig):
        self.user = SUPER_USER_NAME
        self.mixed_mode = arguments["--mixed"]
        self.fixed_port = 57635
        self.is_mx = False


class PostgresConfig(CitusDefaultClusterConfig):

@ -204,19 +198,19 @@ class CitusSingleNodeClusterConfig(CitusDefaultClusterConfig):
        common.coordinator_should_haveshards(self.bindir, self.coordinator_port())


class CitusSingleWorkerClusterConfig(CitusMXBaseClusterConfig):
class CitusSingleWorkerClusterConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.worker_amount = 1


class CitusSuperUserDefaultClusterConfig(CitusMXBaseClusterConfig):
class CitusSuperUserDefaultClusterConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.user = SUPER_USER_NAME


class CitusThreeWorkersManyShardsClusterConfig(CitusMXBaseClusterConfig):
class CitusThreeWorkersManyShardsClusterConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {"citus.shard_count": 191}

@ -226,7 +220,7 @@ class CitusThreeWorkersManyShardsClusterConfig(CitusMXBaseClusterConfig):
        common.coordinator_should_haveshards(self.bindir, self.coordinator_port())


class CitusSmallSharedPoolSizeConfig(CitusMXBaseClusterConfig):
class CitusSmallSharedPoolSizeConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {

@ -235,7 +229,7 @@ class CitusSmallSharedPoolSizeConfig(CitusMXBaseClusterConfig):
        }


class CitusSmallExecutorPoolSizeConfig(CitusMXBaseClusterConfig):
class CitusSmallExecutorPoolSizeConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {

@ -243,7 +237,7 @@ class CitusSmallExecutorPoolSizeConfig(CitusMXBaseClusterConfig):
        }


class CitusSequentialExecutionConfig(CitusMXBaseClusterConfig):
class CitusSequentialExecutionConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {

@ -251,7 +245,7 @@ class CitusSequentialExecutionConfig(CitusMXBaseClusterConfig):
        }


class CitusCacheManyConnectionsConfig(CitusMXBaseClusterConfig):
class CitusCacheManyConnectionsConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {

@ -259,7 +253,7 @@ class CitusCacheManyConnectionsConfig(CitusMXBaseClusterConfig):
        }


class CitusUnusualExecutorConfig(CitusMXBaseClusterConfig):
class CitusUnusualExecutorConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {

@ -280,7 +274,7 @@ class CitusUnusualExecutorConfig(CitusMXBaseClusterConfig):
        self.env_variables = {'PGAPPNAME' : 'test_app'}


class CitusSmallCopyBuffersConfig(CitusMXBaseClusterConfig):
class CitusSmallCopyBuffersConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {

@ -290,7 +284,7 @@ class CitusSmallCopyBuffersConfig(CitusMXBaseClusterConfig):
        }


class CitusUnusualQuerySettingsConfig(CitusMXBaseClusterConfig):
class CitusUnusualQuerySettingsConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {

@ -304,6 +298,13 @@ class CitusUnusualQuerySettingsConfig(CitusMXBaseClusterConfig):
            "citus.values_materialization_threshold": "0",
        }

        self.skip_tests = [
            # Creating a reference table from a table referred to by a fk
            # requires the table with the fk to be converted to a citus_local_table.
            # As of c11, there is no way to do that through remote execution so this test
            # will fail
            "arbitrary_configs_truncate_cascade_create", "arbitrary_configs_truncate_cascade"]


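skip_tests names regress tests that the runner must drop from the schedule for this configuration. How exactly the runner consumes the list is not shown in this diff; a sketch of one plausible filter over "test:" schedule lines, with a hypothetical helper name:

    def filter_schedule(schedule_lines, skip_tests):
        # Remove skipped test names from every "test:" group and drop any
        # group that becomes empty as a result.
        kept = []
        for line in schedule_lines:
            if line.startswith("test:"):
                names = [n for n in line.split()[1:] if n not in skip_tests]
                if not names:
                    continue
                line = "test: " + " ".join(names)
            kept.append(line)
        return kept
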
class CitusSingleNodeSingleShardClusterConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):

@ -315,19 +316,26 @@ class CitusSingleNodeSingleShardClusterConfig(CitusDefaultClusterConfig):
        common.coordinator_should_haveshards(self.bindir, self.coordinator_port())


class CitusShardReplicationFactorClusterConfig(CitusMXBaseClusterConfig):
class CitusShardReplicationFactorClusterConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {"citus.shard_replication_factor": 2}
        self.skip_tests = [
            # citus does not support foreign keys in distributed tables
            # when citus.shard_replication_factor >= 2
            "arbitrary_configs_truncate_partition_create", "arbitrary_configs_truncate_partition",
            # citus does not support modifying a partition when
            # citus.shard_replication_factor >= 2
            "arbitrary_configs_truncate_cascade_create", "arbitrary_configs_truncate_cascade"]


class CitusSingleShardClusterConfig(CitusMXBaseClusterConfig):
class CitusSingleShardClusterConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.new_settings = {"citus.shard_count": 1}


class CitusNonMxClusterConfig(CitusMXBaseClusterConfig):
class CitusNonMxClusterConfig(CitusDefaultClusterConfig):
    def __init__(self, arguments):
        super().__init__(arguments)
        self.is_mx = False

@ -4,3 +4,9 @@ test: dropped_columns_create_load distributed_planning_create_load
test: local_dist_join_load
test: partitioned_indexes_create
test: connectivity_checks
test: views_create
test: sequences_create
test: index_create
test: arbitrary_configs_truncate_create
test: arbitrary_configs_truncate_cascade_create
test: arbitrary_configs_truncate_partition_create

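For reference, pg_regress treats every "test:" line as one sequential step and runs the names within a line in parallel, which is why each *_create fixture above gets its own line: later steps can rely on the tables it creates. A small parser that recovers that grouping (an illustrative helper, not part of the harness):

    def parse_schedule(text):
        # Each "test:" line is a sequential step; the names on a single
        # line may run in parallel within that step.
        steps = []
        for line in text.splitlines():
            line = line.strip()
            if line.startswith("test:"):
                steps.append(line.split()[1:])
        return steps
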
@ -1,6 +1,10 @@
--
-- ADD_COORDINATOR
--
-- node trying to add itself without specifying groupid => 0 should error out
SELECT master_add_node('localhost', :master_port);
ERROR:  Node cannot add itself as a worker.
HINT:  Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);
SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset
NOTICE:  localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
-- adding the same node again should return the existing nodeid

@ -1114,6 +1114,7 @@ create function dummy_fnc(a dummy_tbl, d double precision) RETURNS dummy_tbl
-- test in tx block
-- shouldn't distribute, as citus.create_object_propagation is set to deferred
BEGIN;
SET LOCAL citus.create_object_propagation TO deferred;
create aggregate dependent_agg (float8) (stype=dummy_tbl, sfunc=dummy_fnc);
COMMIT;
-- verify not distributed

@ -1188,6 +1189,39 @@ DROP TABLE dummy_tbl CASCADE;
NOTICE:  drop cascades to 2 other objects
DETAIL:  drop cascades to function dummy_fnc(dummy_tbl,double precision)
drop cascades to function dependent_agg(double precision)
-- Show that polymorphic aggregates with zero arguments work
CREATE FUNCTION stfnp_zero_arg(int[]) RETURNS int[] AS
'select $1' LANGUAGE SQL;
CREATE FUNCTION ffp_zero_arg(anyarray) RETURNS anyarray AS
'select $1' LANGUAGE SQL;
CREATE AGGREGATE zero_arg_agg(*) (SFUNC = stfnp_zero_arg, STYPE = int4[],
    FINALFUNC = ffp_zero_arg, INITCOND = '{}');
CREATE TABLE zero_arg_agg_table(f1 int, f2 int[]);
SELECT create_distributed_table('zero_arg_agg_table','f1');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO zero_arg_agg_table VALUES(1, array[1]);
INSERT INTO zero_arg_agg_table VALUES(1, array[11]);
SELECT zero_arg_agg(*) from zero_arg_agg_table;
 zero_arg_agg
---------------------------------------------------------------------
 {}
(1 row)

-- Show that after dropping a table that functions and aggregates depend on,
-- pg_dist_object stays consistent on the coordinator and worker nodes.
SELECT pg_identify_object_as_address(classid, objid, objsubid)::text
FROM pg_catalog.pg_dist_object
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) from pg_catalog.pg_dist_object$$);
 pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)

SET citus.create_object_propagation TO automatic;
begin;
create type typ1 as (a int);

@ -1202,5 +1236,17 @@ SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid
 (localhost,57638,t,aggregate_support.dependent_agg)
(2 rows)

CREATE AGGREGATE newavg (
    sfunc = int4_avg_accum, basetype = int4, stype = _int8,
    finalfunc = int8_avg,
    initcond1 = '{0,0}'
);
SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid::text like '%newavg%';$$);
 run_command_on_workers
---------------------------------------------------------------------
 (localhost,57637,t,aggregate_support.newavg)
 (localhost,57638,t,aggregate_support.newavg)
(2 rows)

set client_min_messages to error;
drop schema aggregate_support cascade;

@ -0,0 +1,85 @@
SET search_path TO truncate_tests_schema;
-- Test truncate rollback on a basic table
SELECT COUNT(*) FROM basic_table;
 count
---------------------------------------------------------------------
    10
(1 row)

BEGIN;
TRUNCATE basic_table;
SELECT COUNT(*) FROM basic_table;
 count
---------------------------------------------------------------------
     0
(1 row)

ROLLBACK;
SELECT COUNT(*) FROM basic_table;
 count
---------------------------------------------------------------------
    10
(1 row)

-- Test truncate on a basic table
SELECT COUNT(*) FROM basic_table;
 count
---------------------------------------------------------------------
    10
(1 row)

TRUNCATE basic_table;
SELECT COUNT(*) FROM basic_table;
 count
---------------------------------------------------------------------
     0
(1 row)

-- Test truncate rollback on a partitioned table
SELECT COUNT(*) FROM partitioned_table_0;
 count
---------------------------------------------------------------------
     5
(1 row)

BEGIN;
TRUNCATE partitioned_table;
SELECT COUNT(*) FROM partitioned_table_0;
 count
---------------------------------------------------------------------
     0
(1 row)

ROLLBACK;
SELECT COUNT(*) FROM partitioned_table_0;
 count
---------------------------------------------------------------------
     5
(1 row)

-- Test truncate on a partitioned table
SELECT COUNT(*) FROM partitioned_table;
 count
---------------------------------------------------------------------
    10
(1 row)

SELECT COUNT(*) FROM partitioned_table_1;
 count
---------------------------------------------------------------------
     5
(1 row)

TRUNCATE partitioned_table;
SELECT COUNT(*) FROM partitioned_table;
 count
---------------------------------------------------------------------
     0
(1 row)

SELECT COUNT(*) FROM partitioned_table_1;
 count
---------------------------------------------------------------------
     0
(1 row)

@ -0,0 +1,84 @@
SET search_path TO truncate_cascade_tests_schema;
-- Test truncate error on a table with dependencies
TRUNCATE table_with_pk;
ERROR:  cannot truncate a table referenced in a foreign key constraint
DETAIL:  Table "table_with_fk_1" references "table_with_pk".
HINT:  Truncate table "table_with_fk_1" at the same time, or use TRUNCATE ... CASCADE.
-- Test truncate rollback on a table with dependencies
SELECT COUNT(*) FROM table_with_fk_1;
 count
---------------------------------------------------------------------
    10
(1 row)

SELECT COUNT(*) FROM table_with_fk_2;
 count
---------------------------------------------------------------------
    10
(1 row)

BEGIN;
TRUNCATE table_with_pk CASCADE;
SELECT COUNT(*) FROM table_with_fk_1;
 count
---------------------------------------------------------------------
     0
(1 row)

SELECT COUNT(*) FROM table_with_fk_2;
 count
---------------------------------------------------------------------
     0
(1 row)

ROLLBACK;
SELECT COUNT(*) FROM table_with_fk_1;
 count
---------------------------------------------------------------------
    10
(1 row)

SELECT COUNT(*) FROM table_with_fk_2;
 count
---------------------------------------------------------------------
    10
(1 row)

-- Test truncate on a table with dependencies
SELECT COUNT(*) FROM table_with_pk;
 count
---------------------------------------------------------------------
    10
(1 row)

SELECT COUNT(*) FROM table_with_fk_1;
 count
---------------------------------------------------------------------
    10
(1 row)

SELECT COUNT(*) FROM table_with_fk_2;
 count
---------------------------------------------------------------------
    10
(1 row)

TRUNCATE table_with_pk CASCADE;
SELECT COUNT(*) FROM table_with_pk;
 count
---------------------------------------------------------------------
     0
(1 row)

SELECT COUNT(*) FROM table_with_fk_1;
 count
---------------------------------------------------------------------
     0
(1 row)

SELECT COUNT(*) FROM table_with_fk_2;
 count
---------------------------------------------------------------------
     0
(1 row)

@ -0,0 +1,29 @@
CREATE SCHEMA truncate_cascade_tests_schema;
SET search_path TO truncate_cascade_tests_schema;
-- tables connected with foreign keys
CREATE TABLE table_with_pk(a bigint PRIMARY KEY);
CREATE TABLE table_with_fk_1(a bigint, b bigint, FOREIGN KEY (b) REFERENCES table_with_pk(a));
CREATE TABLE table_with_fk_2(a bigint, b bigint, FOREIGN KEY (b) REFERENCES table_with_pk(a));
-- distribute tables
SELECT create_reference_table('table_with_pk');
 create_reference_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('table_with_fk_1', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_reference_table('table_with_fk_2');
 create_reference_table
---------------------------------------------------------------------

(1 row)

-- fill tables with data
INSERT INTO table_with_pk(a) SELECT n FROM generate_series(1, 10) n;
INSERT INTO table_with_fk_1(a, b) SELECT n, n FROM generate_series(1, 10) n;
INSERT INTO table_with_fk_2(a, b) SELECT n, n FROM generate_series(1, 10) n;
@ -0,0 +1,26 @@
CREATE SCHEMA truncate_tests_schema;
SET search_path TO truncate_tests_schema;
-- simple table
CREATE TABLE basic_table(a int);
-- partitioned table
CREATE TABLE partitioned_table(a int) PARTITION BY RANGE(a);
CREATE TABLE partitioned_table_0 PARTITION OF partitioned_table
FOR VALUES FROM (1) TO (6);
CREATE TABLE partitioned_table_1 PARTITION OF partitioned_table
FOR VALUES FROM (6) TO (11);
-- distribute tables
SELECT create_distributed_table('basic_table', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('partitioned_table', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- fill tables with data
INSERT INTO basic_table(a) SELECT n FROM generate_series(1, 10) n;
INSERT INTO partitioned_table(a) SELECT n FROM generate_series(1, 10) n;
@ -0,0 +1,39 @@
SET search_path TO truncate_partition_tests_schema;
-- Test truncate on a partition
SELECT COUNT(*) FROM partitioned_table;
 count
---------------------------------------------------------------------
    10
(1 row)

SELECT COUNT(*) FROM partitioned_table_0;
 count
---------------------------------------------------------------------
     5
(1 row)

SELECT COUNT(*) FROM partitioned_table_1;
 count
---------------------------------------------------------------------
     5
(1 row)

TRUNCATE partitioned_table_0;
SELECT COUNT(*) FROM partitioned_table;
 count
---------------------------------------------------------------------
     5
(1 row)

SELECT COUNT(*) FROM partitioned_table_0;
 count
---------------------------------------------------------------------
     0
(1 row)

SELECT COUNT(*) FROM partitioned_table_1;
 count
---------------------------------------------------------------------
     5
(1 row)

@ -0,0 +1,17 @@
CREATE SCHEMA truncate_partition_tests_schema;
SET search_path TO truncate_partition_tests_schema;
-- partitioned table
CREATE TABLE partitioned_table(a int) PARTITION BY RANGE(a);
CREATE TABLE partitioned_table_0 PARTITION OF partitioned_table
FOR VALUES FROM (1) TO (6);
CREATE TABLE partitioned_table_1 PARTITION OF partitioned_table
FOR VALUES FROM (6) TO (11);
-- distribute tables
SELECT create_distributed_table('partitioned_table', 'a');
 create_distributed_table
---------------------------------------------------------------------

(1 row)

-- fill tables with data
INSERT INTO partitioned_table(a) SELECT n FROM generate_series(1, 10) n;
@ -10,3 +10,13 @@ SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary';
 t
(1 row)

-- Show that pg_dist_object entities are the same on all nodes
SELECT pg_identify_object_as_address(classid, objid, objsubid)::text
FROM pg_catalog.pg_dist_object
EXCEPT
SELECT unnest(result::text[]) AS unnested_result
FROM run_command_on_workers($$SELECT array_agg(pg_identify_object_as_address(classid, objid, objsubid)) from pg_catalog.pg_dist_object$$);
 pg_identify_object_as_address
---------------------------------------------------------------------
(0 rows)

@ -185,3 +185,5 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_sto
 f
(1 row)

-- make sure citus_columnar can be loaded
LOAD 'citus_columnar';
@ -183,5 +183,18 @@ INSERT INTO data_types_table SELECT * FROM data_types_table ON CONFLICT (dist_ke
INSERT INTO data_types_table SELECT * FROM data_types_table LIMIT 100000 ON CONFLICT (dist_key) DO UPDATE SET useless_column = 10;
INSERT INTO data_types_table (dist_key, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38)
SELECT dist_key+1, col1, col2, col3, col4, col5, col6, col70, col7, col8, col9, col10, col11, col12, col13, col14, col15, col16, col17, col18, col19, col20, col21, col22, col23, col24, col25, col26, col27, col28, col29, col32, col33, col34, col35, col36, col37, col38 FROM data_types_table ON CONFLICT (dist_key) DO UPDATE SET useless_column = 10;
-- test type names that start with underscore
CREATE TYPE underscore_type_1 AS (a INT);
CREATE TYPE _underscore_type_1 AS (a INT);
CREATE TYPE underscore_type_2 AS ENUM ('a');
CREATE TYPE _underscore_type_2 AS ENUM ('a');
SELECT result FROM run_command_on_all_nodes('SELECT count(*) FROM pg_type WHERE typname LIKE ''%underscore\_type%''');
 result
---------------------------------------------------------------------
 8
 8
 8
(3 rows)

SET client_min_messages TO ERROR;
DROP SCHEMA data_types CASCADE;
|
@ -179,3 +179,41 @@ HINT: Connect to the coordinator and run it again.
|
|||
SET citus.enable_ddl_propagation TO off;
|
||||
DROP SCHEMA collation_creation_on_worker;
|
||||
SET citus.enable_ddl_propagation TO on;
|
||||
\c - - - :master_port
|
||||
-- will skip trying to propagate the collation due to temp schema
|
||||
CREATE COLLATION pg_temp.temp_collation (provider = icu, locale = 'de-u-co-phonebk');
|
||||
WARNING: "collation pg_temp_xxx.temp_collation" has dependency on unsupported object "schema pg_temp_xxx"
|
||||
DETAIL: "collation pg_temp_xxx.temp_collation" will be created only locally
|
||||
SET client_min_messages TO ERROR;
|
||||
CREATE USER alter_collation_user;
|
||||
SELECT 1 FROM run_command_on_workers('CREATE USER alter_collation_user');
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
1
|
||||
(2 rows)
|
||||
|
||||
RESET client_min_messages;
|
||||
CREATE COLLATION alter_collation FROM "C";
|
||||
ALTER COLLATION alter_collation OWNER TO alter_collation_user;
|
||||
SELECT result FROM run_command_on_all_nodes('
|
||||
SELECT collowner::regrole FROM pg_collation WHERE collname = ''alter_collation'';
|
||||
');
|
||||
result
|
||||
---------------------------------------------------------------------
|
||||
alter_collation_user
|
||||
alter_collation_user
|
||||
alter_collation_user
|
||||
(3 rows)
|
||||
|
||||
DROP COLLATION alter_collation;
|
||||
SET client_min_messages TO ERROR;
|
||||
DROP USER alter_collation_user;
|
||||
SELECT 1 FROM run_command_on_workers('DROP USER alter_collation_user');
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
1
|
||||
(2 rows)
|
||||
|
||||
RESET client_min_messages;
|
||||
|
|
Some files were not shown because too many files have changed in this diff.