diff --git a/.circleci/config.yml b/.circleci/config.yml
index 3b2b62cac..8addafccc 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -6,16 +6,19 @@ orbs:
parameters:
image_suffix:
type: string
- default: '-vbab548a'
+ default: '-v87fd773'
pg14_version:
type: string
- default: '14.8'
+ default: '14.9'
pg15_version:
type: string
- default: '15.3'
+ default: '15.4'
+ pg16_version:
+ type: string
+ default: '16.0'
upgrade_pg_versions:
type: string
- default: '14.8-15.3'
+ default: '14.9-15.4-16.0'
style_checker_tools_version:
type: string
default: '0.8.18'
@@ -722,6 +725,10 @@ workflows:
name: build-15
pg_major: 15
image_tag: '<< pipeline.parameters.pg15_version >>'
+ - build:
+ name: build-16
+ pg_major: 16
+ image_tag: '<< pipeline.parameters.pg16_version >>'
- check-style
- check-sql-snapshots
@@ -872,6 +879,79 @@ workflows:
image: citus/failtester
make: check-failure
+ - test-citus: &test-citus-16
+ name: 'test-16_check-split'
+ make: check-split
+ pg_major: 16
+ image_tag: '<< pipeline.parameters.pg16_version >>'
+ requires: [build-16]
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-enterprise'
+ make: check-enterprise
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-enterprise-isolation'
+ make: check-enterprise-isolation
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-enterprise-isolation-logicalrep-1'
+ make: check-enterprise-isolation-logicalrep-1
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-enterprise-isolation-logicalrep-2'
+ make: check-enterprise-isolation-logicalrep-2
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-enterprise-isolation-logicalrep-3'
+ make: check-enterprise-isolation-logicalrep-3
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-enterprise-failure'
+ image: citus/failtester
+ make: check-enterprise-failure
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-multi'
+ make: check-multi
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-multi-1'
+ make: check-multi-1
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-mx'
+ make: check-multi-mx
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-vanilla'
+ make: check-vanilla
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-isolation'
+ make: check-isolation
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-operations'
+ make: check-operations
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-follower-cluster'
+ make: check-follower-cluster
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-columnar'
+ make: check-columnar
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-columnar-isolation'
+ make: check-columnar-isolation
+ - test-citus:
+ <<: *test-citus-16
+ name: 'test-16_check-failure'
+ image: citus/failtester
+ make: check-failure
+
- test-pytest:
name: 'test-14_pytest'
pg_major: 14
@@ -884,6 +964,12 @@ workflows:
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
+ - test-pytest:
+ name: 'test-16_pytest'
+ pg_major: 16
+ image_tag: '<< pipeline.parameters.pg16_version >>'
+ requires: [build-16]
+
- tap-test-citus:
name: 'test-15_tap-cdc'
suite: cdc
@@ -891,6 +977,13 @@ workflows:
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
+ - tap-test-citus:
+ name: 'test-16_tap-cdc'
+ suite: cdc
+ pg_major: 16
+ image_tag: '<< pipeline.parameters.pg16_version >>'
+ requires: [build-16]
+
- test-arbitrary-configs:
name: 'test-14_check-arbitrary-configs'
pg_major: 14
@@ -903,6 +996,12 @@ workflows:
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
+ - test-arbitrary-configs:
+ name: 'test-16_check-arbitrary-configs'
+ pg_major: 16
+ image_tag: '<< pipeline.parameters.pg16_version >>'
+ requires: [build-16]
+
- test-query-generator:
name: 'test-14_check-query-generator'
pg_major: 14
@@ -915,6 +1014,12 @@ workflows:
image_tag: '<< pipeline.parameters.pg15_version >>'
requires: [build-15]
+ - test-query-generator:
+ name: 'test-16_check-query-generator'
+ pg_major: 16
+ image_tag: '<< pipeline.parameters.pg16_version >>'
+ requires: [build-16]
+
- test-pg-upgrade:
name: 'test-14-15_check-pg-upgrade'
old_pg_major: 14
@@ -922,6 +1027,20 @@ workflows:
image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
requires: [build-14, build-15]
+ - test-pg-upgrade:
+ name: 'test-15-16_check-pg-upgrade'
+ old_pg_major: 15
+ new_pg_major: 16
+ image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
+ requires: [build-15, build-16]
+
+ - test-pg-upgrade:
+ name: 'test-14-16_check-pg-upgrade'
+ old_pg_major: 14
+ new_pg_major: 16
+ image_tag: '<< pipeline.parameters.upgrade_pg_versions >>'
+ requires: [build-14, build-16]
+
- test-citus-upgrade:
name: test-14_check-citus-upgrade
pg_major: 14
@@ -968,7 +1087,28 @@ workflows:
- test-15_check-split
- test-15_check-arbitrary-configs
- test-15_check-query-generator
+ - test-16_check-multi
+ - test-16_check-multi-1
+ - test-16_check-mx
+ - test-16_check-vanilla
+ - test-16_check-isolation
+ - test-16_check-operations
+ - test-16_check-follower-cluster
+ - test-16_check-columnar
+ - test-16_check-columnar-isolation
+ - test-16_check-failure
+ - test-16_check-enterprise
+ - test-16_check-enterprise-isolation
+ - test-16_check-enterprise-isolation-logicalrep-1
+ - test-16_check-enterprise-isolation-logicalrep-2
+ - test-16_check-enterprise-isolation-logicalrep-3
+ - test-16_check-enterprise-failure
+ - test-16_check-split
+ - test-16_check-arbitrary-configs
+ - test-16_check-query-generator
- test-14-15_check-pg-upgrade
+ - test-15-16_check-pg-upgrade
+ - test-14-16_check-pg-upgrade
- test-14_check-citus-upgrade
- ch_benchmark:
diff --git a/.gitattributes b/.gitattributes
index 84765433b..42f42cd25 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -28,6 +28,7 @@ src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/deparser/ruleutils_13.c -citus-style
src/backend/distributed/deparser/ruleutils_14.c -citus-style
src/backend/distributed/deparser/ruleutils_15.c -citus-style
+src/backend/distributed/deparser/ruleutils_16.c -citus-style
src/backend/distributed/commands/index_pg_source.c -citus-style
src/include/distributed/citus_nodes.h -citus-style
diff --git a/.github/packaging/validate_build_output.sh b/.github/packaging/validate_build_output.sh
index 200b1a188..64098811e 100755
--- a/.github/packaging/validate_build_output.sh
+++ b/.github/packaging/validate_build_output.sh
@@ -1,3 +1,18 @@
+#!/bin/bash
+
+set -ex
+
+# Function to get the OS version
+get_rpm_os_version() {
+ if [[ -f /etc/centos-release ]]; then
+ cat /etc/centos-release | awk '{print $4}'
+ elif [[ -f /etc/oracle-release ]]; then
+ cat /etc/oracle-release | awk '{print $5}'
+ else
+ echo "Unknown"
+ fi
+}
+
package_type=${1}
# Since $HOME is set in GH_Actions as /github/home, pyenv fails to create virtualenvs.
@@ -10,11 +25,24 @@ pyenv versions
pyenv virtualenv ${PACKAGING_PYTHON_VERSION} packaging_env
pyenv activate packaging_env
-git clone -b v0.8.24 --depth=1 https://github.com/citusdata/tools.git tools
+git clone -b v0.8.27 --depth=1 https://github.com/citusdata/tools.git tools
python3 -m pip install -r tools/packaging_automation/requirements.txt
+
+
+echo "Package type: ${package_type}"
+echo "OS version: $(get_rpm_os_version)"
+
+ # if os version is centos 7 or oracle linux 7, then remove urllib3 with pip uninstall and install urllib3<2.0.0 with pip install
+if [[ ${package_type} == "rpm" && $(get_rpm_os_version) == 7* ]]; then
+ python3 -m pip uninstall -y urllib3
+ python3 -m pip install 'urllib3<2'
+fi
+
python3 -m tools.packaging_automation.validate_build_output --output_file output.log \
--ignore_file .github/packaging/packaging_ignore.yml \
--package_type ${package_type}
pyenv deactivate
# Set $HOME back to /github/home
export HOME=${GITHUB_HOME}
+
+# Print the output to the console
diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml
index aecf8876c..356a590a4 100644
--- a/.github/workflows/packaging-test-pipelines.yml
+++ b/.github/workflows/packaging-test-pipelines.yml
@@ -6,6 +6,10 @@ on:
workflow_dispatch:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
get_postgres_versions_from_file:
@@ -43,7 +47,7 @@ jobs:
- oraclelinux-7
- oraclelinux-8
- centos-7
- - centos-8
+ - almalinux-8
- almalinux-9
POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
@@ -73,8 +77,18 @@ jobs:
- name: Make
run: |
+ git config --global --add safe.directory ${GITHUB_WORKSPACE}
make CFLAGS="-Wno-missing-braces" -sj$(cat /proc/cpuinfo | grep "core id" | wc -l) 2>&1 | tee -a output.log
+ # Check the exit code of the make command
+ make_exit_code=${PIPESTATUS[0]}
+
+ # If the make command returned a non-zero exit code, exit with the same code
+ if [[ $make_exit_code -ne 0 ]]; then
+ echo "make command failed with exit code $make_exit_code"
+ exit $make_exit_code
+ fi
+
- name: Make install
run: |
make CFLAGS="-Wno-missing-braces" install 2>&1 | tee -a output.log
@@ -109,10 +123,8 @@ jobs:
- debian-buster-all
- debian-bookworm-all
- debian-bullseye-all
- - ubuntu-bionic-all
- ubuntu-focal-all
- ubuntu-jammy-all
- - ubuntu-kinetic-all
POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}
@@ -141,9 +153,22 @@ jobs:
make clean
- name: Make
+ shell: bash
run: |
+ set -e
+ git config --global --add safe.directory ${GITHUB_WORKSPACE}
make -sj$(cat /proc/cpuinfo | grep "core id" | wc -l) 2>&1 | tee -a output.log
+ # Check the exit code of the make command
+ make_exit_code=${PIPESTATUS[0]}
+
+ # If the make command returned a non-zero exit code, exit with the same code
+ if [[ $make_exit_code -ne 0 ]]; then
+ echo "make command failed with exit code $make_exit_code"
+ exit $make_exit_code
+ fi
+
+
- name: Make install
run: |
make install 2>&1 | tee -a output.log
diff --git a/CHANGELOG.md b/CHANGELOG.md
index de9bfeeb8..3a2b2ce99 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,142 @@
+### citus v12.1.0 (September 12, 2023) ###
+
+* Adds support for PostgreSQL 16.0 (#7173)
+
+* Adds `citus_schema_move()` function which moves tables within a
+ distributed schema to another node (#7180)
+
+* Adds `citus_pause_node()` UDF that allows pausing the node with given id
+ (#7089)
+
+* Makes sure to enforce shard level colocation with the GUC
+ `citus.enable_non_colocated_router_query_pushdown` (#7076)
+
+* Allows creating reference / distributed-schema tables from local tables that
+ are added to metadata and that use identity columns (#7131)
+
+* Propagates `BUFFER_USAGE_LIMIT` option in `VACUUM` and `ANALYZE` (#7114)
+
+* Propagates `PROCESS_MAIN`, `SKIP_DATABASE_STATS`, `ONLY_DATABASE_STATS`
+ options in `VACUUM` (#7114)
+
+* Propagates `GENERIC_PLAN` option in `EXPLAIN` (#7141)
+
+* Propagates "rules" option in `CREATE COLLATION` (#7185)
+
+* Propagates `GRANT`/ `REVOKE` for database privileges (#7109)
+
+* Adds TRUNCATE trigger support on Citus foreign tables (#7170)
+
+* Removes `pg_send_cancellation` (#7135)
+
+* Prevents unnecessarily pulling the data into coordinator for some
+ `INSERT .. SELECT` queries that target a single-shard group (#7077)
+
+* Makes sure that rebalancer throws an error if replication factor is greater
+ than the shard allowed node count. Also makes sure to avoid moving a shard
+ to a node that it already exists on. (#7074)
+
+* Fixes a bug that may appear during 2PC recovery when there are multiple
+ databases (#7174)
+
+* Fixes a bug that could cause `COPY` logic to skip data in case of
+ out-of-memory (#7152)
+
+* Fixes a bug that causes an unexpected error when adding a column with
+ a `NULL` constraint (#7093)
+
+* Fixes `PROCESS_TOAST` default value to `true` (#7122)
+
+* Improves the error thrown when there is datatype mismatch in `MERGE ON`
+ (#7081)
+
+### citus v12.0.0 (July 11, 2023) ###
+
+* Adds support for schema-based sharding.
+ While `citus.enable_schema_based_sharding` GUC allows sharding the database
+ based on newly created schemas, `citus_schema_distribute()` allows doing so
+ for the existing schemas. Distributed schemas used for sharding the database
+ can be listed by using the view `citus_schemas`, monitored by using the view
+ `citus_stat_schemas`, and undistributed by using the UDF
+ `citus_schema_undistribute()`
+ (#6866, #6979, #6933, #6936 and many others)
+
+* Supports MERGE command across non-colocated distributed tables/subqueries,
+ reference tables and joins on non-distribution columns (#6927)
+
+* Drops PG13 Support (#7002, #7007)
+
+* Changes default rebalance strategy to by_disk_size (#7033)
+
+* Changes by_disk_size rebalance strategy to have a base size (#7035)
+
+* Improves citus_tables view performance (#7018)
+
+* Improves tenant monitoring performance (#6868)
+
+* Introduces the GUC `citus.stat_tenants_untracked_sample_rate` for sampling in
+ tenant monitoring (#7026)
+
+* Adds CPU usage to citus_stat_tenants (#6844)
+
+* Propagates `ALTER SCHEMA .. OWNER TO ..` commands to worker (#6987)
+
+* Allows `ADD COLUMN` in command string with other commands (#7032)
+
+* Allows `DROP CONSTRAINT` in command string with other commands (#7012)
+
+* Makes sure to properly handle index storage options for `ADD CONSTRAINT` /
+ `ADD COLUMN` commands (#7032)
+
+* Makes sure to properly handle `IF NOT EXISTS` for `ADD COLUMN` commands
+ (#7032)
+
+* Allows using generated identity column based on int/smallint when creating
+ a distributed table with the limitation of not being able to perform DMLs on
+ identity columns from worker nodes (#7008)
+
+* Supports custom cast from / to timestamptz in time partition management UDFs
+ (#6923)
+
+* Optimizes pushdown planner on memory and cpu (#6945)
+
+* Changes citus_shard_sizes view's table_name column to shard_id (#7003)
+
+* The GUC search_path is now reported when it is updated (#6983)
+
+* Disables citus.enable_non_colocated_router_query_pushdown GUC by default to
+ ensure generating a consistent distributed plan for the queries that
+ reference non-colocated distributed tables (#6909)
+
+* Disallows MERGE with filters that prune down to zero shards (#6946)
+
+* Makes sure to take `shouldhaveshards` setting into account for a node when
+ planning rebalance steps (#6887)
+
+* Improves compatibility with other extensions by forwarding to the existing
+ emit_log_hook in our log hook (#6877)
+
+* Fixes wrong result when using `NOT MATCHED` with MERGE command (#6943)
+
+* Fixes querying the view `citus_shard_sizes` when there are too many shards
+ (#7018)
+
+* Fixes a bug related to type casts from other types to text/varchar (#6391)
+
+* Fixes propagating `CREATE SCHEMA AUTHORIZATION ..` with no schema name
+ (#7015)
+
+* Fixes an error when creating a FOREIGN KEY without a name referencing a schema
+ qualified table (#6986)
+
+* Fixes a rare bug which mostly happens with queries that contain both outer
+ join and where clauses (#6857)
+
+* Fixes a bug related to propagation of schemas when pg_dist_node is empty
+ (#6900)
+
+* Fixes a crash when a query is locally executed with explain analyze (#6892)
+
### citus v11.3.0 (May 2, 2023) ###
* Introduces CDC implementation for Citus using logical replication
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..f9ba8cf65
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,9 @@
+# Microsoft Open Source Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+
+Resources:
+
+- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
+- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
+- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
diff --git a/Makefile b/Makefile
index 098b7c207..e42d0ffd3 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ endif
include Makefile.global
-all: extension pg_send_cancellation
+all: extension
# build columnar only
@@ -40,22 +40,14 @@ clean-full:
install-downgrades:
$(MAKE) -C src/backend/distributed/ install-downgrades
-install-all: install-headers install-pg_send_cancellation
+install-all: install-headers
$(MAKE) -C src/backend/columnar/ install-all
$(MAKE) -C src/backend/distributed/ install-all
-# build citus_send_cancellation binary
-pg_send_cancellation:
- $(MAKE) -C src/bin/pg_send_cancellation/ all
-install-pg_send_cancellation: pg_send_cancellation
- $(MAKE) -C src/bin/pg_send_cancellation/ install
-clean-pg_send_cancellation:
- $(MAKE) -C src/bin/pg_send_cancellation/ clean
-.PHONY: pg_send_cancellation install-pg_send_cancellation clean-pg_send_cancellation
# Add to generic targets
-install: install-extension install-headers install-pg_send_cancellation
-clean: clean-extension clean-pg_send_cancellation
+install: install-extension install-headers
+clean: clean-extension
# apply or check style
reindent:
diff --git a/README.md b/README.md
index ae67dadca..2ee07765c 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-| **The Citus database is 100% open source. Learn what's new in the [Citus 11.3 release blog](https://www.citusdata.com/blog/2023/05/05/whats-new-in-citus-11-3-multi-tenant-saas/) and the [Citus Updates page](https://www.citusdata.com/updates/).**|
+| **The Citus database is 100% open source. Learn what's new in the [Citus 12.0 release blog](https://www.citusdata.com/blog/2023/07/18/citus-12-schema-based-sharding-comes-to-postgres/) and the [Citus Updates page](https://www.citusdata.com/updates/).**|
|---|
@@ -8,7 +8,7 @@
[](https://docs.citusdata.com/)
[](https://stackoverflow.com/questions/tagged/citus)
-[Slack](https://citus-public.slack.com/)
+[](https://slack.citusdata.com/)
[](https://app.codecov.io/gh/citusdata/citus)
[](https://twitter.com/intent/follow?screen_name=citusdata)
@@ -38,6 +38,7 @@ Since Citus is an extension to Postgres, you can use Citus with the latest Postg
- [Why Citus?](#why-citus)
- [Getting Started](#getting-started)
- [Using Citus](#using-citus)
+- [Schema-based sharding](#schema-based-sharding)
- [Setting up with High Availability](#setting-up-with-high-availability)
- [Documentation](#documentation)
- [Architecture](#architecture)
@@ -94,14 +95,14 @@ Install packages on Ubuntu / Debian:
```bash
curl https://install.citusdata.com/community/deb.sh > add-citus-repo.sh
sudo bash add-citus-repo.sh
-sudo apt-get -y install postgresql-15-citus-11.3
+sudo apt-get -y install postgresql-15-citus-12.0
```
Install packages on CentOS / Red Hat:
```bash
curl https://install.citusdata.com/community/rpm.sh > add-citus-repo.sh
sudo bash add-citus-repo.sh
-sudo yum install -y citus113_15
+sudo yum install -y citus120_15
```
To add Citus to your local PostgreSQL database, add the following to `postgresql.conf`:
@@ -347,6 +348,45 @@ When using columnar storage, you should only load data in batch using `COPY` or
To learn more about columnar storage, check out the [columnar storage README](https://github.com/citusdata/citus/blob/master/src/backend/columnar/README.md).
+## Schema-based sharding
+
+Available since Citus 12.0, [schema-based sharding](https://docs.citusdata.com/en/stable/get_started/concepts.html#schema-based-sharding) is the shared database, separate schema model: the schema becomes the logical shard within the database. Multi-tenant apps can use a schema per tenant to easily shard along the tenant dimension. Query changes are not required and the application usually only needs a small modification to set the proper search_path when switching tenants. Schema-based sharding is an ideal solution for microservices, and for ISVs deploying applications that cannot undergo the changes required to onboard row-based sharding.
+
+### Creating distributed schemas
+
+You can turn an existing schema into a distributed schema by calling `citus_schema_distribute`:
+
+```sql
+SELECT citus_schema_distribute('user_service');
+```
+
+Alternatively, you can set `citus.enable_schema_based_sharding` to have all newly created schemas be automatically converted into distributed schemas:
+
+```sql
+SET citus.enable_schema_based_sharding TO ON;
+
+CREATE SCHEMA AUTHORIZATION user_service;
+CREATE SCHEMA AUTHORIZATION time_service;
+CREATE SCHEMA AUTHORIZATION ping_service;
+```
+
+### Running queries
+
+Queries will be properly routed to schemas based on `search_path` or by explicitly using the schema name in the query.
+
+For [microservices](https://docs.citusdata.com/en/stable/get_started/tutorial_microservices.html) you would create a USER per service matching the schema name, so that the default `search_path` contains the schema name. When connected, the user's queries would be automatically routed and no changes to the microservice would be required.
+
+```sql
+CREATE USER user_service;
+CREATE SCHEMA AUTHORIZATION user_service;
+```
+
+For typical multi-tenant applications, you would set the search path to the tenant schema name in your application:
+
+```sql
+SET search_path = tenant_name, public;
+```
+
## Setting up with High Availability
One of the most popular high availability solutions for PostgreSQL, [Patroni 3.0](https://github.com/zalando/patroni), has [first class support for Citus 10.0 and above](https://patroni.readthedocs.io/en/latest/citus.html#citus), additionally since Citus 11.2 ships with improvements for smoother node switchover in Patroni.
@@ -414,6 +454,8 @@ Citus is uniquely capable of scaling both analytical and transactional workloads
Example multi-tenant SaaS users: [Copper](https://www.citusdata.com/customers/copper), [Salesloft](https://fivetran.com/case-studies/replicating-sharded-databases-a-case-study-of-salesloft-citus-data-and-fivetran), [ConvertFlow](https://www.citusdata.com/customers/convertflow)
+- **[Microservices](https://docs.citusdata.com/en/stable/get_started/tutorial_microservices.html)**: Citus supports schema-based sharding, which allows distributing regular database schemas across many machines. This sharding methodology fits nicely with a typical microservices architecture, where storage is fully owned by the service and hence cannot share the same schema definition with other tenants. Citus allows distributing horizontally scalable state across services, solving one of the [main problems](https://stackoverflow.blog/2020/11/23/the-macro-problem-with-microservices/) of microservices.
+
- **Geospatial**:
Because of the powerful [PostGIS](https://postgis.net/) extension to Postgres that adds support for geographic objects into Postgres, many people run spatial/GIS applications on top of Postgres. And since spatial location information has become part of our daily life, well, there are more geospatial applications than ever. When your Postgres database needs to scale out to handle an increased workload, Citus is a good fit.
@@ -431,6 +473,12 @@ Citus is uniquely capable of scaling both analytical and transactional workloads
Citus is built on and of open source, and we welcome your contributions. The [CONTRIBUTING.md](CONTRIBUTING.md) file explains how to get started developing the Citus extension itself and our code quality guidelines.
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
## Stay Connected
- **Twitter**: Follow us [@citusdata](https://twitter.com/citusdata) to track the latest posts & updates on what’s happening.
diff --git a/configure b/configure
index 9150c1ab2..a0c978dea 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for Citus 12.0devel.
+# Generated by GNU Autoconf 2.69 for Citus 12.2devel.
#
#
# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
@@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='Citus'
PACKAGE_TARNAME='citus'
-PACKAGE_VERSION='12.0devel'
-PACKAGE_STRING='Citus 12.0devel'
+PACKAGE_VERSION='12.2devel'
+PACKAGE_STRING='Citus 12.2devel'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -1262,7 +1262,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures Citus 12.0devel to adapt to many kinds of systems.
+\`configure' configures Citus 12.2devel to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1324,7 +1324,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of Citus 12.0devel:";;
+ short | recursive ) echo "Configuration of Citus 12.2devel:";;
esac
cat <<\_ACEOF
@@ -1429,7 +1429,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-Citus configure 12.0devel
+Citus configure 12.2devel
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1912,7 +1912,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by Citus $as_me 12.0devel, which was
+It was created by Citus $as_me 12.2devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2588,7 +2588,7 @@ fi
if test "$with_pg_version_check" = no; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num (skipped compatibility check)" >&5
$as_echo "$as_me: building against PostgreSQL $version_num (skipped compatibility check)" >&6;}
-elif test "$version_num" != '14' -a "$version_num" != '15'; then
+elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16'; then
as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5
@@ -5393,7 +5393,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by Citus $as_me 12.0devel, which was
+This file was extended by Citus $as_me 12.2devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -5455,7 +5455,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-Citus config.status 12.0devel
+Citus config.status 12.2devel
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
diff --git a/configure.ac b/configure.ac
index 078e14c42..2a4c7a21a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -5,7 +5,7 @@
# everyone needing autoconf installed, the resulting files are checked
# into the SCM.
-AC_INIT([Citus], [12.0devel])
+AC_INIT([Citus], [12.2devel])
AC_COPYRIGHT([Copyright (c) Citus Data, Inc.])
# we'll need sed and awk for some of the version commands
@@ -80,7 +80,7 @@ AC_SUBST(with_pg_version_check)
if test "$with_pg_version_check" = no; then
AC_MSG_NOTICE([building against PostgreSQL $version_num (skipped compatibility check)])
-elif test "$version_num" != '14' -a "$version_num" != '15'; then
+elif test "$version_num" != '14' -a "$version_num" != '15' -a "$version_num" != '16'; then
AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.])
else
AC_MSG_NOTICE([building against PostgreSQL $version_num])
diff --git a/src/backend/columnar/citus_columnar.control b/src/backend/columnar/citus_columnar.control
index d8a54923c..c96d839d1 100644
--- a/src/backend/columnar/citus_columnar.control
+++ b/src/backend/columnar/citus_columnar.control
@@ -1,6 +1,6 @@
# Columnar extension
comment = 'Citus Columnar extension'
-default_version = '11.3-1'
+default_version = '12.2-1'
module_pathname = '$libdir/citus_columnar'
relocatable = false
schema = pg_catalog
diff --git a/src/backend/columnar/columnar_compression.c b/src/backend/columnar/columnar_compression.c
index b73c1dac6..98a175b06 100644
--- a/src/backend/columnar/columnar_compression.c
+++ b/src/backend/columnar/columnar_compression.c
@@ -18,11 +18,16 @@
#include "lib/stringinfo.h"
#include "columnar/columnar_compression.h"
+#include "distributed/pg_version_constants.h"
#if HAVE_CITUS_LIBLZ4
#include <lz4.h>
#endif
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "varatt.h"
+#endif
+
#if HAVE_LIBZSTD
#include <zstd.h>
#endif
diff --git a/src/backend/columnar/columnar_customscan.c b/src/backend/columnar/columnar_customscan.c
index bd01c4faa..4ea96a121 100644
--- a/src/backend/columnar/columnar_customscan.c
+++ b/src/backend/columnar/columnar_customscan.c
@@ -33,6 +33,10 @@
#include "optimizer/paths.h"
#include "optimizer/plancat.h"
#include "optimizer/restrictinfo.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "parser/parse_relation.h"
+#include "parser/parsetree.h"
+#endif
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
@@ -127,6 +131,9 @@ static List * set_deparse_context_planstate(List *dpcontext, Node *node,
/* other helpers */
static List * ColumnarVarNeeded(ColumnarScanState *columnarScanState);
static Bitmapset * ColumnarAttrNeeded(ScanState *ss);
+#if PG_VERSION_NUM >= PG_VERSION_16
+static Bitmapset * fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns);
+#endif
/* saved hook value in case of unload */
static set_rel_pathlist_hook_type PreviousSetRelPathlistHook = NULL;
@@ -535,7 +542,7 @@ ColumnarIndexScanAdditionalCost(PlannerInfo *root, RelOptInfo *rel,
* "anti-correlated" (-1) since both help us avoiding from reading the
* same stripe again and again.
*/
- double absIndexCorrelation = Abs(indexCorrelation);
+ double absIndexCorrelation = float_abs(indexCorrelation);
/*
* To estimate the number of stripes that we need to read, we do linear
@@ -654,7 +661,7 @@ CheckVarStats(PlannerInfo *root, Var *var, Oid sortop, float4 *absVarCorrelation
* If the Var is not highly correlated, then the chunk's min/max bounds
* will be nearly useless.
*/
- if (Abs(varCorrelation) < ColumnarQualPushdownCorrelationThreshold)
+ if (float_abs(varCorrelation) < ColumnarQualPushdownCorrelationThreshold)
{
if (absVarCorrelation)
{
@@ -662,7 +669,7 @@ CheckVarStats(PlannerInfo *root, Var *var, Oid sortop, float4 *absVarCorrelation
* Report absVarCorrelation if caller wants to know why given
* var is rejected.
*/
- *absVarCorrelation = Abs(varCorrelation);
+ *absVarCorrelation = float_abs(varCorrelation);
}
return false;
}
@@ -1371,7 +1378,43 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
cpath->custom_private = list_make2(NIL, NIL);
}
- int numberOfColumnsRead = bms_num_members(rte->selectedCols);
+ int numberOfColumnsRead = 0;
+#if PG_VERSION_NUM >= PG_VERSION_16
+ if (rte->perminfoindex > 0)
+ {
+ /*
+ * If perminfoindex > 0, that means that this relation's permission info
+ * is directly found in the list of rteperminfos of the Query(root->parse)
+ * So, all we have to do here is retrieve that info.
+ */
+ RTEPermissionInfo *perminfo = getRTEPermissionInfo(root->parse->rteperminfos,
+ rte);
+ numberOfColumnsRead = bms_num_members(perminfo->selectedCols);
+ }
+ else
+ {
+ /*
+ * If perminfoindex = 0, that means we are skipping the check for permission info
+ * for this relation, which means that it's either a partition or an inheritance child.
+ * In these cases, we need to access the permission info of the top parent of this relation.
+ * After thorough checking, we found that the index of the top parent pointing to the correct
+ * range table entry in Query's range tables (root->parse->rtable) is found under
+ * RelOptInfo rel->top_parent->relid.
+ * For reference, check expand_partitioned_rtentry and expand_inherited_rtentry PG functions
+ */
+ Assert(rel->top_parent);
+ RangeTblEntry *parent_rte = rt_fetch(rel->top_parent->relid, root->parse->rtable);
+ RTEPermissionInfo *perminfo = getRTEPermissionInfo(root->parse->rteperminfos,
+ parent_rte);
+ numberOfColumnsRead = bms_num_members(fixup_inherited_columns(perminfo->relid,
+ rte->relid,
+ perminfo->
+ selectedCols));
+ }
+#else
+ numberOfColumnsRead = bms_num_members(rte->selectedCols);
+#endif
+
int numberOfClausesPushed = list_length(allClauses);
CostColumnarScan(root, rel, rte->relid, cpath, numberOfColumnsRead,
@@ -1391,6 +1434,69 @@ AddColumnarScanPath(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte,
}
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+/*
+ * fixup_inherited_columns
+ *
+ * Exact function copied from PG16, as it is static there.
+ *
+ * When user is querying on a table with children, it implicitly accesses
+ * child tables also. So, we also need to check security label of child
+ * tables and columns, but there is no guarantee attribute numbers are
+ * same between the parent and children.
+ * It returns a bitmapset which contains attribute number of the child
+ * table based on the given bitmapset of the parent.
+ */
+static Bitmapset *
+fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns)
+{
+ Bitmapset *result = NULL;
+
+ /*
+ * obviously, no need to do anything here
+ */
+ if (parentId == childId)
+ {
+ return columns;
+ }
+
+ int index = -1;
+ while ((index = bms_next_member(columns, index)) >= 0)
+ {
+ /* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */
+ AttrNumber attno = index + FirstLowInvalidHeapAttributeNumber;
+
+ /*
+ * whole-row-reference shall be fixed-up later
+ */
+ if (attno == InvalidAttrNumber)
+ {
+ result = bms_add_member(result, index);
+ continue;
+ }
+
+ char *attname = get_attname(parentId, attno, false);
+ attno = get_attnum(childId, attname);
+ if (attno == InvalidAttrNumber)
+ {
+ elog(ERROR, "cache lookup failed for attribute %s of relation %u",
+ attname, childId);
+ }
+
+ result = bms_add_member(result,
+ attno - FirstLowInvalidHeapAttributeNumber);
+
+ pfree(attname);
+ }
+
+ return result;
+}
+
+
+#endif
+
+
/*
* CostColumnarScan calculates the cost of scanning the columnar table. The
* cost is estimated by using all stripe metadata to estimate based on the
@@ -1435,7 +1541,8 @@ ColumnarPerStripeScanCost(RelOptInfo *rel, Oid relationId, int numberOfColumnsRe
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
- List *stripeList = StripesForRelfilenode(relation->rd_node);
+ List *stripeList = StripesForRelfilelocator(RelationPhysicalIdentifier_compat(
+ relation));
RelationClose(relation);
uint32 maxColumnCount = 0;
@@ -1492,7 +1599,8 @@ ColumnarTableStripeCount(Oid relationId)
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
- List *stripeList = StripesForRelfilenode(relation->rd_node);
+ List *stripeList = StripesForRelfilelocator(RelationPhysicalIdentifier_compat(
+ relation));
int stripeCount = list_length(stripeList);
RelationClose(relation);
diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c
index 7fbc96419..e7a6bfa95 100644
--- a/src/backend/columnar/columnar_metadata.c
+++ b/src/backend/columnar/columnar_metadata.c
@@ -47,6 +47,9 @@
#include "miscadmin.h"
#include "nodes/execnodes.h"
#include "lib/stringinfo.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "parser/parse_relation.h"
+#endif
#include "port.h"
#include "storage/fd.h"
#include "storage/lmgr.h"
@@ -57,7 +60,12 @@
#include "utils/memutils.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "storage/relfilelocator.h"
+#include "utils/relfilenumbermap.h"
+#else
#include "utils/relfilenodemap.h"
+#endif
#define COLUMNAR_RELOPTION_NAMESPACE "columnar"
#define SLOW_METADATA_ACCESS_WARNING \
@@ -112,7 +120,7 @@ static Oid ColumnarChunkGroupRelationId(void);
static Oid ColumnarChunkIndexRelationId(void);
static Oid ColumnarChunkGroupIndexRelationId(void);
static Oid ColumnarNamespaceId(void);
-static uint64 LookupStorageId(RelFileNode relfilenode);
+static uint64 LookupStorageId(RelFileLocator relfilelocator);
static uint64 GetHighestUsedRowNumber(uint64 storageId);
static void DeleteStorageFromColumnarMetadataTable(Oid metadataTableId,
AttrNumber storageIdAtrrNumber,
@@ -591,14 +599,15 @@ ReadColumnarOptions(Oid regclass, ColumnarOptions *options)
* of columnar.chunk.
*/
void
-SaveStripeSkipList(RelFileNode relfilenode, uint64 stripe, StripeSkipList *chunkList,
+SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
+ StripeSkipList *chunkList,
TupleDesc tupleDescriptor)
{
uint32 columnIndex = 0;
uint32 chunkIndex = 0;
uint32 columnCount = chunkList->columnCount;
- uint64 storageId = LookupStorageId(relfilenode);
+ uint64 storageId = LookupStorageId(relfilelocator);
Oid columnarChunkOid = ColumnarChunkRelationId();
Relation columnarChunk = table_open(columnarChunkOid, RowExclusiveLock);
ModifyState *modifyState = StartModifyRelation(columnarChunk);
@@ -657,10 +666,10 @@ SaveStripeSkipList(RelFileNode relfilenode, uint64 stripe, StripeSkipList *chunk
* SaveChunkGroups saves the metadata for given chunk groups in columnar.chunk_group.
*/
void
-SaveChunkGroups(RelFileNode relfilenode, uint64 stripe,
+SaveChunkGroups(RelFileLocator relfilelocator, uint64 stripe,
List *chunkGroupRowCounts)
{
- uint64 storageId = LookupStorageId(relfilenode);
+ uint64 storageId = LookupStorageId(relfilelocator);
Oid columnarChunkGroupOid = ColumnarChunkGroupRelationId();
Relation columnarChunkGroup = table_open(columnarChunkGroupOid, RowExclusiveLock);
ModifyState *modifyState = StartModifyRelation(columnarChunkGroup);
@@ -693,7 +702,8 @@ SaveChunkGroups(RelFileNode relfilenode, uint64 stripe,
* ReadStripeSkipList fetches chunk metadata for a given stripe.
*/
StripeSkipList *
-ReadStripeSkipList(RelFileNode relfilenode, uint64 stripe, TupleDesc tupleDescriptor,
+ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
+ TupleDesc tupleDescriptor,
uint32 chunkCount, Snapshot snapshot)
{
int32 columnIndex = 0;
@@ -701,15 +711,15 @@ ReadStripeSkipList(RelFileNode relfilenode, uint64 stripe, TupleDesc tupleDescri
uint32 columnCount = tupleDescriptor->natts;
ScanKeyData scanKey[2];
- uint64 storageId = LookupStorageId(relfilenode);
+ uint64 storageId = LookupStorageId(relfilelocator);
Oid columnarChunkOid = ColumnarChunkRelationId();
Relation columnarChunk = table_open(columnarChunkOid, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_columnar_chunk_storageid,
- BTEqualStrategyNumber, F_OIDEQ, UInt64GetDatum(storageId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(storageId));
ScanKeyInit(&scanKey[1], Anum_columnar_chunk_stripe,
- BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(stripe));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(stripe));
Oid indexId = ColumnarChunkIndexRelationId();
bool indexOk = OidIsValid(indexId);
@@ -915,7 +925,7 @@ StripeMetadataLookupRowNumber(Relation relation, uint64 rowNumber, Snapshot snap
uint64 storageId = ColumnarStorageGetStorageId(relation, false);
ScanKeyData scanKey[2];
ScanKeyInit(&scanKey[0], Anum_columnar_stripe_storageid,
- BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(storageId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(storageId));
StrategyNumber strategyNumber = InvalidStrategy;
RegProcedure procedure = InvalidOid;
@@ -930,7 +940,7 @@ StripeMetadataLookupRowNumber(Relation relation, uint64 rowNumber, Snapshot snap
procedure = F_INT8GT;
}
ScanKeyInit(&scanKey[1], Anum_columnar_stripe_first_row_number,
- strategyNumber, procedure, UInt64GetDatum(rowNumber));
+ strategyNumber, procedure, Int64GetDatum(rowNumber));
Relation columnarStripes = table_open(ColumnarStripeRelationId(), AccessShareLock);
@@ -1081,7 +1091,7 @@ FindStripeWithHighestRowNumber(Relation relation, Snapshot snapshot)
uint64 storageId = ColumnarStorageGetStorageId(relation, false);
ScanKeyData scanKey[1];
ScanKeyInit(&scanKey[0], Anum_columnar_stripe_storageid,
- BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(storageId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(storageId));
Relation columnarStripes = table_open(ColumnarStripeRelationId(), AccessShareLock);
@@ -1143,9 +1153,9 @@ ReadChunkGroupRowCounts(uint64 storageId, uint64 stripe, uint32 chunkGroupCount,
ScanKeyData scanKey[2];
ScanKeyInit(&scanKey[0], Anum_columnar_chunkgroup_storageid,
- BTEqualStrategyNumber, F_OIDEQ, UInt64GetDatum(storageId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(storageId));
ScanKeyInit(&scanKey[1], Anum_columnar_chunkgroup_stripe,
- BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(stripe));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(stripe));
Oid indexId = ColumnarChunkGroupIndexRelationId();
bool indexOk = OidIsValid(indexId);
@@ -1235,13 +1245,13 @@ InsertEmptyStripeMetadataRow(uint64 storageId, uint64 stripeId, uint32 columnCou
/*
- * StripesForRelfilenode returns a list of StripeMetadata for stripes
+ * StripesForRelfilelocator returns a list of StripeMetadata for stripes
* of the given relfilenode.
*/
List *
-StripesForRelfilenode(RelFileNode relfilenode)
+StripesForRelfilelocator(RelFileLocator relfilelocator)
{
- uint64 storageId = LookupStorageId(relfilenode);
+ uint64 storageId = LookupStorageId(relfilelocator);
return ReadDataFileStripeList(storageId, GetTransactionSnapshot());
}
@@ -1256,9 +1266,9 @@ StripesForRelfilenode(RelFileNode relfilenode)
* returns 0.
*/
uint64
-GetHighestUsedAddress(RelFileNode relfilenode)
+GetHighestUsedAddress(RelFileLocator relfilelocator)
{
- uint64 storageId = LookupStorageId(relfilenode);
+ uint64 storageId = LookupStorageId(relfilelocator);
uint64 highestUsedAddress = 0;
uint64 highestUsedId = 0;
@@ -1372,9 +1382,9 @@ UpdateStripeMetadataRow(uint64 storageId, uint64 stripeId, bool *update,
ScanKeyData scanKey[2];
ScanKeyInit(&scanKey[0], Anum_columnar_stripe_storageid,
- BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(storageId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(storageId));
ScanKeyInit(&scanKey[1], Anum_columnar_stripe_stripe,
- BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(stripeId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(stripeId));
Oid columnarStripesOid = ColumnarStripeRelationId();
@@ -1451,7 +1461,7 @@ ReadDataFileStripeList(uint64 storageId, Snapshot snapshot)
HeapTuple heapTuple;
ScanKeyInit(&scanKey[0], Anum_columnar_stripe_storageid,
- BTEqualStrategyNumber, F_OIDEQ, Int32GetDatum(storageId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(storageId));
Oid columnarStripesOid = ColumnarStripeRelationId();
@@ -1539,7 +1549,7 @@ BuildStripeMetadata(Relation columnarStripes, HeapTuple heapTuple)
* metadata tables.
*/
void
-DeleteMetadataRows(RelFileNode relfilenode)
+DeleteMetadataRows(RelFileLocator relfilelocator)
{
/*
* During a restore for binary upgrade, metadata tables and indexes may or
@@ -1550,7 +1560,7 @@ DeleteMetadataRows(RelFileNode relfilenode)
return;
}
- uint64 storageId = LookupStorageId(relfilenode);
+ uint64 storageId = LookupStorageId(relfilelocator);
DeleteStorageFromColumnarMetadataTable(ColumnarStripeRelationId(),
Anum_columnar_stripe_storageid,
@@ -1578,7 +1588,7 @@ DeleteStorageFromColumnarMetadataTable(Oid metadataTableId,
{
ScanKeyData scanKey[1];
ScanKeyInit(&scanKey[0], storageIdAtrrNumber, BTEqualStrategyNumber,
- F_INT8EQ, UInt64GetDatum(storageId));
+ F_INT8EQ, Int64GetDatum(storageId));
Relation metadataTable = try_relation_open(metadataTableId, AccessShareLock);
if (metadataTable == NULL)
@@ -1713,7 +1723,14 @@ create_estate_for_relation(Relation rel)
rte->relid = RelationGetRelid(rel);
rte->relkind = rel->rd_rel->relkind;
rte->rellockmode = AccessShareLock;
+
+#if PG_VERSION_NUM >= PG_VERSION_16
+ List *perminfos = NIL;
+ addRTEPermissionInfo(&perminfos, rte);
+ ExecInitRangeTable(estate, list_make1(rte), perminfos);
+#else
ExecInitRangeTable(estate, list_make1(rte));
+#endif
estate->es_output_cid = GetCurrentCommandId(true);
@@ -1917,10 +1934,11 @@ ColumnarNamespaceId(void)
* false if the relation doesn't have a meta page yet.
*/
static uint64
-LookupStorageId(RelFileNode relfilenode)
+LookupStorageId(RelFileLocator relfilelocator)
{
- Oid relationId = RelidByRelfilenode(relfilenode.spcNode,
- relfilenode.relNode);
+ Oid relationId = RelidByRelfilenumber(RelationTablespace_compat(relfilelocator),
+ RelationPhysicalIdentifierNumber_compat(
+ relfilelocator));
Relation relation = relation_open(relationId, AccessShareLock);
uint64 storageId = ColumnarStorageGetStorageId(relation, false);
@@ -1951,7 +1969,7 @@ columnar_relation_storageid(PG_FUNCTION_ARGS)
Oid relationId = PG_GETARG_OID(0);
Relation relation = relation_open(relationId, AccessShareLock);
- if (!pg_class_ownercheck(relationId, GetUserId()))
+ if (!object_ownercheck(RelationRelationId, relationId, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE,
get_rel_name(relationId));
diff --git a/src/backend/columnar/columnar_reader.c b/src/backend/columnar/columnar_reader.c
index 7917a446a..526dd03cb 100644
--- a/src/backend/columnar/columnar_reader.c
+++ b/src/backend/columnar/columnar_reader.c
@@ -254,8 +254,9 @@ ColumnarReadFlushPendingWrites(ColumnarReadState *readState)
{
Assert(!readState->snapshotRegisteredByUs);
- Oid relfilenode = readState->relation->rd_node.relNode;
- FlushWriteStateForRelfilenode(relfilenode, GetCurrentSubTransactionId());
+ RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat(
+ RelationPhysicalIdentifier_compat(readState->relation));
+ FlushWriteStateForRelfilenumber(relfilenumber, GetCurrentSubTransactionId());
if (readState->snapshot == InvalidSnapshot || !IsMVCCSnapshot(readState->snapshot))
{
@@ -984,7 +985,8 @@ ColumnarTableRowCount(Relation relation)
{
ListCell *stripeMetadataCell = NULL;
uint64 totalRowCount = 0;
- List *stripeList = StripesForRelfilenode(relation->rd_node);
+ List *stripeList = StripesForRelfilelocator(RelationPhysicalIdentifier_compat(
+ relation));
foreach(stripeMetadataCell, stripeList)
{
@@ -1012,7 +1014,8 @@ LoadFilteredStripeBuffers(Relation relation, StripeMetadata *stripeMetadata,
bool *projectedColumnMask = ProjectedColumnMask(columnCount, projectedColumnList);
- StripeSkipList *stripeSkipList = ReadStripeSkipList(relation->rd_node,
+ StripeSkipList *stripeSkipList = ReadStripeSkipList(RelationPhysicalIdentifier_compat(
+ relation),
stripeMetadata->id,
tupleDescriptor,
stripeMetadata->chunkCount,
diff --git a/src/backend/columnar/columnar_storage.c b/src/backend/columnar/columnar_storage.c
index 9712e7160..21aa7ab9c 100644
--- a/src/backend/columnar/columnar_storage.c
+++ b/src/backend/columnar/columnar_storage.c
@@ -169,7 +169,11 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId)
}
/* create two pages */
+#if PG_VERSION_NUM >= PG_VERSION_16
+ PGIOAlignedBlock block;
+#else
PGAlignedBlock block;
+#endif
Page page = block.data;
/* write metapage */
@@ -188,7 +192,7 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId)
(char *) &metapage, sizeof(ColumnarMetapage));
phdr->pd_lower += sizeof(ColumnarMetapage);
- log_newpage(&srel->smgr_rnode.node, MAIN_FORKNUM,
+ log_newpage(RelationPhysicalIdentifierBackend_compat(&srel), MAIN_FORKNUM,
COLUMNAR_METAPAGE_BLOCKNO, page, true);
PageSetChecksumInplace(page, COLUMNAR_METAPAGE_BLOCKNO);
smgrextend(srel, MAIN_FORKNUM, COLUMNAR_METAPAGE_BLOCKNO, page, true);
@@ -196,7 +200,7 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId)
/* write empty page */
PageInit(page, BLCKSZ, 0);
- log_newpage(&srel->smgr_rnode.node, MAIN_FORKNUM,
+ log_newpage(RelationPhysicalIdentifierBackend_compat(&srel), MAIN_FORKNUM,
COLUMNAR_EMPTY_BLOCKNO, page, true);
PageSetChecksumInplace(page, COLUMNAR_EMPTY_BLOCKNO);
smgrextend(srel, MAIN_FORKNUM, COLUMNAR_EMPTY_BLOCKNO, page, true);
diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c
index 4a08feb54..dade931df 100644
--- a/src/backend/columnar/columnar_tableam.c
+++ b/src/backend/columnar/columnar_tableam.c
@@ -31,6 +31,7 @@
#include "executor/executor.h"
#include "nodes/makefuncs.h"
#include "optimizer/plancat.h"
+#include "pg_version_compat.h"
#include "pgstat.h"
#include "safe_lib.h"
#include "storage/bufmgr.h"
@@ -206,7 +207,8 @@ columnar_beginscan_extended(Relation relation, Snapshot snapshot,
uint32 flags, Bitmapset *attr_needed, List *scanQual)
{
CheckCitusColumnarVersion(ERROR);
- Oid relfilenode = relation->rd_node.relNode;
+ RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat(
+ RelationPhysicalIdentifier_compat(relation));
/*
* A memory context to use for scan-wide data, including the lazily
@@ -236,7 +238,7 @@ columnar_beginscan_extended(Relation relation, Snapshot snapshot,
scan->scanQual = copyObject(scanQual);
scan->scanContext = scanContext;
- if (PendingWritesInUpperTransactions(relfilenode, GetCurrentSubTransactionId()))
+ if (PendingWritesInUpperTransactions(relfilenumber, GetCurrentSubTransactionId()))
{
elog(ERROR,
"cannot read from table when there is unflushed data in upper transactions");
@@ -432,8 +434,9 @@ columnar_index_fetch_begin(Relation rel)
{
CheckCitusColumnarVersion(ERROR);
- Oid relfilenode = rel->rd_node.relNode;
- if (PendingWritesInUpperTransactions(relfilenode, GetCurrentSubTransactionId()))
+ RelFileNumber relfilenumber = RelationPhysicalIdentifierNumber_compat(
+ RelationPhysicalIdentifier_compat(rel));
+ if (PendingWritesInUpperTransactions(relfilenumber, GetCurrentSubTransactionId()))
{
/* XXX: maybe we can just flush the data and continue */
elog(ERROR, "cannot read from index when there is unflushed data in "
@@ -815,7 +818,7 @@ static TM_Result
columnar_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
CommandId cid, Snapshot snapshot, Snapshot crosscheck,
bool wait, TM_FailureData *tmfd,
- LockTupleMode *lockmode, bool *update_indexes)
+ LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
{
elog(ERROR, "columnar_tuple_update not implemented");
}
@@ -841,11 +844,11 @@ columnar_finish_bulk_insert(Relation relation, int options)
static void
-columnar_relation_set_new_filenode(Relation rel,
- const RelFileNode *newrnode,
- char persistence,
- TransactionId *freezeXid,
- MultiXactId *minmulti)
+columnar_relation_set_new_filelocator(Relation rel,
+ const RelFileLocator *newrlocator,
+ char persistence,
+ TransactionId *freezeXid,
+ MultiXactId *minmulti)
{
CheckCitusColumnarVersion(ERROR);
@@ -861,16 +864,19 @@ columnar_relation_set_new_filenode(Relation rel,
* state. If they are equal, this is a new relation object and we don't
* need to clean anything.
*/
- if (rel->rd_node.relNode != newrnode->relNode)
+ if (RelationPhysicalIdentifierNumber_compat(RelationPhysicalIdentifier_compat(rel)) !=
+ RelationPhysicalIdentifierNumberPtr_compat(newrlocator))
{
- MarkRelfilenodeDropped(rel->rd_node.relNode, GetCurrentSubTransactionId());
+ MarkRelfilenumberDropped(RelationPhysicalIdentifierNumber_compat(
+ RelationPhysicalIdentifier_compat(rel)),
+ GetCurrentSubTransactionId());
- DeleteMetadataRows(rel->rd_node);
+ DeleteMetadataRows(RelationPhysicalIdentifier_compat(rel));
}
*freezeXid = RecentXmin;
*minmulti = GetOldestMultiXactId();
- SMgrRelation srel = RelationCreateStorage_compat(*newrnode, persistence, true);
+ SMgrRelation srel = RelationCreateStorage_compat(*newrlocator, persistence, true);
ColumnarStorageInit(srel, ColumnarMetadataNewStorageId());
InitColumnarOptions(rel->rd_id);
@@ -885,12 +891,12 @@ static void
columnar_relation_nontransactional_truncate(Relation rel)
{
CheckCitusColumnarVersion(ERROR);
- RelFileNode relfilenode = rel->rd_node;
+ RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
- NonTransactionDropWriteState(relfilenode.relNode);
+ NonTransactionDropWriteState(RelationPhysicalIdentifierNumber_compat(relfilelocator));
/* Delete old relfilenode metadata */
- DeleteMetadataRows(relfilenode);
+ DeleteMetadataRows(relfilelocator);
/*
* No need to set new relfilenode, since the table was created in this
@@ -907,7 +913,7 @@ columnar_relation_nontransactional_truncate(Relation rel)
static void
-columnar_relation_copy_data(Relation rel, const RelFileNode *newrnode)
+columnar_relation_copy_data(Relation rel, const RelFileLocator *newrnode)
{
elog(ERROR, "columnar_relation_copy_data not implemented");
}
@@ -953,7 +959,8 @@ columnar_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
ColumnarOptions columnarOptions = { 0 };
ReadColumnarOptions(OldHeap->rd_id, &columnarOptions);
- ColumnarWriteState *writeState = ColumnarBeginWrite(NewHeap->rd_node,
+ ColumnarWriteState *writeState = ColumnarBeginWrite(RelationPhysicalIdentifier_compat(
+ NewHeap),
columnarOptions,
targetDesc);
@@ -1028,7 +1035,8 @@ NeededColumnsList(TupleDesc tupdesc, Bitmapset *attr_needed)
static uint64
ColumnarTableTupleCount(Relation relation)
{
- List *stripeList = StripesForRelfilenode(relation->rd_node);
+ List *stripeList = StripesForRelfilelocator(RelationPhysicalIdentifier_compat(
+ relation));
uint64 tupleCount = 0;
ListCell *lc = NULL;
@@ -1091,12 +1099,38 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
List *indexList = RelationGetIndexList(rel);
int nindexes = list_length(indexList);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ struct VacuumCutoffs cutoffs;
+ vacuum_get_cutoffs(rel, params, &cutoffs);
+
+ Assert(MultiXactIdPrecedesOrEquals(cutoffs.MultiXactCutoff, cutoffs.OldestMxact));
+ Assert(TransactionIdPrecedesOrEquals(cutoffs.FreezeLimit, cutoffs.OldestXmin));
+
+ /*
+ * Columnar storage doesn't hold any transaction IDs, so we can always
+ * just advance to the most aggressive value.
+ */
+ TransactionId newRelFrozenXid = cutoffs.OldestXmin;
+ MultiXactId newRelminMxid = cutoffs.OldestMxact;
+ double new_live_tuples = ColumnarTableTupleCount(rel);
+
+ /* all visible pages are always 0 */
+ BlockNumber new_rel_allvisible = 0;
+
+ bool frozenxid_updated;
+ bool minmulti_updated;
+
+ vac_update_relstats(rel, new_rel_pages, new_live_tuples,
+ new_rel_allvisible, nindexes > 0,
+ newRelFrozenXid, newRelminMxid,
+ &frozenxid_updated, &minmulti_updated, false);
+#else
TransactionId oldestXmin;
TransactionId freezeLimit;
MultiXactId multiXactCutoff;
/* initialize xids */
-#if PG_VERSION_NUM >= PG_VERSION_15
+#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16)
MultiXactId oldestMxact;
vacuum_set_xid_limits(rel,
params->freeze_min_age,
@@ -1126,7 +1160,7 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
* just advance to the most aggressive value.
*/
TransactionId newRelFrozenXid = oldestXmin;
-#if PG_VERSION_NUM >= PG_VERSION_15
+#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16)
MultiXactId newRelminMxid = oldestMxact;
#else
MultiXactId newRelminMxid = multiXactCutoff;
@@ -1137,7 +1171,7 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
/* all visible pages are always 0 */
BlockNumber new_rel_allvisible = 0;
-#if PG_VERSION_NUM >= PG_VERSION_15
+#if (PG_VERSION_NUM >= PG_VERSION_15) && (PG_VERSION_NUM < PG_VERSION_16)
bool frozenxid_updated;
bool minmulti_updated;
@@ -1149,6 +1183,7 @@ columnar_vacuum_rel(Relation rel, VacuumParams *params,
vac_update_relstats(rel, new_rel_pages, new_live_tuples,
new_rel_allvisible, nindexes > 0,
newRelFrozenXid, newRelminMxid, false);
+#endif
#endif
pgstat_report_vacuum(RelationGetRelid(rel),
@@ -1166,7 +1201,7 @@ static void
LogRelationStats(Relation rel, int elevel)
{
ListCell *stripeMetadataCell = NULL;
- RelFileNode relfilenode = rel->rd_node;
+ RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
StringInfo infoBuf = makeStringInfo();
int compressionStats[COMPRESSION_COUNT] = { 0 };
@@ -1177,13 +1212,13 @@ LogRelationStats(Relation rel, int elevel)
uint64 droppedChunksWithData = 0;
uint64 totalDecompressedLength = 0;
- List *stripeList = StripesForRelfilenode(relfilenode);
+ List *stripeList = StripesForRelfilelocator(relfilelocator);
int stripeCount = list_length(stripeList);
foreach(stripeMetadataCell, stripeList)
{
StripeMetadata *stripe = lfirst(stripeMetadataCell);
- StripeSkipList *skiplist = ReadStripeSkipList(relfilenode, stripe->id,
+ StripeSkipList *skiplist = ReadStripeSkipList(relfilelocator, stripe->id,
RelationGetDescr(rel),
stripe->chunkCount,
GetTransactionSnapshot());
@@ -1319,7 +1354,8 @@ TruncateColumnar(Relation rel, int elevel)
* new stripes be added beyond highestPhysicalAddress while
* we're truncating.
*/
- uint64 newDataReservation = Max(GetHighestUsedAddress(rel->rd_node) + 1,
+ uint64 newDataReservation = Max(GetHighestUsedAddress(
+ RelationPhysicalIdentifier_compat(rel)) + 1,
ColumnarFirstLogicalOffset);
BlockNumber old_rel_pages = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM);
@@ -1826,8 +1862,8 @@ TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetIt
Datum *abbrev = NULL;
Datum tsDatum;
bool tsDatumIsNull;
- if (!tuplesort_getdatum(tupleSort, forwardDirection, &tsDatum,
- &tsDatumIsNull, abbrev))
+ if (!tuplesort_getdatum_compat(tupleSort, forwardDirection, false,
+ &tsDatum, &tsDatumIsNull, abbrev))
{
ItemPointerSetInvalid(&tsItemPointerData);
break;
@@ -2068,12 +2104,13 @@ ColumnarTableDropHook(Oid relid)
* tableam tables storage is managed by postgres.
*/
Relation rel = table_open(relid, AccessExclusiveLock);
- RelFileNode relfilenode = rel->rd_node;
+ RelFileLocator relfilelocator = RelationPhysicalIdentifier_compat(rel);
- DeleteMetadataRows(relfilenode);
+ DeleteMetadataRows(relfilelocator);
DeleteColumnarTableOptions(rel->rd_id, true);
- MarkRelfilenodeDropped(relfilenode.relNode, GetCurrentSubTransactionId());
+ MarkRelfilenumberDropped(RelationPhysicalIdentifierNumber_compat(relfilelocator),
+ GetCurrentSubTransactionId());
/* keep the lock since we did physical changes to the relation */
table_close(rel, NoLock);
@@ -2490,7 +2527,11 @@ static const TableAmRoutine columnar_am_methods = {
.tuple_lock = columnar_tuple_lock,
.finish_bulk_insert = columnar_finish_bulk_insert,
- .relation_set_new_filenode = columnar_relation_set_new_filenode,
+#if PG_VERSION_NUM >= PG_VERSION_16
+ .relation_set_new_filelocator = columnar_relation_set_new_filelocator,
+#else
+ .relation_set_new_filenode = columnar_relation_set_new_filelocator,
+#endif
.relation_nontransactional_truncate = columnar_relation_nontransactional_truncate,
.relation_copy_data = columnar_relation_copy_data,
.relation_copy_for_cluster = columnar_relation_copy_for_cluster,
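The rd_node to RelationPhysicalIdentifier_compat() rewrites in this file rely on compat macros whose definitions are not shown in this hunk (they live in pg_version_compat.h). A minimal sketch of what those macros presumably look like, assuming the usual approach of mapping PG16's RelFileLocator fields back to the pre-16 RelFileNode names, plus the extra "copy" flag PG16 added to tuplesort_getdatum():

#if PG_VERSION_NUM >= PG_VERSION_16
#define RelationPhysicalIdentifier_compat(rel) ((rel)->rd_locator)
#define RelationPhysicalIdentifierNumber_compat(locator) ((locator).relNumber)
#define RelationTablespace_compat(locator) ((locator).spcOid)

/* PG16 lets the caller choose whether the returned datum is copied */
#define tuplesort_getdatum_compat(state, forward, copy, val, isnull, abbrev) \
	tuplesort_getdatum(state, forward, copy, val, isnull, abbrev)
#else
/* pre-16 servers spell these types and fields differently */
typedef RelFileNode RelFileLocator;
typedef Oid RelFileNumber;

#define RelationPhysicalIdentifier_compat(rel) ((rel)->rd_node)
#define RelationPhysicalIdentifierNumber_compat(locator) ((locator).relNode)
#define RelationTablespace_compat(locator) ((locator).spcNode)

/* pre-16 tuplesort_getdatum() has no copy flag; it always copies */
#define tuplesort_getdatum_compat(state, forward, copy, val, isnull, abbrev) \
	tuplesort_getdatum(state, forward, val, isnull, abbrev)
#endif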
diff --git a/src/backend/columnar/columnar_writer.c b/src/backend/columnar/columnar_writer.c
index 8e35b59b1..3b510ce74 100644
--- a/src/backend/columnar/columnar_writer.c
+++ b/src/backend/columnar/columnar_writer.c
@@ -22,12 +22,18 @@
#include "access/nbtree.h"
#include "catalog/pg_am.h"
#include "miscadmin.h"
+#include "pg_version_compat.h"
#include "storage/fd.h"
#include "storage/smgr.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/rel.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "storage/relfilelocator.h"
+#include "utils/relfilenumbermap.h"
+#else
#include "utils/relfilenodemap.h"
+#endif
#include "columnar/columnar.h"
#include "columnar/columnar_storage.h"
@@ -37,7 +43,7 @@ struct ColumnarWriteState
{
TupleDesc tupleDescriptor;
FmgrInfo **comparisonFunctionArray;
- RelFileNode relfilenode;
+ RelFileLocator relfilelocator;
MemoryContext stripeWriteContext;
MemoryContext perTupleContext;
@@ -84,7 +90,7 @@ static StringInfo CopyStringInfo(StringInfo sourceString);
* data load operation.
*/
ColumnarWriteState *
-ColumnarBeginWrite(RelFileNode relfilenode,
+ColumnarBeginWrite(RelFileLocator relfilelocator,
ColumnarOptions options,
TupleDesc tupleDescriptor)
{
@@ -124,7 +130,7 @@ ColumnarBeginWrite(RelFileNode relfilenode,
options.chunkRowCount);
ColumnarWriteState *writeState = palloc0(sizeof(ColumnarWriteState));
- writeState->relfilenode = relfilenode;
+ writeState->relfilelocator = relfilelocator;
writeState->options = options;
writeState->tupleDescriptor = CreateTupleDescCopy(tupleDescriptor);
writeState->comparisonFunctionArray = comparisonFunctionArray;
@@ -174,8 +180,10 @@ ColumnarWriteRow(ColumnarWriteState *writeState, Datum *columnValues, bool *colu
writeState->stripeSkipList = stripeSkipList;
writeState->compressionBuffer = makeStringInfo();
- Oid relationId = RelidByRelfilenode(writeState->relfilenode.spcNode,
- writeState->relfilenode.relNode);
+ Oid relationId = RelidByRelfilenumber(RelationTablespace_compat(
+ writeState->relfilelocator),
+ RelationPhysicalIdentifierNumber_compat(
+ writeState->relfilelocator));
Relation relation = relation_open(relationId, NoLock);
writeState->emptyStripeReservation =
ReserveEmptyStripe(relation, columnCount, chunkRowCount,
@@ -393,8 +401,10 @@ FlushStripe(ColumnarWriteState *writeState)
elog(DEBUG1, "Flushing Stripe of size %d", stripeBuffers->rowCount);
- Oid relationId = RelidByRelfilenode(writeState->relfilenode.spcNode,
- writeState->relfilenode.relNode);
+ Oid relationId = RelidByRelfilenumber(RelationTablespace_compat(
+ writeState->relfilelocator),
+ RelationPhysicalIdentifierNumber_compat(
+ writeState->relfilelocator));
Relation relation = relation_open(relationId, NoLock);
/*
@@ -486,10 +496,10 @@ FlushStripe(ColumnarWriteState *writeState)
}
}
- SaveChunkGroups(writeState->relfilenode,
+ SaveChunkGroups(writeState->relfilelocator,
stripeMetadata->id,
writeState->chunkGroupRowCounts);
- SaveStripeSkipList(writeState->relfilenode,
+ SaveStripeSkipList(writeState->relfilelocator,
stripeMetadata->id,
stripeSkipList, tupleDescriptor);
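Similarly, RelidByRelfilenumber() exists under that name only on PG16; older servers provide RelidByRelfilenode() with the same arguments. The compat shim is presumably a one-liner along these lines:

#if PG_VERSION_NUM < PG_VERSION_16
#define RelidByRelfilenumber(tablespaceId, relfilenumber) \
	RelidByRelfilenode(tablespaceId, relfilenumber)
#endif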
diff --git a/src/backend/columnar/sql/citus_columnar--11.3-1--12.2-1.sql b/src/backend/columnar/sql/citus_columnar--11.3-1--12.2-1.sql
new file mode 100644
index 000000000..364241dc3
--- /dev/null
+++ b/src/backend/columnar/sql/citus_columnar--11.3-1--12.2-1.sql
@@ -0,0 +1 @@
+-- citus_columnar--11.3-1--12.2-1
diff --git a/src/backend/columnar/sql/downgrades/citus_columnar--12.2-1--11.3-1.sql b/src/backend/columnar/sql/downgrades/citus_columnar--12.2-1--11.3-1.sql
new file mode 100644
index 000000000..34a0c2459
--- /dev/null
+++ b/src/backend/columnar/sql/downgrades/citus_columnar--12.2-1--11.3-1.sql
@@ -0,0 +1 @@
+-- citus_columnar--12.2-1--11.3-1
diff --git a/src/backend/columnar/write_state_management.c b/src/backend/columnar/write_state_management.c
index e3bfc9260..27d902e61 100644
--- a/src/backend/columnar/write_state_management.c
+++ b/src/backend/columnar/write_state_management.c
@@ -29,6 +29,7 @@
#include "executor/executor.h"
#include "nodes/makefuncs.h"
#include "optimizer/plancat.h"
+#include "pg_version_compat.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
@@ -77,7 +78,7 @@ typedef struct SubXidWriteState
typedef struct WriteStateMapEntry
{
/* key of the entry */
- Oid relfilenode;
+ RelFileNumber relfilenumber;
/*
* If a table is dropped, we set dropped to true and set dropSubXid to the
@@ -132,7 +133,7 @@ columnar_init_write_state(Relation relation, TupleDesc tupdesc,
HASHCTL info;
uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
memset(&info, 0, sizeof(info));
- info.keysize = sizeof(Oid);
+ info.keysize = sizeof(RelFileNumber);
info.hash = oid_hash;
info.entrysize = sizeof(WriteStateMapEntry);
info.hcxt = WriteStateContext;
@@ -146,7 +147,10 @@ columnar_init_write_state(Relation relation, TupleDesc tupdesc,
MemoryContextRegisterResetCallback(WriteStateContext, &cleanupCallback);
}
- WriteStateMapEntry *hashEntry = hash_search(WriteStateMap, &relation->rd_node.relNode,
+ WriteStateMapEntry *hashEntry = hash_search(WriteStateMap,
+ &RelationPhysicalIdentifierNumber_compat(
+ RelationPhysicalIdentifier_compat(
+ relation)),
HASH_ENTER, &found);
if (!found)
{
@@ -189,7 +193,8 @@ columnar_init_write_state(Relation relation, TupleDesc tupdesc,
ReadColumnarOptions(tupSlotRelationId, &columnarOptions);
SubXidWriteState *stackEntry = palloc0(sizeof(SubXidWriteState));
- stackEntry->writeState = ColumnarBeginWrite(relation->rd_node,
+ stackEntry->writeState = ColumnarBeginWrite(RelationPhysicalIdentifier_compat(
+ relation),
columnarOptions,
tupdesc);
stackEntry->subXid = currentSubXid;
@@ -206,14 +211,16 @@ columnar_init_write_state(Relation relation, TupleDesc tupdesc,
* Flushes pending writes for given relfilenode in the given subtransaction.
*/
void
-FlushWriteStateForRelfilenode(Oid relfilenode, SubTransactionId currentSubXid)
+FlushWriteStateForRelfilenumber(RelFileNumber relfilenumber,
+ SubTransactionId currentSubXid)
{
if (WriteStateMap == NULL)
{
return;
}
- WriteStateMapEntry *entry = hash_search(WriteStateMap, &relfilenode, HASH_FIND, NULL);
+ WriteStateMapEntry *entry = hash_search(WriteStateMap, &relfilenumber, HASH_FIND,
+ NULL);
Assert(!entry || !entry->dropped);
@@ -320,14 +327,14 @@ DiscardWriteStateForAllRels(SubTransactionId currentSubXid, SubTransactionId par
* Called when the given relfilenode is dropped.
*/
void
-MarkRelfilenodeDropped(Oid relfilenode, SubTransactionId currentSubXid)
+MarkRelfilenumberDropped(RelFileNumber relfilenumber, SubTransactionId currentSubXid)
{
if (WriteStateMap == NULL)
{
return;
}
- WriteStateMapEntry *entry = hash_search(WriteStateMap, &relfilenode, HASH_FIND,
+ WriteStateMapEntry *entry = hash_search(WriteStateMap, &relfilenumber, HASH_FIND,
NULL);
if (!entry || entry->dropped)
{
@@ -343,11 +350,11 @@ MarkRelfilenodeDropped(Oid relfilenode, SubTransactionId currentSubXid)
* Called when the given relfilenode is dropped in non-transactional TRUNCATE.
*/
void
-NonTransactionDropWriteState(Oid relfilenode)
+NonTransactionDropWriteState(RelFileNumber relfilenumber)
{
if (WriteStateMap)
{
- hash_search(WriteStateMap, &relfilenode, HASH_REMOVE, false);
+ hash_search(WriteStateMap, &relfilenumber, HASH_REMOVE, false);
}
}
@@ -356,14 +363,16 @@ NonTransactionDropWriteState(Oid relfilenode)
* Returns true if there are any pending writes in upper transactions.
*/
bool
-PendingWritesInUpperTransactions(Oid relfilenode, SubTransactionId currentSubXid)
+PendingWritesInUpperTransactions(RelFileNumber relfilenumber,
+ SubTransactionId currentSubXid)
{
if (WriteStateMap == NULL)
{
return false;
}
- WriteStateMapEntry *entry = hash_search(WriteStateMap, &relfilenode, HASH_FIND, NULL);
+ WriteStateMapEntry *entry = hash_search(WriteStateMap, &relfilenumber, HASH_FIND,
+ NULL);
if (entry && entry->writeStateStack != NULL)
{
diff --git a/src/backend/distributed/cdc/cdc_decoder_utils.c b/src/backend/distributed/cdc/cdc_decoder_utils.c
index a69f307ba..f5b23aa12 100644
--- a/src/backend/distributed/cdc/cdc_decoder_utils.c
+++ b/src/backend/distributed/cdc/cdc_decoder_utils.c
@@ -72,7 +72,7 @@ DistShardRelationId(void)
/*
- * DistShardRelationId returns the relation id of the pg_dist_shard
+ * DistShardShardidIndexId returns the relation id of the pg_dist_shard_shardid_index
*/
static Oid
DistShardShardidIndexId(void)
@@ -87,7 +87,7 @@ DistShardShardidIndexId(void)
/*
- * DistShardRelationId returns the relation id of the pg_dist_shard
+ * DistPartitionRelationId returns the relation id of the pg_dist_partition
*/
static Oid
DistPartitionRelationId(void)
@@ -184,9 +184,9 @@ CdcExtractShardIdFromTableName(const char *tableName, bool missingOk)
/*
- * CdcGetLocalGroupId returns the group identifier of the local node. The function assumes
- * that pg_dist_local_node_group has exactly one row and has at least one column.
- * Otherwise, the function errors out.
+ * CdcGetLocalGroupId returns the group identifier of the local node. The
+ * function assumes that pg_dist_local_group has exactly one row and has at
+ * least one column. Otherwise, the function errors out.
*/
static int32
CdcGetLocalGroupId(void)
@@ -376,7 +376,8 @@ CdcIsReferenceTableViaCatalog(Oid relationId)
* A table is a reference table when its partition method is 'none'
* and replication model is 'two phase commit'
*/
- return partitionMethodChar == 'n' && replicationModelChar == 't';
+ return partitionMethodChar == DISTRIBUTE_BY_NONE &&
+ replicationModelChar == REPLICATION_MODEL_2PC;
}
diff --git a/src/backend/distributed/citus.control b/src/backend/distributed/citus.control
index 292525f27..91c0a3be7 100644
--- a/src/backend/distributed/citus.control
+++ b/src/backend/distributed/citus.control
@@ -1,6 +1,6 @@
# Citus extension
comment = 'Citus distributed database'
-default_version = '12.0-1'
+default_version = '12.2-1'
module_pathname = '$libdir/citus'
relocatable = false
schema = pg_catalog
diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c
index 788a3b8b0..8c2736a28 100644
--- a/src/backend/distributed/commands/alter_table.c
+++ b/src/backend/distributed/commands/alter_table.c
@@ -53,11 +53,13 @@
#include "distributed/multi_executor.h"
#include "distributed/multi_logical_planner.h"
#include "distributed/multi_partitioning_utils.h"
+#include "distributed/namespace_utils.h"
#include "distributed/reference_table_utils.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/replication_origin_session_utils.h"
#include "distributed/shared_library_init.h"
#include "distributed/shard_utils.h"
+#include "distributed/tenant_schema_metadata.h"
#include "distributed/worker_protocol.h"
#include "distributed/worker_transaction.h"
#include "executor/spi.h"
@@ -1764,10 +1766,7 @@ CreateMaterializedViewDDLCommand(Oid matViewOid)
* Set search_path to NIL so that all objects outside of pg_catalog will be
* schema-prefixed.
*/
- OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext);
- overridePath->schemas = NIL;
- overridePath->addCatalog = true;
- PushOverrideSearchPath(overridePath);
+ int saveNestLevel = PushEmptySearchPath();
/*
 * Push the transaction snapshot to be able to get view definition with pg_get_viewdef
@@ -1779,7 +1778,7 @@ CreateMaterializedViewDDLCommand(Oid matViewOid)
char *viewDefinition = TextDatumGetCString(viewDefinitionDatum);
PopActiveSnapshot();
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
appendStringInfo(query, "AS %s", viewDefinition);
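PushEmptySearchPath()/PopEmptySearchPath() replace the PushOverrideSearchPath()/PopOverrideSearchPath() pair that PG16 removed. Their definitions are not part of this hunk; a sketch of the likely implementation, assuming it scopes the change with a GUC nest level (the exact search_path value used here is an assumption):

/* Temporarily force an empty search_path so only pg_catalog is visible. */
int
PushEmptySearchPath(void)
{
	int saveNestLevel = NewGUCNestLevel();

	(void) set_config_option("search_path", "'pg_catalog'",
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);

	return saveNestLevel;
}

/* Undo PushEmptySearchPath() by rolling back to the saved nest level. */
void
PopEmptySearchPath(int saveNestLevel)
{
	AtEOXact_GUC(true, saveNestLevel);
}

The nest-level approach also makes cleanup transaction-safe: if an error aborts the transaction before PopEmptySearchPath() runs, the GUC machinery restores search_path anyway.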
diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c
index d1ba25c22..c713ce099 100644
--- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c
+++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c
@@ -892,7 +892,7 @@ GetConstraintNameList(Oid relationId)
Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid,
- BTEqualStrategyNumber, F_OIDEQ, relationId);
+ BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId));
bool useIndex = true;
SysScanDesc scanDescriptor = systable_beginscan(pgConstraint,
@@ -1478,11 +1478,20 @@ InsertMetadataForCitusLocalTable(Oid citusLocalTableId, uint64 shardId,
static void
FinalizeCitusLocalTableCreation(Oid relationId)
{
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /*
+ * PG16+ supports truncate triggers on foreign tables
+ */
+ if (RegularTable(relationId) || IsForeignTable(relationId))
+#else
+
/*
* If it is a foreign table, then skip creating citus truncate trigger
* as foreign tables do not support truncate triggers.
*/
if (RegularTable(relationId))
+#endif
{
CreateTruncateTrigger(relationId);
}
diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c
index eb4dd9654..023197e15 100644
--- a/src/backend/distributed/commands/collation.c
+++ b/src/backend/distributed/commands/collation.c
@@ -188,7 +188,16 @@ CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollati
pfree(collcollate);
pfree(collctype);
#endif
-
+#if PG_VERSION_NUM >= PG_VERSION_16
+ char *collicurules = NULL;
+ datum = SysCacheGetAttr(COLLOID, heapTuple, Anum_pg_collation_collicurules, &isnull);
+ if (!isnull)
+ {
+ collicurules = TextDatumGetCString(datum);
+ appendStringInfo(&collationNameDef, ", rules = %s",
+ quote_literal_cstr(collicurules));
+ }
+#endif
if (!collisdeterministic)
{
appendStringInfoString(&collationNameDef, ", deterministic = false");
diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c
index 8810e6db9..dc06692b3 100644
--- a/src/backend/distributed/commands/create_distributed_table.c
+++ b/src/backend/distributed/commands/create_distributed_table.c
@@ -60,6 +60,7 @@
#include "distributed/reference_table_utils.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/remote_commands.h"
+#include "distributed/replicate_none_dist_table_shard.h"
#include "distributed/resource_lock.h"
#include "distributed/shard_cleaner.h"
#include "distributed/shard_rebalancer.h"
@@ -139,6 +140,10 @@ static CitusTableParams DecideCitusTableParams(CitusTableType tableType,
distributedTableParams);
static void CreateCitusTable(Oid relationId, CitusTableType tableType,
DistributedTableParams *distributedTableParams);
+static void ConvertCitusLocalTableToTableType(Oid relationId,
+ CitusTableType tableType,
+ DistributedTableParams *
+ distributedTableParams);
static void CreateHashDistributedTableShards(Oid relationId, int shardCount,
Oid colocatedTableId, bool localTableEmpty);
static void CreateSingleShardTableShard(Oid relationId, Oid colocatedTableId,
@@ -159,7 +164,7 @@ static void EnsureCitusTableCanBeCreated(Oid relationOid);
static void PropagatePrerequisiteObjectsForDistributedTable(Oid relationId);
static void EnsureDistributedSequencesHaveOneType(Oid relationId,
List *seqInfoList);
-static void CopyLocalDataIntoShards(Oid relationId);
+static void CopyLocalDataIntoShards(Oid distributedTableId);
static List * TupleDescColumnNameList(TupleDesc tupleDescriptor);
#if (PG_VERSION_NUM >= PG_VERSION_15)
@@ -172,10 +177,10 @@ static bool is_valid_numeric_typmod(int32 typmod);
static bool DistributionColumnUsesGeneratedStoredColumn(TupleDesc relationDesc,
Var *distributionColumn);
static bool CanUseExclusiveConnections(Oid relationId, bool localTableEmpty);
-static void DoCopyFromLocalTableIntoShards(Relation distributedRelation,
- DestReceiver *copyDest,
- TupleTableSlot *slot,
- EState *estate);
+static uint64 DoCopyFromLocalTableIntoShards(Relation distributedRelation,
+ DestReceiver *copyDest,
+ TupleTableSlot *slot,
+ EState *estate);
static void ErrorIfTemporaryTable(Oid relationId);
static void ErrorIfForeignTable(Oid relationOid);
static void SendAddLocalTableToMetadataCommandOutsideTransaction(Oid relationId);
@@ -1019,19 +1024,29 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName,
/*
- * CreateReferenceTable is a wrapper around CreateCitusTable that creates a
- * reference table.
+ * CreateReferenceTable creates a reference table.
*/
void
CreateReferenceTable(Oid relationId)
{
- CreateCitusTable(relationId, REFERENCE_TABLE, NULL);
+ if (IsCitusTableType(relationId, CITUS_LOCAL_TABLE))
+ {
+ /*
+ * Create the shard of given Citus local table on workers to convert
+ * it into a reference table.
+ */
+ ConvertCitusLocalTableToTableType(relationId, REFERENCE_TABLE, NULL);
+ }
+ else
+ {
+ CreateCitusTable(relationId, REFERENCE_TABLE, NULL);
+ }
}
/*
- * CreateSingleShardTable is a wrapper around CreateCitusTable that creates a
- * single shard distributed table that doesn't have a shard key.
+ * CreateSingleShardTable creates a single shard distributed table that
+ * doesn't have a shard key.
*/
void
CreateSingleShardTable(Oid relationId, ColocationParam colocationParam)
@@ -1042,7 +1057,21 @@ CreateSingleShardTable(Oid relationId, ColocationParam colocationParam)
.shardCountIsStrict = true,
.distributionColumnName = NULL
};
- CreateCitusTable(relationId, SINGLE_SHARD_DISTRIBUTED, &distributedTableParams);
+
+ if (IsCitusTableType(relationId, CITUS_LOCAL_TABLE))
+ {
+ /*
+ * Create the shard of given Citus local table on appropriate node
+ * and drop the local one to convert it into a single-shard distributed
+ * table.
+ */
+ ConvertCitusLocalTableToTableType(relationId, SINGLE_SHARD_DISTRIBUTED,
+ &distributedTableParams);
+ }
+ else
+ {
+ CreateCitusTable(relationId, SINGLE_SHARD_DISTRIBUTED, &distributedTableParams);
+ }
}
@@ -1097,7 +1126,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
/*
* EnsureTableNotDistributed errors out when relation is a citus table but
* we don't want to ask user to first undistribute their citus local tables
- * when creating reference or distributed tables from them.
+ * when creating distributed tables from them.
* For this reason, here we undistribute citus local tables beforehand.
* But since UndistributeTable does not support undistributing relations
* involved in foreign key relationships, we first drop foreign keys that
@@ -1107,6 +1136,13 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
List *originalForeignKeyRecreationCommands = NIL;
if (IsCitusTableType(relationId, CITUS_LOCAL_TABLE))
{
+ /*
+ * We use ConvertCitusLocalTableToTableType instead of CreateCitusTable
+ * to create a reference table or a single-shard table from a Citus
+ * local table.
+ */
+ Assert(tableType != REFERENCE_TABLE && tableType != SINGLE_SHARD_DISTRIBUTED);
+
/* store foreign key creation commands that relation is involved */
originalForeignKeyRecreationCommands =
GetFKeyCreationCommandsRelationInvolvedWithTableType(relationId,
@@ -1220,25 +1256,36 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
colocationId, citusTableParams.replicationModel,
autoConverted);
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /*
+ * PG16+ supports truncate triggers on foreign tables
+ */
+ if (RegularTable(relationId) || IsForeignTable(relationId))
+#else
+
/* foreign tables do not support TRUNCATE trigger */
if (RegularTable(relationId))
+#endif
{
CreateTruncateTrigger(relationId);
}
- /* create shards for hash distributed and reference tables */
if (tableType == HASH_DISTRIBUTED)
{
+ /* create shards for hash distributed table */
CreateHashDistributedTableShards(relationId, distributedTableParams->shardCount,
colocatedTableId,
localTableEmpty);
}
else if (tableType == REFERENCE_TABLE)
{
+ /* create shards for reference table */
CreateReferenceTableShard(relationId);
}
else if (tableType == SINGLE_SHARD_DISTRIBUTED)
{
+ /* create the shard of given single-shard distributed table */
CreateSingleShardTableShard(relationId, colocatedTableId,
colocationId);
}
@@ -1319,6 +1366,206 @@ CreateCitusTable(Oid relationId, CitusTableType tableType,
}
+/*
+ * ConvertCitusLocalTableToTableType converts given Citus local table to
+ * given table type.
+ *
+ * This only supports converting Citus local tables to reference tables
+ * (by replicating the shard to workers) and single-shard distributed
+ * tables (by replicating the shard to the appropriate worker and dropping
+ * the local one).
+ */
+static void
+ConvertCitusLocalTableToTableType(Oid relationId, CitusTableType tableType,
+ DistributedTableParams *distributedTableParams)
+{
+ if (!IsCitusTableType(relationId, CITUS_LOCAL_TABLE))
+ {
+ ereport(ERROR, (errmsg("table is not a local table added to metadata")));
+ }
+
+ if (tableType != REFERENCE_TABLE && tableType != SINGLE_SHARD_DISTRIBUTED)
+ {
+ ereport(ERROR, (errmsg("table type is not supported for conversion")));
+ }
+
+ if ((tableType == SINGLE_SHARD_DISTRIBUTED) != (distributedTableParams != NULL))
+ {
+ ereport(ERROR, (errmsg("distributed table params must be provided "
+ "when creating a distributed table and must "
+ "not be otherwise")));
+ }
+
+ EnsureCitusTableCanBeCreated(relationId);
+
+ Relation relation = try_relation_open(relationId, ExclusiveLock);
+ if (relation == NULL)
+ {
+ ereport(ERROR, (errmsg("could not create Citus table: "
+ "relation does not exist")));
+ }
+
+ relation_close(relation, NoLock);
+
+ if (tableType == SINGLE_SHARD_DISTRIBUTED && ShardReplicationFactor > 1)
+ {
+ ereport(ERROR, (errmsg("could not create single shard table: "
+ "citus.shard_replication_factor is greater than 1"),
+ errhint("Consider setting citus.shard_replication_factor to 1 "
+ "and try again")));
+ }
+
+ LockRelationOid(relationId, ExclusiveLock);
+
+ Var *distributionColumn = NULL;
+ CitusTableParams citusTableParams = DecideCitusTableParams(tableType,
+ distributedTableParams);
+
+ uint32 colocationId = INVALID_COLOCATION_ID;
+ if (distributedTableParams &&
+ distributedTableParams->colocationParam.colocationParamType ==
+ COLOCATE_WITH_COLOCATION_ID)
+ {
+ colocationId = distributedTableParams->colocationParam.colocationId;
+ }
+ else
+ {
+ colocationId = ColocationIdForNewTable(relationId, tableType,
+ distributedTableParams,
+ distributionColumn);
+ }
+
+ /* check constraints etc. on table based on new distribution params */
+ EnsureRelationCanBeDistributed(relationId, distributionColumn,
+ citusTableParams.distributionMethod,
+ colocationId, citusTableParams.replicationModel);
+
+ /*
+ * Regarding the foreign key relationships that the given relation is involved
+ * in, EnsureRelationCanBeDistributed() only checks the ones where the relation
+ * is the referencing table. And given that the table at hand is a Citus
+ * local table, right now it may only be referenced by a reference table
+ * or a Citus local table. Since neither of those two cases is allowed for
+ * a distributed table, here we throw an error if that's the case.
+ *
+ * Note that we don't need to check the same if we're creating a reference
+ * table from a Citus local table because all the foreign keys referencing
+ * Citus local tables are supported by reference tables.
+ */
+ if (tableType == SINGLE_SHARD_DISTRIBUTED)
+ {
+ EnsureNoFKeyFromTableType(relationId, INCLUDE_CITUS_LOCAL_TABLES |
+ INCLUDE_REFERENCE_TABLES);
+ }
+
+ EnsureReferenceTablesExistOnAllNodes();
+
+ LockColocationId(colocationId, ShareLock);
+
+ /*
+ * When converting to a single shard table, we want to drop the placement
+ * on the coordinator, but only if transferring to a different node. In that
+ * case, shouldDropLocalPlacement is true. When converting to a reference
+ * table, we always keep the placement on the coordinator, so for reference
+ * tables shouldDropLocalPlacement is always false.
+ */
+ bool shouldDropLocalPlacement = false;
+
+ List *targetNodeList = NIL;
+ if (tableType == SINGLE_SHARD_DISTRIBUTED)
+ {
+ uint32 targetNodeId = SingleShardTableColocationNodeId(colocationId);
+ if (targetNodeId != CoordinatorNodeIfAddedAsWorkerOrError()->nodeId)
+ {
+ bool missingOk = false;
+ WorkerNode *targetNode = FindNodeWithNodeId(targetNodeId, missingOk);
+ targetNodeList = list_make1(targetNode);
+
+ shouldDropLocalPlacement = true;
+ }
+ }
+ else if (tableType == REFERENCE_TABLE)
+ {
+ targetNodeList = ActivePrimaryNonCoordinatorNodeList(ShareLock);
+ targetNodeList = SortList(targetNodeList, CompareWorkerNodes);
+ }
+
+ bool autoConverted = false;
+ UpdateNoneDistTableMetadataGlobally(
+ relationId, citusTableParams.replicationModel,
+ colocationId, autoConverted);
+
+ /* create the shard placement on workers and insert into pg_dist_placement globally */
+ if (list_length(targetNodeList) > 0)
+ {
+ NoneDistTableReplicateCoordinatorPlacement(relationId, targetNodeList);
+ }
+
+ if (shouldDropLocalPlacement)
+ {
+ /*
+ * We don't yet drop the local placement before handling partitions.
+ * Otherwise, local shard placements of the partitions will be gone
+ * before we create them on workers.
+ *
+ * However, we need to delete the related entry from pg_dist_placement
+ * before distributing partitions (if any) because we need a sane metadata
+ * state before doing so.
+ */
+ NoneDistTableDeleteCoordinatorPlacement(relationId);
+ }
+
+ /* if this table is partitioned table, distribute its partitions too */
+ if (PartitionedTable(relationId))
+ {
+ /* right now we don't allow partitioned reference tables */
+ Assert(tableType == SINGLE_SHARD_DISTRIBUTED);
+
+ List *partitionList = PartitionList(relationId);
+
+ char *parentRelationName = generate_qualified_relation_name(relationId);
+
+ /*
+ * When there are many partitions, each call to
+ * ConvertCitusLocalTableToTableType accumulates memory. Process each
+ * partition in citus_per_partition_context and reset it between calls.
+ */
+ MemoryContext citusPartitionContext =
+ AllocSetContextCreate(CurrentMemoryContext,
+ "citus_per_partition_context",
+ ALLOCSET_DEFAULT_SIZES);
+ MemoryContext oldContext = MemoryContextSwitchTo(citusPartitionContext);
+
+ Oid partitionRelationId = InvalidOid;
+ foreach_oid(partitionRelationId, partitionList)
+ {
+ MemoryContextReset(citusPartitionContext);
+
+ DistributedTableParams childDistributedTableParams = {
+ .colocationParam = {
+ .colocationParamType = COLOCATE_WITH_TABLE_LIKE_OPT,
+ .colocateWithTableName = parentRelationName,
+ },
+ .shardCount = distributedTableParams->shardCount,
+ .shardCountIsStrict = false,
+ .distributionColumnName = distributedTableParams->distributionColumnName,
+ };
+ ConvertCitusLocalTableToTableType(partitionRelationId, tableType,
+ &childDistributedTableParams);
+ }
+
+ MemoryContextSwitchTo(oldContext);
+ MemoryContextDelete(citusPartitionContext);
+ }
+
+ if (shouldDropLocalPlacement)
+ {
+ NoneDistTableDropCoordinatorPlacementTable(relationId);
+ }
+}
+
+
/*
* DecideCitusTableParams decides CitusTableParams based on given CitusTableType
* and DistributedTableParams if it's a distributed table.
@@ -1421,6 +1668,7 @@ PropagatePrerequisiteObjectsForDistributedTable(Oid relationId)
ObjectAddress *tableAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*tableAddress, RelationRelationId, relationId);
EnsureAllObjectDependenciesExistOnAllNodes(list_make1(tableAddress));
+ TrackPropagatedTableAndSequences(relationId);
}
@@ -1664,7 +1912,7 @@ CreateHashDistributedTableShards(Oid relationId, int shardCount,
/*
- * CreateHashDistributedTableShards creates the shard of given single-shard
+ * CreateSingleShardTableShard creates the shard of given single-shard
* distributed table.
*/
static void
@@ -2381,9 +2629,37 @@ RegularTable(Oid relationId)
/*
- * CopyLocalDataIntoShards copies data from the local table, which is hidden
- * after converting it to a distributed table, into the shards of the distributed
- * table. For partitioned tables, this functions returns without copying the data
+ * CopyLocalDataIntoShards is a wrapper around CopyFromLocalTableIntoDistTable
+ * to copy data from the local table, which is hidden after converting it to a
+ * distributed table, into the shards of the distributed table.
+ *
+ * After copying local data into the distributed table, the local data remains
+ * in place and should be truncated at a later time.
+ */
+static void
+CopyLocalDataIntoShards(Oid distributedTableId)
+{
+ uint64 rowsCopied = CopyFromLocalTableIntoDistTable(distributedTableId,
+ distributedTableId);
+ if (rowsCopied > 0)
+ {
+ char *qualifiedRelationName =
+ generate_qualified_relation_name(distributedTableId);
+ ereport(NOTICE, (errmsg("copying the data has completed"),
+ errdetail("The local data in the table is no longer visible, "
+ "but is still on disk."),
+ errhint("To remove the local data, run: SELECT "
+ "truncate_local_data_after_distributing_table($$%s$$)",
+ qualifiedRelationName)));
+ }
+}
+
+
+/*
+ * CopyFromLocalTableIntoDistTable copies data from given local table into
+ * the shards of given distributed table.
+ *
+ * For partitioned tables, this function returns without copying the data
* because we call this function for both partitioned tables and its partitions.
* Returning early saves us from copying data to workers twice.
*
@@ -2393,35 +2669,30 @@ RegularTable(Oid relationId)
* opens a connection and starts a COPY for each shard placement that will have
* data.
*
- * We could call the planner and executor here and send the output to the
- * DestReceiver, but we are in a tricky spot here since Citus is already
- * intercepting queries on this table in the planner and executor hooks and we
- * want to read from the local table. To keep it simple, we perform a heap scan
- * directly on the table.
+ * We assume that the local table might indeed be a distributed table and the
+ * caller would want to read the local data from the shell table in that case.
+ * For this reason, to keep it simple, we perform a heap scan directly on the
+ * table instead of using SELECT.
*
- * Any writes on the table that are started during this operation will be handled
- * as distributed queries once the current transaction commits. SELECTs will
- * continue to read from the local table until the current transaction commits,
- * after which new SELECTs will be handled as distributed queries.
- *
- * After copying local data into the distributed table, the local data remains
- * in place and should be truncated at a later time.
*/
-static void
-CopyLocalDataIntoShards(Oid distributedRelationId)
+uint64
+CopyFromLocalTableIntoDistTable(Oid localTableId, Oid distributedTableId)
{
/* take an ExclusiveLock to block all operations except SELECT */
- Relation distributedRelation = table_open(distributedRelationId, ExclusiveLock);
+ Relation localRelation = table_open(localTableId, ExclusiveLock);
/*
* Skip copying from partitioned tables, we will copy the data from
* partition to partition's shards.
*/
- if (PartitionedTable(distributedRelationId))
+ if (PartitionedTable(distributedTableId))
{
- table_close(distributedRelation, NoLock);
+ table_close(localRelation, NoLock);
- return;
+ return 0;
}
/*
@@ -2435,35 +2706,43 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
*/
PushActiveSnapshot(GetLatestSnapshot());
- /* get the table columns */
- TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
- TupleTableSlot *slot = table_slot_create(distributedRelation, NULL);
- List *columnNameList = TupleDescColumnNameList(tupleDescriptor);
+ Relation distributedRelation = RelationIdGetRelation(distributedTableId);
+
+ /* get the table columns for distributed table */
+ TupleDesc destTupleDescriptor = RelationGetDescr(distributedRelation);
+ List *columnNameList = TupleDescColumnNameList(destTupleDescriptor);
+
+ RelationClose(distributedRelation);
int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX;
/* determine the partition column in the tuple descriptor */
- Var *partitionColumn = PartitionColumn(distributedRelationId, 0);
+ Var *partitionColumn = PartitionColumn(distributedTableId, 0);
if (partitionColumn != NULL)
{
partitionColumnIndex = partitionColumn->varattno - 1;
}
+ /* create tuple slot for local relation */
+ TupleDesc sourceTupleDescriptor = RelationGetDescr(localRelation);
+ TupleTableSlot *slot = table_slot_create(localRelation, NULL);
+
/* initialise per-tuple memory context */
EState *estate = CreateExecutorState();
ExprContext *econtext = GetPerTupleExprContext(estate);
econtext->ecxt_scantuple = slot;
const bool nonPublishableData = false;
DestReceiver *copyDest =
- (DestReceiver *) CreateCitusCopyDestReceiver(distributedRelationId,
+ (DestReceiver *) CreateCitusCopyDestReceiver(distributedTableId,
columnNameList,
partitionColumnIndex,
estate, NULL, nonPublishableData);
/* initialise state for writing to shards, we'll open connections on demand */
- copyDest->rStartup(copyDest, 0, tupleDescriptor);
+ copyDest->rStartup(copyDest, 0, sourceTupleDescriptor);
- DoCopyFromLocalTableIntoShards(distributedRelation, copyDest, slot, estate);
+ uint64 rowsCopied = DoCopyFromLocalTableIntoShards(localRelation, copyDest, slot,
+ estate);
/* finish writing into the shards */
copyDest->rShutdown(copyDest);
@@ -2472,24 +2751,28 @@ CopyLocalDataIntoShards(Oid distributedRelationId)
/* free memory and close the relation */
ExecDropSingleTupleTableSlot(slot);
FreeExecutorState(estate);
- table_close(distributedRelation, NoLock);
+ table_close(localRelation, NoLock);
PopActiveSnapshot();
+
+ return rowsCopied;
}
/*
* DoCopyFromLocalTableIntoShards performs a copy operation
* from local tables into shards.
+ *
+ * Returns the number of rows copied.
*/
-static void
-DoCopyFromLocalTableIntoShards(Relation distributedRelation,
+static uint64
+DoCopyFromLocalTableIntoShards(Relation localRelation,
DestReceiver *copyDest,
TupleTableSlot *slot,
EState *estate)
{
/* begin reading from local table */
- TableScanDesc scan = table_beginscan(distributedRelation, GetActiveSnapshot(), 0,
+ TableScanDesc scan = table_beginscan(localRelation, GetActiveSnapshot(), 0,
NULL);
MemoryContext oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
@@ -2524,22 +2807,12 @@ DoCopyFromLocalTableIntoShards(Relation distributedRelation,
ereport(DEBUG1, (errmsg("Copied " UINT64_FORMAT " rows", rowsCopied)));
}
- if (rowsCopied > 0)
- {
- char *qualifiedRelationName =
- generate_qualified_relation_name(RelationGetRelid(distributedRelation));
- ereport(NOTICE, (errmsg("copying the data has completed"),
- errdetail("The local data in the table is no longer visible, "
- "but is still on disk."),
- errhint("To remove the local data, run: SELECT "
- "truncate_local_data_after_distributing_table($$%s$$)",
- qualifiedRelationName)));
- }
-
MemoryContextSwitchTo(oldContext);
/* finish reading from the local table */
table_endscan(scan);
+
+ return rowsCopied;
}
diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c
index 208d570eb..944ff627d 100644
--- a/src/backend/distributed/commands/database.c
+++ b/src/backend/distributed/commands/database.c
@@ -31,6 +31,8 @@
static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
static Oid get_database_owner(Oid db_oid);
+List * PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString,
+ ProcessUtilityContext processUtilityContext);
/* controlled via GUC */
bool EnableAlterDatabaseOwner = true;
@@ -107,3 +109,136 @@ get_database_owner(Oid db_oid)
return dba;
}
+
+
+/*
+ * PreprocessGrantOnDatabaseStmt is executed before the statement is applied to the local
+ * postgres instance.
+ *
+ * In this stage we can prepare the commands that need to be run on all workers to grant
+ * on databases.
+ */
+List *
+PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString,
+ ProcessUtilityContext processUtilityContext)
+{
+ if (!ShouldPropagate())
+ {
+ return NIL;
+ }
+
+ GrantStmt *stmt = castNode(GrantStmt, node);
+ Assert(stmt->objtype == OBJECT_DATABASE);
+
+ List *databaseList = stmt->objects;
+
+ if (list_length(databaseList) == 0)
+ {
+ return NIL;
+ }
+
+ EnsureCoordinator();
+
+ char *sql = DeparseTreeNode((Node *) stmt);
+
+ List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+ (void *) sql,
+ ENABLE_DDL_PROPAGATION);
+
+ return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
+
+
+/*
+ * PreprocessAlterDatabaseStmt is executed before the statement is applied to the local
+ * postgres instance.
+ *
+ * In this stage we can prepare the commands that need to be run on all workers to
+ * alter databases.
+ */
+List *
+PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
+ ProcessUtilityContext processUtilityContext)
+{
+ if (!ShouldPropagate())
+ {
+ return NIL;
+ }
+
+ AlterDatabaseStmt *stmt = castNode(AlterDatabaseStmt, node);
+
+ EnsureCoordinator();
+
+ char *sql = DeparseTreeNode((Node *) stmt);
+
+ List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+ (void *) sql,
+ ENABLE_DDL_PROPAGATION);
+
+ return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
+
+
+#if PG_VERSION_NUM >= PG_VERSION_15
+
+/*
+ * PreprocessAlterDatabaseRefreshCollStmt is executed before the statement is applied
+ * to the local postgres instance.
+ *
+ * In this stage we can prepare the commands that need to be run on all workers to
+ * refresh the database collation versions.
+ */
+List *
+PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
+ ProcessUtilityContext processUtilityContext)
+{
+ if (!ShouldPropagate())
+ {
+ return NIL;
+ }
+
+ AlterDatabaseRefreshCollStmt *stmt = castNode(AlterDatabaseRefreshCollStmt, node);
+
+ EnsureCoordinator();
+
+ char *sql = DeparseTreeNode((Node *) stmt);
+
+ List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+ (void *) sql,
+ ENABLE_DDL_PROPAGATION);
+
+ return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
+
+
+#endif
+
+
+/*
+ * PreprocessAlterDatabaseSetStmt is executed before the statement is applied to the local
+ * postgres instance.
+ *
+ * In this stage we can prepare the commands that need to be run on all workers to
+ * alter the database settings.
+ */
+List *
+PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
+ ProcessUtilityContext processUtilityContext)
+{
+ if (!ShouldPropagate())
+ {
+ return NIL;
+ }
+
+ AlterDatabaseSetStmt *stmt = castNode(AlterDatabaseSetStmt, node);
+
+ EnsureCoordinator();
+
+ char *sql = DeparseTreeNode((Node *) stmt);
+
+ List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+ (void *) sql,
+ ENABLE_DDL_PROPAGATION);
+
+ return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
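All three Preprocess*DatabaseStmt functions added above share the same tail: ensure propagation is wanted, require the coordinator, deparse the statement, and wrap it in DISABLE/ENABLE_DDL_PROPAGATION for the non-coordinator nodes. A hypothetical helper (not part of this patch) that names the shared pattern:

/*
 * Hypothetical illustration only; the patch keeps this logic inlined in
 * each Preprocess* function above.
 */
static List *
PropagateDatabaseStmtToWorkers(Node *stmt)
{
	EnsureCoordinator();

	char *sql = DeparseTreeNode(stmt);

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}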
diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c
index ceec83324..977efb145 100644
--- a/src/backend/distributed/commands/dependencies.c
+++ b/src/backend/distributed/commands/dependencies.c
@@ -112,15 +112,35 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
dependency->objectSubId, ExclusiveLock);
}
- WorkerNode *workerNode = NULL;
- foreach_ptr(workerNode, workerNodeList)
- {
- const char *nodeName = workerNode->workerName;
- uint32 nodePort = workerNode->workerPort;
- SendCommandListToWorkerOutsideTransaction(nodeName, nodePort,
- CitusExtensionOwnerName(),
- ddlCommands);
+ /*
+ * We need to propagate dependencies via the current user's metadata connection if
+ * any dependency for the target is created in the current transaction. Our assumption
+ * is that if we rely on a dependency created in the current transaction, then the
+ * current user, most probably, has permissions to create the target object as well.
+ * Note that the user may still be unable to create the target due to missing
+ * permissions on some of its dependencies. But this is ok since it should be rare.
+ *
+ * If we opted to use a separate superuser connection for the target, then we would
+ * have visibility issues since propagated dependencies would be invisible to
+ * the separate connection until we locally commit.
+ */
+ if (HasAnyDependencyInPropagatedObjects(target))
+ {
+ SendCommandListToWorkersWithMetadata(ddlCommands);
+ }
+ else
+ {
+ WorkerNode *workerNode = NULL;
+ foreach_ptr(workerNode, workerNodeList)
+ {
+ const char *nodeName = workerNode->workerName;
+ uint32 nodePort = workerNode->workerPort;
+
+ SendCommandListToWorkerOutsideTransaction(nodeName, nodePort,
+ CitusExtensionOwnerName(),
+ ddlCommands);
+ }
}
/*
diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c
index 3442b07f2..a17d75e17 100644
--- a/src/backend/distributed/commands/distribute_object_ops.c
+++ b/src/backend/distributed/commands/distribute_object_ops.c
@@ -432,6 +432,54 @@ static DistributeObjectOps Database_AlterOwner = {
.address = AlterDatabaseOwnerObjectAddress,
.markDistributed = false,
};
+
+static DistributeObjectOps Database_Grant = {
+ .deparse = DeparseGrantOnDatabaseStmt,
+ .qualify = NULL,
+ .preprocess = PreprocessGrantOnDatabaseStmt,
+ .postprocess = NULL,
+ .objectType = OBJECT_DATABASE,
+ .operationType = DIST_OPS_ALTER,
+ .address = NULL,
+ .markDistributed = false,
+};
+
+static DistributeObjectOps Database_Alter = {
+ .deparse = DeparseAlterDatabaseStmt,
+ .qualify = NULL,
+ .preprocess = PreprocessAlterDatabaseStmt,
+ .postprocess = NULL,
+ .objectType = OBJECT_DATABASE,
+ .operationType = DIST_OPS_ALTER,
+ .address = NULL,
+ .markDistributed = false,
+};
+
+#if PG_VERSION_NUM >= PG_VERSION_15
+static DistributeObjectOps Database_RefreshColl = {
+ .deparse = DeparseAlterDatabaseRefreshCollStmt,
+ .qualify = NULL,
+ .preprocess = PreprocessAlterDatabaseRefreshCollStmt,
+ .postprocess = NULL,
+ .objectType = OBJECT_DATABASE,
+ .operationType = DIST_OPS_ALTER,
+ .address = NULL,
+ .markDistributed = false,
+};
+#endif
+
+static DistributeObjectOps Database_Set = {
+ .deparse = DeparseAlterDatabaseSetStmt,
+ .qualify = NULL,
+ .preprocess = PreprocessAlterDatabaseSetStmt,
+ .postprocess = NULL,
+ .objectType = OBJECT_DATABASE,
+ .operationType = DIST_OPS_ALTER,
+ .address = NULL,
+ .markDistributed = false,
+};
+
+
static DistributeObjectOps Domain_Alter = {
.deparse = DeparseAlterDomainStmt,
.qualify = QualifyAlterDomainStmt,
@@ -1260,7 +1308,6 @@ static DistributeObjectOps Trigger_Rename = {
.markDistributed = false,
};
-
/*
* GetDistributeObjectOps looks up the DistributeObjectOps which handles the node.
*
@@ -1271,6 +1318,25 @@ GetDistributeObjectOps(Node *node)
{
switch (nodeTag(node))
{
+ case T_AlterDatabaseStmt:
+ {
+ return &Database_Alter;
+ }
+
+#if PG_VERSION_NUM >= PG_VERSION_15
+ case T_AlterDatabaseRefreshCollStmt:
+ {
+ return &Database_RefreshColl;
+ }
+
+#endif
+
+ case T_AlterDatabaseSetStmt:
+ {
+ return &Database_Set;
+ }
+
+
case T_AlterDomainStmt:
{
return &Domain_Alter;
@@ -1911,6 +1977,11 @@ GetDistributeObjectOps(Node *node)
return &Routine_Grant;
}
+ case OBJECT_DATABASE:
+ {
+ return &Database_Grant;
+ }
+
default:
{
return &Any_Grant;
diff --git a/src/backend/distributed/commands/domain.c b/src/backend/distributed/commands/domain.c
index 392cbd6e2..82ef80c0f 100644
--- a/src/backend/distributed/commands/domain.c
+++ b/src/backend/distributed/commands/domain.c
@@ -64,7 +64,8 @@ CreateDomainStmt *
RecreateDomainStmt(Oid domainOid)
{
CreateDomainStmt *stmt = makeNode(CreateDomainStmt);
- stmt->domainname = stringToQualifiedNameList(format_type_be_qualified(domainOid));
+ stmt->domainname = stringToQualifiedNameList_compat(format_type_be_qualified(
+ domainOid));
HeapTuple tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(domainOid));
if (!HeapTupleIsValid(tup))
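stringToQualifiedNameList_compat() absorbs the escontext parameter that PG16 added to stringToQualifiedNameList(). A minimal sketch, assuming the compat macro simply passes NULL so parse errors are still thrown:

#if PG_VERSION_NUM >= PG_VERSION_16
#define stringToQualifiedNameList_compat(string) \
	stringToQualifiedNameList(string, NULL)
#else
#define stringToQualifiedNameList_compat(string) \
	stringToQualifiedNameList(string)
#endif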
diff --git a/src/backend/distributed/commands/drop_distributed_table.c b/src/backend/distributed/commands/drop_distributed_table.c
index 24dd8e892..26579cd60 100644
--- a/src/backend/distributed/commands/drop_distributed_table.c
+++ b/src/backend/distributed/commands/drop_distributed_table.c
@@ -19,6 +19,7 @@
#include "distributed/coordinator_protocol.h"
#include "distributed/metadata_sync.h"
#include "distributed/multi_partitioning_utils.h"
+#include "distributed/tenant_schema_metadata.h"
#include "distributed/worker_transaction.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c
index ac4bf135e..5bddf1ede 100644
--- a/src/backend/distributed/commands/extension.c
+++ b/src/backend/distributed/commands/extension.c
@@ -50,7 +50,7 @@ static List * GetAllViews(void);
static bool ShouldPropagateExtensionCommand(Node *parseTree);
static bool IsAlterExtensionSetSchemaCitus(Node *parseTree);
static Node * RecreateExtensionStmt(Oid extensionOid);
-static List * GenerateGrantCommandsOnExtesionDependentFDWs(Oid extensionId);
+static List * GenerateGrantCommandsOnExtensionDependentFDWs(Oid extensionId);
/*
@@ -985,7 +985,7 @@ CreateExtensionDDLCommand(const ObjectAddress *extensionAddress)
/* any privilege granted on FDWs that belong to the extension should be included */
List *FDWGrants =
- GenerateGrantCommandsOnExtesionDependentFDWs(extensionAddress->objectId);
+ GenerateGrantCommandsOnExtensionDependentFDWs(extensionAddress->objectId);
ddlCommands = list_concat(ddlCommands, FDWGrants);
@@ -1048,11 +1048,11 @@ RecreateExtensionStmt(Oid extensionOid)
/*
- * GenerateGrantCommandsOnExtesionDependentFDWs returns a list of commands that GRANTs
+ * GenerateGrantCommandsOnExtensionDependentFDWs returns a list of commands that GRANTs
* the privileges on FDWs that are depending on the given extension.
*/
static List *
-GenerateGrantCommandsOnExtesionDependentFDWs(Oid extensionId)
+GenerateGrantCommandsOnExtensionDependentFDWs(Oid extensionId)
{
List *commands = NIL;
List *FDWOids = GetDependentFDWsToExtension(extensionId);
diff --git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c
index 40ccb0ddf..7c2d50f44 100644
--- a/src/backend/distributed/commands/foreign_constraint.c
+++ b/src/backend/distributed/commands/foreign_constraint.c
@@ -132,7 +132,7 @@ EnsureNoFKeyFromTableType(Oid relationId, int tableTypeFlag)
/*
- * EnsureNoFKeyToTableType ensures that given relation is not referencing by any table specified
+ * EnsureNoFKeyToTableType ensures that given relation is not referencing any table specified
* by table type flag.
*/
void
@@ -839,6 +839,22 @@ GetForeignConstraintToReferenceTablesCommands(Oid relationId)
}
+/*
+ * GetForeignConstraintFromOtherReferenceTablesCommands takes in a relationId,
+ * and returns the list of foreign constraint commands needed to reconstruct
+ * the foreign key constraints in which the table is the "referenced" one and
+ * the "referencing" table is a reference table.
+ */
+List *
+GetForeignConstraintFromOtherReferenceTablesCommands(Oid relationId)
+{
+ int flags = INCLUDE_REFERENCED_CONSTRAINTS |
+ EXCLUDE_SELF_REFERENCES |
+ INCLUDE_REFERENCE_TABLES;
+ return GetForeignConstraintCommandsInternal(relationId, flags);
+}
+
+
/*
* GetForeignConstraintToDistributedTablesCommands takes in a relationId, and
* returns the list of foreign constraint commands needed to reconstruct
@@ -879,7 +895,7 @@ GetForeignConstraintCommandsInternal(Oid relationId, int flags)
List *foreignKeyCommands = NIL;
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
Oid foreignKeyOid = InvalidOid;
foreach_oid(foreignKeyOid, foreignKeyOids)
@@ -890,7 +906,7 @@ GetForeignConstraintCommandsInternal(Oid relationId, int flags)
}
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
return foreignKeyCommands;
}
@@ -1227,7 +1243,7 @@ GetForeignKeyOids(Oid relationId, int flags)
Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], pgConstraintTargetAttrNumber,
- BTEqualStrategyNumber, F_OIDEQ, relationId);
+ BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId));
SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, indexOid, useIndex,
NULL, scanKeyCount, scanKey);
diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c
index 9f579f5dc..01911677d 100644
--- a/src/backend/distributed/commands/function.c
+++ b/src/backend/distributed/commands/function.c
@@ -780,7 +780,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
ScanKeyInit(&scanKey[1], Anum_pg_dist_object_objid, BTEqualStrategyNumber, F_OIDEQ,
ObjectIdGetDatum(distAddress->objectId));
ScanKeyInit(&scanKey[2], Anum_pg_dist_object_objsubid, BTEqualStrategyNumber,
- F_INT4EQ, ObjectIdGetDatum(distAddress->objectSubId));
+ F_INT4EQ, Int32GetDatum(distAddress->objectSubId));
SysScanDesc scanDescriptor = systable_beginscan(pgDistObjectRel,
DistObjectPrimaryKeyIndexId(),
@@ -909,15 +909,14 @@ GetFunctionDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
else
{
Datum sqlTextDatum = (Datum) 0;
-
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
sqlTextDatum = DirectFunctionCall1(pg_get_functiondef,
ObjectIdGetDatum(funcOid));
createFunctionSQL = TextDatumGetCString(sqlTextDatum);
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
}
return createFunctionSQL;
diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c
index aa0715372..8271cc4f4 100644
--- a/src/backend/distributed/commands/index.c
+++ b/src/backend/distributed/commands/index.c
@@ -18,6 +18,9 @@
#include "catalog/index.h"
#include "catalog/namespace.h"
#include "catalog/pg_class.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "catalog/pg_namespace.h"
+#endif
#include "commands/defrem.h"
#include "commands/tablecmds.h"
#include "distributed/citus_ruleutils.h"
@@ -1055,8 +1058,8 @@ RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, voi
errmsg("\"%s\" is not an index", rel->relname)));
/* Allow DROP to either table owner or schema owner */
- if (!pg_class_ownercheck(relOid, GetUserId()) &&
- !pg_namespace_ownercheck(classform->relnamespace, GetUserId()))
+ if (!object_ownercheck(RelationRelationId, relOid, GetUserId()) &&
+ !object_ownercheck(NamespaceRelationId, classform->relnamespace, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, rel->relname);
}
@@ -1140,7 +1143,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, Oid relId, Oid oldRelI
errmsg("\"%s\" is not an index", relation->relname)));
/* Check permissions */
- if (!pg_class_ownercheck(relId, GetUserId()))
+ if (!object_ownercheck(RelationRelationId, relId, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, relation->relname);
/* Lock heap before index to avoid deadlock. */
diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c
index 5d7c279a6..a684d06cc 100644
--- a/src/backend/distributed/commands/multi_copy.c
+++ b/src/backend/distributed/commands/multi_copy.c
@@ -83,6 +83,9 @@
#include "distributed/locally_reserved_shared_connections.h"
#include "distributed/placement_connection.h"
#include "distributed/relation_access_tracking.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "distributed/relation_utils.h"
+#endif
#include "distributed/remote_commands.h"
#include "distributed/remote_transaction.h"
#include "distributed/replication_origin_session_utils.h"
@@ -422,7 +425,8 @@ EnsureCopyCanRunOnRelation(Oid relationId)
*/
if (RecoveryInProgress() && WritableStandbyCoordinator)
{
- ereport(ERROR, (errmsg("COPY command to Citus tables is not allowed in "
+ ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
+ errmsg("COPY command to Citus tables is not allowed in "
"read-only mode"),
errhint("All COPY commands to citus tables happen via 2PC, "
"and 2PC requires the database to be in a writable state."),
@@ -1544,7 +1548,7 @@ CoerceColumnValue(Datum inputValue, CopyCoercionData *coercionPath)
{
switch (coercionPath->coercionType)
{
- case 0:
+ case COERCION_PATH_NONE:
{
return inputValue; /* this was a dropped column */
}
@@ -3149,10 +3153,17 @@ CheckCopyPermissions(CopyStmt *copyStatement)
rel = table_openrv(copyStatement->relation,
is_from ? RowExclusiveLock : AccessShareLock);
- range_table = CreateRangeTable(rel, required_access);
+ range_table = CreateRangeTable(rel);
RangeTblEntry *rte = (RangeTblEntry*) linitial(range_table);
tupDesc = RelationGetDescr(rel);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ /* create permission info for rte */
+ RTEPermissionInfo *perminfo = GetFilledPermissionInfo(rel->rd_id, rte->inh, required_access);
+#else
+ rte->requiredPerms = required_access;
+#endif
+
attnums = CopyGetAttnums(tupDesc, rel, copyStatement->attlist);
foreach(cur, attnums)
{
@@ -3160,15 +3171,29 @@ CheckCopyPermissions(CopyStmt *copyStatement)
if (is_from)
{
+#if PG_VERSION_NUM >= PG_VERSION_16
+ perminfo->insertedCols = bms_add_member(perminfo->insertedCols, attno);
+#else
rte->insertedCols = bms_add_member(rte->insertedCols, attno);
+#endif
}
else
{
+#if PG_VERSION_NUM >= PG_VERSION_16
+ perminfo->selectedCols = bms_add_member(perminfo->selectedCols, attno);
+#else
rte->selectedCols = bms_add_member(rte->selectedCols, attno);
+#endif
}
}
+#if PG_VERSION_NUM >= PG_VERSION_16
+ /* link rte to its permission info then check permissions */
+ rte->perminfoindex = 1;
+ ExecCheckPermissions(list_make1(rte), list_make1(perminfo), true);
+#else
ExecCheckRTPerms(range_table, true);
+#endif
/* TODO: Perform RLS checks once supported */
@@ -3181,13 +3206,12 @@ CheckCopyPermissions(CopyStmt *copyStatement)
* CreateRangeTable creates a range table with the given relation.
*/
List *
-CreateRangeTable(Relation rel, AclMode requiredAccess)
+CreateRangeTable(Relation rel)
{
RangeTblEntry *rte = makeNode(RangeTblEntry);
rte->rtekind = RTE_RELATION;
rte->relid = rel->rd_id;
rte->relkind = rel->rd_rel->relkind;
- rte->requiredPerms = requiredAccess;
return list_make1(rte);
}
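GetFilledPermissionInfo() is declared in distributed/relation_utils.h (included above for PG16) but not shown in this diff. Presumably it only packages the fields that pre-16 code stored directly on the RangeTblEntry into the new RTEPermissionInfo node; a sketch under that assumption:

#if PG_VERSION_NUM >= PG_VERSION_16
RTEPermissionInfo *
GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms)
{
	RTEPermissionInfo *perminfo = makeNode(RTEPermissionInfo);

	perminfo->relid = relid;
	perminfo->inh = inh;
	perminfo->requiredPerms = requiredPerms;

	return perminfo;
}
#endif

Setting rte->perminfoindex = 1 afterwards is what ties the single RangeTblEntry to the single RTEPermissionInfo in the one-element lists passed to ExecCheckPermissions().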
diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c
index 4c21b2f43..63ede986d 100644
--- a/src/backend/distributed/commands/role.c
+++ b/src/backend/distributed/commands/role.c
@@ -78,6 +78,7 @@ static const char * WrapQueryInAlterRoleIfExistsCall(const char *query, RoleSpec
static VariableSetStmt * MakeVariableSetStmt(const char *config);
static int ConfigGenericNameCompare(const void *lhs, const void *rhs);
static List * RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok);
+static bool IsGrantRoleWithInheritOrSetOption(GrantRoleStmt *stmt);
/* controlled via GUC */
bool EnableCreateRolePropagation = true;
@@ -703,12 +704,13 @@ MakeSetStatementArguments(char *configurationName, char *configurationValue)
* is no other way to determine allowed units, and value types other than
* using this function
*/
- struct config_generic **gucVariables = get_guc_variables();
- int numOpts = GetNumConfigOptions();
+ int gucCount = 0;
+ struct config_generic **gucVariables = get_guc_variables_compat(&gucCount);
+
struct config_generic **matchingConfig =
(struct config_generic **) SafeBsearch((void *) &key,
(void *) gucVariables,
- numOpts,
+ gucCount,
sizeof(struct config_generic *),
ConfigGenericNameCompare);
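get_guc_variables_compat() covers the PG16 signature change of get_guc_variables(), which now returns the number of GUCs through an out parameter instead of requiring a separate GetNumConfigOptions() call. A plausible sketch of the macro:

#if PG_VERSION_NUM >= PG_VERSION_16
#define get_guc_variables_compat(gucCount) get_guc_variables(gucCount)
#else
#define get_guc_variables_compat(gucCount) \
	(*(gucCount) = GetNumConfigOptions(), get_guc_variables())
#endif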
@@ -820,7 +822,12 @@ GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options)
if (strcmp(option->defname, "adminmembers") == 0)
{
+#if PG_VERSION_NUM >= PG_VERSION_16
+ DefElem *opt = makeDefElem("admin", (Node *) makeBoolean(true), -1);
+ grantRoleStmt->opt = list_make1(opt);
+#else
grantRoleStmt->admin_opt = true;
+#endif
}
stmts = lappend(stmts, grantRoleStmt);
@@ -868,7 +875,15 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
grantRoleStmt->grantor = NULL;
+#if PG_VERSION_NUM >= PG_VERSION_16
+ if (membership->admin_option)
+ {
+ DefElem *opt = makeDefElem("admin", (Node *) makeBoolean(true), -1);
+ grantRoleStmt->opt = list_make1(opt);
+ }
+#else
grantRoleStmt->admin_opt = membership->admin_option;
+#endif
stmts = lappend(stmts, grantRoleStmt);
}
@@ -1127,6 +1142,19 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString,
return NIL;
}
+ if (IsGrantRoleWithInheritOrSetOption(stmt))
+ {
+ if (EnableUnsupportedFeatureMessages)
+ {
+ ereport(NOTICE, (errmsg("not propagating GRANT/REVOKE commands with specified"
+ " INHERIT/SET options to worker nodes"),
+ errhint(
+ "Connect to worker nodes directly to manually run the same"
+ " GRANT/REVOKE command after disabling DDL propagation.")));
+ }
+ return NIL;
+ }
+
/*
* Postgres don't seem to use the grantor. Even dropping the grantor doesn't
* seem to affect the membership. If this changes, we might need to add grantors
@@ -1176,6 +1204,27 @@ PostprocessGrantRoleStmt(Node *node, const char *queryString)
}
+/*
+ * IsGrantRoleWithInheritOrSetOption returns true if the given
+ * GrantRoleStmt has the INHERIT or SET option specified in its options.
+ */
+static bool
+IsGrantRoleWithInheritOrSetOption(GrantRoleStmt *stmt)
+{
+#if PG_VERSION_NUM >= PG_VERSION_16
+ DefElem *opt = NULL;
+ foreach_ptr(opt, stmt->opt)
+ {
+ if (strcmp(opt->defname, "inherit") == 0 || strcmp(opt->defname, "set") == 0)
+ {
+ return true;
+ }
+ }
+#endif
+ return false;
+}
+
+
/*
* ConfigGenericNameCompare compares two config_generic structs based on their
* name fields. If the name fields contain the same strings two structs are
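
Illustrative sketch, not part of the patch: PG 16 replaced GrantRoleStmt's admin_opt boolean with a generic DefElem list (stmt->opt), which is why the hunks above build makeDefElem("admin", ...) nodes and why INHERIT/SET options now need an explicit propagation guard. The hypothetical helper below shows how the admin flag would be read back under both representations:

static bool
GrantRoleStmtHasAdminOptionSketch(GrantRoleStmt *stmt)
{
#if PG_VERSION_NUM >= PG_VERSION_16
	/* PG 16+: role grant options arrive as DefElems ("admin", "inherit", "set") */
	DefElem *opt = NULL;
	foreach_ptr(opt, stmt->opt)
	{
		if (strcmp(opt->defname, "admin") == 0)
		{
			return defGetBoolean(opt);
		}
	}
	return false;
#else
	/* PG 15 and earlier: a single boolean on the statement */
	return stmt->admin_opt;
#endif
}
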
diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c
index 6eaacc993..d48a73647 100644
--- a/src/backend/distributed/commands/schema.c
+++ b/src/backend/distributed/commands/schema.c
@@ -369,7 +369,7 @@ SchemaHasDistributedTableWithFKey(char *schemaName)
Relation pgClass = table_open(RelationRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber,
- F_OIDEQ, namespaceOid);
+ F_OIDEQ, ObjectIdGetDatum(namespaceOid));
SysScanDesc scanDescriptor = systable_beginscan(pgClass, scanIndexId, useIndex, NULL,
scanKeyCount, scanKey);
diff --git a/src/backend/distributed/commands/schema_based_sharding.c b/src/backend/distributed/commands/schema_based_sharding.c
index b717cb5ae..65d2b8127 100644
--- a/src/backend/distributed/commands/schema_based_sharding.c
+++ b/src/backend/distributed/commands/schema_based_sharding.c
@@ -21,6 +21,7 @@
#include "distributed/metadata_sync.h"
#include "distributed/metadata/distobject.h"
#include "distributed/multi_partitioning_utils.h"
+#include "distributed/shard_transfer.h"
#include "distributed/tenant_schema_metadata.h"
#include "distributed/worker_shard_visibility.h"
#include "utils/builtins.h"
@@ -29,6 +30,16 @@
#include "utils/syscache.h"
+/* return value of CreateCitusMoveSchemaParams() */
+typedef struct
+{
+ uint64 anchorShardId;
+ uint32 sourceNodeId;
+ char *sourceNodeName;
+ uint32 sourceNodePort;
+} CitusMoveSchemaParams;
+
+
static void UnregisterTenantSchemaGlobally(Oid schemaId, char *schemaName);
static List * SchemaGetNonShardTableIdList(Oid schemaId);
static void EnsureSchemaCanBeDistributed(Oid schemaId, List *schemaTableIdList);
@@ -36,10 +47,14 @@ static void EnsureTenantSchemaNameAllowed(Oid schemaId);
static void EnsureTableKindSupportedForTenantSchema(Oid relationId);
static void EnsureFKeysForTenantTable(Oid relationId);
static void EnsureSchemaExist(Oid schemaId);
+static CitusMoveSchemaParams * CreateCitusMoveSchemaParams(Oid schemaId);
+static uint64 TenantSchemaPickAnchorShardId(Oid schemaId);
+
/* controlled via citus.enable_schema_based_sharding GUC */
bool EnableSchemaBasedSharding = false;
+
const char *TenantOperationNames[TOTAL_TENANT_OPERATION] = {
"undistribute_table",
"alter_distributed_table",
@@ -52,6 +67,8 @@ const char *TenantOperationNames[TOTAL_TENANT_OPERATION] = {
PG_FUNCTION_INFO_V1(citus_internal_unregister_tenant_schema_globally);
PG_FUNCTION_INFO_V1(citus_schema_distribute);
PG_FUNCTION_INFO_V1(citus_schema_undistribute);
+PG_FUNCTION_INFO_V1(citus_schema_move);
+PG_FUNCTION_INFO_V1(citus_schema_move_with_nodeid);
/*
* ShouldUseSchemaBasedSharding returns true if schema given name should be
@@ -757,6 +774,139 @@ citus_schema_undistribute(PG_FUNCTION_ARGS)
}
+/*
+ * citus_schema_move moves the shards that belong to the given distributed tenant
+ * schema from one node to another by using citus_move_shard_placement().
+ */
+Datum
+citus_schema_move(PG_FUNCTION_ARGS)
+{
+ CheckCitusVersion(ERROR);
+ EnsureCoordinator();
+
+ Oid schemaId = PG_GETARG_OID(0);
+ CitusMoveSchemaParams *params = CreateCitusMoveSchemaParams(schemaId);
+
+ DirectFunctionCall6(citus_move_shard_placement,
+ UInt64GetDatum(params->anchorShardId),
+ CStringGetTextDatum(params->sourceNodeName),
+ UInt32GetDatum(params->sourceNodePort),
+ PG_GETARG_DATUM(1),
+ PG_GETARG_DATUM(2),
+ PG_GETARG_DATUM(3));
+ PG_RETURN_VOID();
+}
+
+
+/*
+ * citus_schema_move_with_nodeid does the same as citus_schema_move(), but
+ * accepts a node id as parameter instead of a hostname and port, hence uses
+ * citus_move_shard_placement_with_nodeid().
+ */
+Datum
+citus_schema_move_with_nodeid(PG_FUNCTION_ARGS)
+{
+ CheckCitusVersion(ERROR);
+ EnsureCoordinator();
+
+ Oid schemaId = PG_GETARG_OID(0);
+ CitusMoveSchemaParams *params = CreateCitusMoveSchemaParams(schemaId);
+
+ DirectFunctionCall4(citus_move_shard_placement_with_nodeid,
+ UInt64GetDatum(params->anchorShardId),
+ UInt32GetDatum(params->sourceNodeId),
+ PG_GETARG_DATUM(1),
+ PG_GETARG_DATUM(2));
+ PG_RETURN_VOID();
+}
+
+
+/*
+ * CreateCitusMoveSchemaParams is a helper function for
+ * citus_schema_move() and citus_schema_move_with_nodeid()
+ * that validates the input schema and returns the parameters to be used in the
+ * underlying shard transfer functions.
+ */
+static CitusMoveSchemaParams *
+CreateCitusMoveSchemaParams(Oid schemaId)
+{
+ EnsureSchemaExist(schemaId);
+ EnsureSchemaOwner(schemaId);
+
+ if (!IsTenantSchema(schemaId))
+ {
+ ereport(ERROR, (errmsg("schema %s is not a distributed schema",
+ get_namespace_name(schemaId))));
+ }
+
+ uint64 anchorShardId = TenantSchemaPickAnchorShardId(schemaId);
+ if (anchorShardId == INVALID_SHARD_ID)
+ {
+ ereport(ERROR, (errmsg("cannot move distributed schema %s because it is empty",
+ get_namespace_name(schemaId))));
+ }
+
+ uint32 colocationId = SchemaIdGetTenantColocationId(schemaId);
+ uint32 sourceNodeId = SingleShardTableColocationNodeId(colocationId);
+
+ bool missingOk = false;
+ WorkerNode *sourceNode = FindNodeWithNodeId(sourceNodeId, missingOk);
+
+ CitusMoveSchemaParams *params = palloc0(sizeof(CitusMoveSchemaParams));
+ params->anchorShardId = anchorShardId;
+ params->sourceNodeId = sourceNodeId;
+ params->sourceNodeName = sourceNode->workerName;
+ params->sourceNodePort = sourceNode->workerPort;
+ return params;
+}
+
+
+/*
+ * TenantSchemaPickAnchorShardId returns the id of one of the shards
+ * created in the given tenant schema.
+ *
+ * Returns INVALID_SHARD_ID if the schema was initially empty or if it's not
+ * a tenant schema.
+ *
+ * Throws an error if all the tables in the schema are concurrently dropped.
+ */
+static uint64
+TenantSchemaPickAnchorShardId(Oid schemaId)
+{
+ uint32 colocationId = SchemaIdGetTenantColocationId(schemaId);
+ List *tablesInSchema = ColocationGroupTableList(colocationId, 0);
+ if (list_length(tablesInSchema) == 0)
+ {
+ return INVALID_SHARD_ID;
+ }
+
+ Oid relationId = InvalidOid;
+ foreach_oid(relationId, tablesInSchema)
+ {
+ /*
+ * Make sure the relation isn't dropped for the remainder of
+ * the transaction.
+ */
+ LockRelationOid(relationId, AccessShareLock);
+
+ /*
+ * The relation might have been dropped just before we locked it.
+ * Let's look it up.
+ */
+ Relation relation = RelationIdGetRelation(relationId);
+ if (RelationIsValid(relation))
+ {
+ /* relation still exists, we can use it */
+ RelationClose(relation);
+ return GetFirstShardId(relationId);
+ }
+ }
+
+ ereport(ERROR, (errmsg("tables in schema %s are concurrently dropped",
+ get_namespace_name(schemaId))));
+}
+
+
/*
* ErrorIfTenantTable errors out with the given operation name,
* if the given relation is a tenant table.
diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c
index a65d6c0fe..dae72ada9 100644
--- a/src/backend/distributed/commands/statistics.c
+++ b/src/backend/distributed/commands/statistics.c
@@ -77,6 +77,14 @@ PreprocessCreateStatisticsStmt(Node *node, const char *queryString,
EnsureCoordinator();
+ if (!(stmt->defnames))
+ {
+ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot create statistics without a name on a "
+ "Citus table"),
+ errhint("Consider specifying a name for the statistics")));
+ }
+
QualifyTreeNode((Node *) stmt);
Oid statsOid = get_statistics_object_oid(stmt->defnames, true);
@@ -522,7 +530,7 @@ GetExplicitStatisticsCommandList(Oid relationId)
RelationClose(relation);
/* generate fully-qualified names */
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
Oid statisticsId = InvalidOid;
foreach_oid(statisticsId, statisticsIdList)
@@ -571,7 +579,7 @@ GetExplicitStatisticsCommandList(Oid relationId)
}
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
return explicitStatisticsCommandList;
}
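
Illustrative sketch, not part of the patch: PushOverrideEmptySearchPath()/PopOverrideSearchPath() disappear throughout this change set because PG 16 removed the override-search-path machinery. PushEmptySearchPath()/PopEmptySearchPath() are assumed to be Citus helpers introduced elsewhere in the patch; one plausible shape for them, built purely on GUC nest levels, is:

/* sketch: install an empty search_path on a fresh GUC nest level */
static int
PushEmptySearchPathSketch(void)
{
	int saveNestLevel = NewGUCNestLevel();

	(void) set_config_option("search_path", "",
							 PGC_USERSET, PGC_S_SESSION,
							 GUC_ACTION_SAVE, true, 0, false);

	return saveNestLevel;
}

/* sketch: roll the nest level back, restoring the caller's search_path */
static void
PopEmptySearchPathSketch(int saveNestLevel)
{
	AtEOXact_GUC(true, saveNestLevel);
}
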
diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c
index 390a81286..500c6f3f2 100644
--- a/src/backend/distributed/commands/table.c
+++ b/src/backend/distributed/commands/table.c
@@ -76,6 +76,8 @@ static void DistributePartitionUsingParent(Oid parentRelationId,
static void ErrorIfMultiLevelPartitioning(Oid parentRelationId, Oid partitionRelationId);
static void ErrorIfAttachCitusTableToPgLocalTable(Oid parentRelationId,
Oid partitionRelationId);
+static bool DeparserSupportsAlterTableAddColumn(AlterTableStmt *alterTableStatement,
+ AlterTableCmd *addColumnSubCommand);
static bool ATDefinesFKeyBetweenPostgresAndCitusLocalOrRef(
AlterTableStmt *alterTableStatement);
static bool ShouldMarkConnectedRelationsNotAutoConverted(Oid leftRelationId,
@@ -101,8 +103,6 @@ static List * GetRelationIdListFromRangeVarList(List *rangeVarList, LOCKMODE loc
static bool AlterTableCommandTypeIsTrigger(AlterTableType alterTableType);
static bool AlterTableDropsForeignKey(AlterTableStmt *alterTableStatement);
static void ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement);
-static List * InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId,
- const char *commandString);
static bool AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement,
AlterTableCmd *command);
static bool AlterColumnInvolvesIdentityColumn(AlterTableStmt *alterTableStatement,
@@ -120,7 +120,8 @@ static void SetInterShardDDLTaskRelationShardList(Task *task,
static Oid get_attrdef_oid(Oid relationId, AttrNumber attnum);
static char * GetAddColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId,
- char *colname, TypeName *typeName);
+ char *colname, TypeName *typeName,
+ bool ifNotExists);
static void ErrorIfAlterTableDropTableNameFromPostgresFdw(List *optionList, Oid
relationId);
@@ -1028,30 +1029,7 @@ PreprocessAlterTableAddConstraint(AlterTableStmt *alterTableStatement, Oid
relationId,
Constraint *constraint)
{
- /*
- * We should only preprocess an ADD CONSTRAINT command if we have empty conname
- * This only happens when we have to create a constraint name in citus since the client does
- * not specify a name.
- * indexname should also be NULL to make sure this is not an
- * ADD {PRIMARY KEY, UNIQUE} USING INDEX command
- * which doesn't need a conname since the indexname will be used
- */
- Assert(constraint->conname == NULL && constraint->indexname == NULL);
-
- Relation rel = RelationIdGetRelation(relationId);
-
- /*
- * Change the alterTableCommand so that the standard utility
- * hook runs it with the name we created.
- */
-
- constraint->conname = GenerateConstraintName(RelationGetRelationName(rel),
- RelationGetNamespace(rel),
- constraint);
-
- RelationClose(rel);
-
- SwitchToSequentialAndLocalExecutionIfConstraintNameTooLong(relationId, constraint);
+ PrepareAlterTableStmtForConstraint(alterTableStatement, relationId, constraint);
char *ddlCommand = DeparseTreeNode((Node *) alterTableStatement);
@@ -1067,11 +1045,6 @@ PreprocessAlterTableAddConstraint(AlterTableStmt *alterTableStatement, Oid
Oid rightRelationId = RangeVarGetRelid(constraint->pktable, NoLock,
false);
- if (IsCitusTableType(rightRelationId, REFERENCE_TABLE))
- {
- EnsureSequentialModeForAlterTableOperation();
- }
-
/*
* If one of the relations involved in the FOREIGN KEY constraint is not a distributed table, citus errors out eventually.
* PreprocessAlterTableStmt function returns an empty tasklist in those cases.
@@ -1099,6 +1072,47 @@ PreprocessAlterTableAddConstraint(AlterTableStmt *alterTableStatement, Oid
}
+/*
+ * PrepareAlterTableStmtForConstraint assigns a name to the constraint if it
+ * does not have one and switches to sequential and local execution if the
+ * constraint name is too long.
+ */
+void
+PrepareAlterTableStmtForConstraint(AlterTableStmt *alterTableStatement,
+ Oid relationId,
+ Constraint *constraint)
+{
+ if (constraint->conname == NULL && constraint->indexname == NULL)
+ {
+ Relation rel = RelationIdGetRelation(relationId);
+
+ /*
+ * Change the alterTableCommand so that the standard utility
+ * hook runs it with the name we created.
+ */
+
+ constraint->conname = GenerateConstraintName(RelationGetRelationName(rel),
+ RelationGetNamespace(rel),
+ constraint);
+
+ RelationClose(rel);
+ }
+
+ SwitchToSequentialAndLocalExecutionIfConstraintNameTooLong(relationId, constraint);
+
+ if (constraint->contype == CONSTR_FOREIGN)
+ {
+ Oid rightRelationId = RangeVarGetRelid(constraint->pktable, NoLock,
+ false);
+
+ if (IsCitusTableType(rightRelationId, REFERENCE_TABLE))
+ {
+ EnsureSequentialModeForAlterTableOperation();
+ }
+ }
+}
+
+
/*
* PreprocessAlterTableStmt determines whether a given ALTER TABLE statement
* involves a distributed table. If so (and if the statement does not use
@@ -1267,6 +1281,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
* we also set skip_validation to true to prevent PostgreSQL to verify validity
* of the foreign constraint in master. Validity will be checked in workers
* anyway.
+ * - an ADD COLUMN .. that is the only subcommand in the list OR
* - an ADD COLUMN .. DEFAULT nextval('..') OR
* an ADD COLUMN .. SERIAL pseudo-type OR
* an ALTER COLUMN .. SET DEFAULT nextval('..'). If there is we set
@@ -1396,13 +1411,6 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
}
else if (alterTableType == AT_AddColumn)
{
- /*
- * TODO: This code path is nothing beneficial since we do not
- * support ALTER TABLE %s ADD COLUMN %s [constraint] for foreign keys.
- * However, the code is kept in case we fix the constraint
- * creation without a name and allow foreign key creation with the mentioned
- * command.
- */
ColumnDef *columnDefinition = (ColumnDef *) command->def;
List *columnConstraints = columnDefinition->constraints;
@@ -1426,12 +1434,36 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
}
}
+ if (DeparserSupportsAlterTableAddColumn(alterTableStatement, command))
+ {
+ deparseAT = true;
+
+ constraint = NULL;
+ foreach_ptr(constraint, columnConstraints)
+ {
+ if (ConstrTypeCitusCanDefaultName(constraint->contype))
+ {
+ PrepareAlterTableStmtForConstraint(alterTableStatement,
+ leftRelationId,
+ constraint);
+ }
+ }
+
+ /*
+ * Copy the constraints to the new subcommand because now we
+ * might have assigned names to some of them.
+ */
+ ColumnDef *newColumnDef = (ColumnDef *) newCmd->def;
+ newColumnDef->constraints = copyObject(columnConstraints);
+ }
+
/*
* We check for ADD COLUMN .. DEFAULT expr
* if expr contains nextval('user_defined_seq')
* we should deparse the statement
*/
constraint = NULL;
+ int constraintIdx = 0;
foreach_ptr(constraint, columnConstraints)
{
if (constraint->contype == CONSTR_DEFAULT)
@@ -1447,14 +1479,19 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
deparseAT = true;
useInitialDDLCommandString = false;
- /* the new column definition will have no constraint */
- ColumnDef *newColDef = copyObject(columnDefinition);
- newColDef->constraints = NULL;
-
- newCmd->def = (Node *) newColDef;
+ /* drop the default expression from the new subcommand */
+ ColumnDef *newColumnDef = (ColumnDef *) newCmd->def;
+ newColumnDef->constraints =
+ list_delete_nth_cell(newColumnDef->constraints,
+ constraintIdx);
}
}
+
+ /* there can only be one DEFAULT constraint per column */
+ break;
}
+
+ constraintIdx++;
}
@@ -1638,6 +1675,49 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
}
+/*
+ * DeparserSupportsAlterTableAddColumn returns true if it's safe to deparse
+ * the given ALTER TABLE statement that is known to contain the given ADD COLUMN
+ * subcommand.
+ */
+static bool
+DeparserSupportsAlterTableAddColumn(AlterTableStmt *alterTableStatement,
+ AlterTableCmd *addColumnSubCommand)
+{
+ /*
+ * We support deparsing an ADD COLUMN only if it's the only
+ * subcommand.
+ */
+ if (list_length(alterTableStatement->cmds) == 1 &&
+ alterTableStatement->objtype == OBJECT_TABLE)
+ {
+ ColumnDef *columnDefinition = (ColumnDef *) addColumnSubCommand->def;
+ Constraint *constraint = NULL;
+ foreach_ptr(constraint, columnDefinition->constraints)
+ {
+ if (constraint->contype == CONSTR_CHECK)
+ {
+ /*
+ * Given that we're in the preprocess, any reference to the
+ * column that we're adding would break the deparser. This
+ * can only be the case with CHECK constraints. For this
+ * reason, we skip deparsing the command and fall back to
+ * legacy behavior that we follow for ADD COLUMN subcommands.
+ *
+ * For other constraint types, we prepare the constraint to
+ * make sure that we can deparse it.
+ */
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+
/*
* ATDefinesFKeyBetweenPostgresAndCitusLocalOrRef returns true if given
* alter table command defines foreign key between a postgres table and a
@@ -2637,7 +2717,9 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement)
columnDefinition
->colname,
columnDefinition
- ->typeName);
+ ->typeName,
+ command->
+ missing_ok);
}
}
}
@@ -2902,7 +2984,7 @@ GetAlterColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, char *colna
*/
static char *
GetAddColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, char *colname,
- TypeName *typeName)
+ TypeName *typeName, bool ifNotExists)
{
char *qualifiedSequenceName = generate_qualified_relation_name(sequenceOid);
char *qualifiedRelationName = generate_qualified_relation_name(relationId);
@@ -2927,8 +3009,9 @@ GetAddColumnWithNextvalDefaultCmd(Oid sequenceOid, Oid relationId, char *colname
StringInfoData str = { 0 };
initStringInfo(&str);
appendStringInfo(&str,
- "ALTER TABLE %s ADD COLUMN %s %s "
- "DEFAULT %s(%s::regclass)", qualifiedRelationName, colname,
+ "ALTER TABLE %s ADD COLUMN %s %s %s "
+ "DEFAULT %s(%s::regclass)", qualifiedRelationName,
+ ifNotExists ? "IF NOT EXISTS" : "", colname,
format_type_extended(typeOid, typmod, formatFlags),
quote_qualified_identifier("pg_catalog", nextvalFunctionName),
quote_literal_cstr(qualifiedSequenceName));
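
Worked example, not part of the patch: the new ifNotExists argument means a user-supplied IF NOT EXISTS clause is no longer dropped when the ADD COLUMN command is regenerated for the workers. With hypothetical table, column, and sequence names, the string built above would read roughly:

	ALTER TABLE public.dist ADD COLUMN IF NOT EXISTS id bigint
		DEFAULT pg_catalog.nextval('public.dist_seq'::regclass)
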
@@ -3676,13 +3759,6 @@ SetupExecutionModeForAlterTable(Oid relationId, AlterTableCmd *command)
}
else if (alterTableType == AT_AddColumn)
{
- /*
- * TODO: This code path will never be executed since we do not
- * support foreign constraint creation via
- * ALTER TABLE %s ADD COLUMN %s [constraint]. However, the code
- * is kept in case we fix the constraint creation without a name
- * and allow foreign key creation with the mentioned command.
- */
ColumnDef *columnDefinition = (ColumnDef *) command->def;
List *columnConstraints = columnDefinition->constraints;
@@ -3780,7 +3856,7 @@ SetupExecutionModeForAlterTable(Oid relationId, AlterTableCmd *command)
* applied. rightRelationId is the relation id of either index or distributed table which
* given command refers to.
*/
-static List *
+List *
InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId,
const char *commandString)
{
@@ -3878,25 +3954,29 @@ static void
SetInterShardDDLTaskPlacementList(Task *task, ShardInterval *leftShardInterval,
ShardInterval *rightShardInterval)
{
- Oid leftRelationId = leftShardInterval->relationId;
- Oid rightRelationId = rightShardInterval->relationId;
- if (IsCitusTableType(leftRelationId, REFERENCE_TABLE) &&
- IsCitusTableType(rightRelationId, CITUS_LOCAL_TABLE))
+ uint64 leftShardId = leftShardInterval->shardId;
+ List *leftShardPlacementList = ActiveShardPlacementList(leftShardId);
+
+ uint64 rightShardId = rightShardInterval->shardId;
+ List *rightShardPlacementList = ActiveShardPlacementList(rightShardId);
+
+ List *intersectedPlacementList = NIL;
+
+ ShardPlacement *leftShardPlacement = NULL;
+ foreach_ptr(leftShardPlacement, leftShardPlacementList)
{
- /*
- * If we are defining/dropping a foreign key from a reference table
- * to a citus local table, then we will execute ADD/DROP constraint
- * command only for coordinator placement of reference table.
- */
- uint64 leftShardId = leftShardInterval->shardId;
- task->taskPlacementList = ActiveShardPlacementListOnGroup(leftShardId,
- COORDINATOR_GROUP_ID);
- }
- else
- {
- uint64 leftShardId = leftShardInterval->shardId;
- task->taskPlacementList = ActiveShardPlacementList(leftShardId);
+ ShardPlacement *rightShardPlacement = NULL;
+ foreach_ptr(rightShardPlacement, rightShardPlacementList)
+ {
+ if (leftShardPlacement->nodeId == rightShardPlacement->nodeId)
+ {
+ intersectedPlacementList = lappend(intersectedPlacementList,
+ leftShardPlacement);
+ }
+ }
}
+
+ task->taskPlacementList = intersectedPlacementList;
}
diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c
index 0ddad70f5..7577dfd31 100644
--- a/src/backend/distributed/commands/trigger.c
+++ b/src/backend/distributed/commands/trigger.c
@@ -74,7 +74,7 @@ GetExplicitTriggerCommandList(Oid relationId)
{
List *createTriggerCommandList = NIL;
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
List *triggerIdList = GetExplicitTriggerIdList(relationId);
@@ -116,7 +116,7 @@ GetExplicitTriggerCommandList(Oid relationId)
}
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
return createTriggerCommandList;
}
@@ -249,7 +249,7 @@ GetExplicitTriggerIdList(Oid relationId)
ScanKeyData scanKey[1];
ScanKeyInit(&scanKey[0], Anum_pg_trigger_tgrelid,
- BTEqualStrategyNumber, F_OIDEQ, relationId);
+ BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId));
bool useIndex = true;
SysScanDesc scanDescriptor = systable_beginscan(pgTrigger, TriggerRelidNameIndexId,
diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c
index 70fee6bd5..4de518a06 100644
--- a/src/backend/distributed/commands/truncate.c
+++ b/src/backend/distributed/commands/truncate.c
@@ -182,7 +182,7 @@ truncate_local_data_after_distributing_table(PG_FUNCTION_ARGS)
TruncateStmt *truncateStmt = makeNode(TruncateStmt);
char *relationName = generate_qualified_relation_name(relationId);
- List *names = stringToQualifiedNameList(relationName);
+ List *names = stringToQualifiedNameList_compat(relationName);
truncateStmt->relations = list_make1(makeRangeVarFromNameList(names));
truncateStmt->restart_seqs = false;
truncateStmt->behavior = DROP_CASCADE;
diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c
index 24ca91aeb..02e5f0dee 100644
--- a/src/backend/distributed/commands/type.c
+++ b/src/backend/distributed/commands/type.c
@@ -187,7 +187,7 @@ RecreateCompositeTypeStmt(Oid typeOid)
Assert(get_typtype(typeOid) == TYPTYPE_COMPOSITE);
CompositeTypeStmt *stmt = makeNode(CompositeTypeStmt);
- List *names = stringToQualifiedNameList(format_type_be_qualified(typeOid));
+ List *names = stringToQualifiedNameList_compat(format_type_be_qualified(typeOid));
stmt->typevar = makeRangeVarFromNameList(names);
stmt->coldeflist = CompositeTypeColumnDefList(typeOid);
@@ -252,7 +252,7 @@ RecreateEnumStmt(Oid typeOid)
Assert(get_typtype(typeOid) == TYPTYPE_ENUM);
CreateEnumStmt *stmt = makeNode(CreateEnumStmt);
- stmt->typeName = stringToQualifiedNameList(format_type_be_qualified(typeOid));
+ stmt->typeName = stringToQualifiedNameList_compat(format_type_be_qualified(typeOid));
stmt->vals = EnumValsList(typeOid);
return stmt;
@@ -565,7 +565,8 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress)
char *
GenerateBackupNameForTypeCollision(const ObjectAddress *address)
{
- List *names = stringToQualifiedNameList(format_type_be_qualified(address->objectId));
+ List *names = stringToQualifiedNameList_compat(format_type_be_qualified(
+ address->objectId));
RangeVar *rel = makeRangeVarFromNameList(names);
char *newName = palloc0(NAMEDATALEN);
diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index 888b3dfed..10e424623 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -77,6 +77,7 @@
#include "tcop/utility.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
+#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"
@@ -193,6 +194,7 @@ multi_ProcessUtility(PlannedStmt *pstmt,
bool isCreateAlterExtensionUpdateCitusStmt = IsCreateAlterExtensionUpdateCitusStmt(
parsetree);
+
if (EnableVersionChecks && isCreateAlterExtensionUpdateCitusStmt)
{
ErrorIfUnstableCreateOrAlterExtensionStmt(parsetree);
@@ -207,6 +209,18 @@ multi_ProcessUtility(PlannedStmt *pstmt,
PreprocessCreateExtensionStmtForCitusColumnar(parsetree);
}
+ if (isCreateAlterExtensionUpdateCitusStmt || IsDropCitusExtensionStmt(parsetree))
+ {
+ /*
+ * Citus maintains a higher-level cache. We use the cache invalidation mechanism
+ * of Postgres to achieve cache coherency between backends. Any change to the Citus
+ * extension should be made known to other backends. We do this by invalidating the
+ * relcache and thereby invoking the Citus-registered callback that invalidates
+ * the Citus cache in other backends.
+ */
+ CacheInvalidateRelcacheAll();
+ }
+
/*
* Make sure that on DROP DATABASE we terminate the background daemon
* associated with it.
@@ -923,18 +937,10 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
foreach_ptr(address, addresses)
{
MarkObjectDistributed(address);
+ TrackPropagatedObject(address);
}
}
}
-
- if (!IsDropCitusExtensionStmt(parsetree) && !IsA(parsetree, DropdbStmt))
- {
- /*
- * Ensure value is valid, we can't do some checks during CREATE
- * EXTENSION. This is important to register some invalidation callbacks.
- */
- CitusHasBeenLoaded(); /* lgtm[cpp/return-value-ignored] */
- }
}
diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c
index 6bc76b7b8..ee03aeae1 100644
--- a/src/backend/distributed/commands/vacuum.c
+++ b/src/backend/distributed/commands/vacuum.c
@@ -42,6 +42,9 @@ typedef struct CitusVacuumParams
VacOptValue truncate;
VacOptValue index_cleanup;
int nworkers;
+#if PG_VERSION_NUM >= PG_VERSION_16
+ int ring_size;
+#endif
} CitusVacuumParams;
/* Local functions forward declarations for processing distributed table commands */
@@ -318,13 +321,26 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
}
/* if no flags remain, exit early */
- if (vacuumFlags == 0 &&
- vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED &&
- vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED &&
- vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
- )
+#if PG_VERSION_NUM >= PG_VERSION_16
+ if (vacuumFlags & VACOPT_PROCESS_TOAST &&
+ vacuumFlags & VACOPT_PROCESS_MAIN)
{
- return vacuumPrefix->data;
+ /* process toast and process main are true by default */
+ if (((vacuumFlags & ~VACOPT_PROCESS_TOAST) & ~VACOPT_PROCESS_MAIN) == 0 &&
+ vacuumParams.ring_size == -1 &&
+#else
+ if (vacuumFlags & VACOPT_PROCESS_TOAST)
+ {
+ /* process toast is true by default */
+ if ((vacuumFlags & ~VACOPT_PROCESS_TOAST) == 0 &&
+#endif
+ vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED &&
+ vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED &&
+ vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
+ )
+ {
+ return vacuumPrefix->data;
+ }
}
/* otherwise, handle options */
@@ -360,11 +376,33 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,");
}
- if (vacuumFlags & VACOPT_PROCESS_TOAST)
+ if (!(vacuumFlags & VACOPT_PROCESS_TOAST))
{
- appendStringInfoString(vacuumPrefix, "PROCESS_TOAST,");
+ appendStringInfoString(vacuumPrefix, "PROCESS_TOAST FALSE,");
}
+#if PG_VERSION_NUM >= PG_VERSION_16
+ if (!(vacuumFlags & VACOPT_PROCESS_MAIN))
+ {
+ appendStringInfoString(vacuumPrefix, "PROCESS_MAIN FALSE,");
+ }
+
+ if (vacuumFlags & VACOPT_SKIP_DATABASE_STATS)
+ {
+ appendStringInfoString(vacuumPrefix, "SKIP_DATABASE_STATS,");
+ }
+
+ if (vacuumFlags & VACOPT_ONLY_DATABASE_STATS)
+ {
+ appendStringInfoString(vacuumPrefix, "ONLY_DATABASE_STATS,");
+ }
+
+ if (vacuumParams.ring_size != -1)
+ {
+ appendStringInfo(vacuumPrefix, "BUFFER_USAGE_LIMIT %d,", vacuumParams.ring_size);
+ }
+#endif
+
if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED)
{
appendStringInfoString(vacuumPrefix,
@@ -499,7 +537,14 @@ VacuumStmtParams(VacuumStmt *vacstmt)
bool freeze = false;
bool full = false;
bool disable_page_skipping = false;
- bool process_toast = false;
+ bool process_toast = true;
+
+#if PG_VERSION_NUM >= PG_VERSION_16
+ bool process_main = true;
+ bool skip_database_stats = false;
+ bool only_database_stats = false;
+ params.ring_size = -1;
+#endif
/* Set default value */
params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
@@ -519,6 +564,13 @@ VacuumStmtParams(VacuumStmt *vacstmt)
{
skip_locked = defGetBoolean(opt);
}
+#if PG_VERSION_NUM >= PG_VERSION_16
+ else if (strcmp(opt->defname, "buffer_usage_limit") == 0)
+ {
+ char *vac_buffer_size = defGetString(opt);
+ parse_int(vac_buffer_size, &params.ring_size, GUC_UNIT_KB, NULL);
+ }
+#endif
else if (!vacstmt->is_vacuumcmd)
{
ereport(ERROR,
@@ -543,6 +595,20 @@ VacuumStmtParams(VacuumStmt *vacstmt)
{
disable_page_skipping = defGetBoolean(opt);
}
+#if PG_VERSION_NUM >= PG_VERSION_16
+ else if (strcmp(opt->defname, "process_main") == 0)
+ {
+ process_main = defGetBoolean(opt);
+ }
+ else if (strcmp(opt->defname, "skip_database_stats") == 0)
+ {
+ skip_database_stats = defGetBoolean(opt);
+ }
+ else if (strcmp(opt->defname, "only_database_stats") == 0)
+ {
+ only_database_stats = defGetBoolean(opt);
+ }
+#endif
else if (strcmp(opt->defname, "process_toast") == 0)
{
process_toast = defGetBoolean(opt);
@@ -613,6 +679,11 @@ VacuumStmtParams(VacuumStmt *vacstmt)
(analyze ? VACOPT_ANALYZE : 0) |
(freeze ? VACOPT_FREEZE : 0) |
(full ? VACOPT_FULL : 0) |
+#if PG_VERSION_NUM >= PG_VERSION_16
+ (process_main ? VACOPT_PROCESS_MAIN : 0) |
+ (skip_database_stats ? VACOPT_SKIP_DATABASE_STATS : 0) |
+ (only_database_stats ? VACOPT_ONLY_DATABASE_STATS : 0) |
+#endif
(process_toast ? VACOPT_PROCESS_TOAST : 0) |
(disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0);
return params;
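
Illustrative sketch, not part of the patch: the BUFFER_USAGE_LIMIT handling above relies on parse_int() with GUC_UNIT_KB, so ring_size is always stored in kilobytes (-1 meaning the option was not given) and later re-emitted as "BUFFER_USAGE_LIMIT %d" by DeparseVacuumStmtPrefix(). A standalone version of that normalization, with error handling added purely for the sketch (the hunk above ignores parse failures), could be:

#if PG_VERSION_NUM >= PG_VERSION_16
/* sketch: normalize a BUFFER_USAGE_LIMIT option value to kilobytes */
static int
ParseBufferUsageLimitSketch(DefElem *opt)
{
	int ringSizeKb = -1;	/* -1 keeps the option unspecified */
	char *value = defGetString(opt);

	if (!parse_int(value, &ringSizeKb, GUC_UNIT_KB, NULL))
	{
		ereport(ERROR, (errmsg("BUFFER_USAGE_LIMIT must be a size or 0")));
	}

	return ringSizeKb;	/* e.g. '256kB' -> 256, '1MB' -> 1024 */
}
#endif
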
diff --git a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c
index 02d6815d9..7c4816144 100644
--- a/src/backend/distributed/commands/view.c
+++ b/src/backend/distributed/commands/view.c
@@ -479,10 +479,7 @@ AppendViewDefinitionToCreateViewCommand(StringInfo buf, Oid viewOid)
* Set search_path to NIL so that all objects outside of pg_catalog will be
* schema-prefixed.
*/
- OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext);
- overridePath->schemas = NIL;
- overridePath->addCatalog = true;
- PushOverrideSearchPath(overridePath);
+ int saveNestLevel = PushEmptySearchPath();
/*
* Push the transaction snapshot to be able to get view definition with pg_get_viewdef
@@ -494,7 +491,7 @@ AppendViewDefinitionToCreateViewCommand(StringInfo buf, Oid viewOid)
char *viewDefinition = TextDatumGetCString(viewDefinitionDatum);
PopActiveSnapshot();
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
appendStringInfo(buf, "AS %s ", viewDefinition);
}
diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c
index 46e757bfe..9439b38c5 100644
--- a/src/backend/distributed/connection/connection_management.c
+++ b/src/backend/distributed/connection/connection_management.c
@@ -371,7 +371,7 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
*/
MultiConnection *connection = MemoryContextAllocZero(ConnectionContext,
sizeof(MultiConnection));
- connection->initilizationState = POOL_STATE_NOT_INITIALIZED;
+ connection->initializationState = POOL_STATE_NOT_INITIALIZED;
dlist_push_tail(entry->connections, &connection->connectionNode);
/* these two flags are by nature cannot happen at the same time */
@@ -417,7 +417,7 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
* We've already incremented the counter above, so we should decrement
* when we're done with the connection.
*/
- connection->initilizationState = POOL_STATE_COUNTER_INCREMENTED;
+ connection->initializationState = POOL_STATE_COUNTER_INCREMENTED;
StartConnectionEstablishment(connection, &key);
@@ -430,7 +430,7 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
}
/* fully initialized the connection, record it */
- connection->initilizationState = POOL_STATE_INITIALIZED;
+ connection->initializationState = POOL_STATE_INITIALIZED;
return connection;
}
@@ -486,7 +486,7 @@ FindAvailableConnection(dlist_head *connections, uint32 flags)
continue;
}
- if (connection->initilizationState != POOL_STATE_INITIALIZED)
+ if (connection->initializationState != POOL_STATE_INITIALIZED)
{
/*
* If the connection has not been initialized, it should not be
@@ -780,7 +780,7 @@ ShutdownConnection(MultiConnection *connection)
/*
- * MultiConnectionStatePoll executes a PQconnectPoll on the connection to progres the
+ * MultiConnectionStatePoll executes a PQconnectPoll on the connection to progress the
* connection establishment. The return value of this function indicates if the
* MultiConnectionPollState has been changed, which could require a change to the WaitEventSet
*/
@@ -1182,10 +1182,10 @@ CitusPQFinish(MultiConnection *connection)
}
/* behave idempotently, there is no guarantee that CitusPQFinish() is called only once */
- if (connection->initilizationState >= POOL_STATE_COUNTER_INCREMENTED)
+ if (connection->initializationState >= POOL_STATE_COUNTER_INCREMENTED)
{
DecrementSharedConnectionCounter(connection->hostname, connection->port);
- connection->initilizationState = POOL_STATE_NOT_INITIALIZED;
+ connection->initializationState = POOL_STATE_NOT_INITIALIZED;
}
}
@@ -1482,7 +1482,7 @@ ShouldShutdownConnection(MultiConnection *connection, const int cachedConnection
* from their application name.
*/
return (IsCitusInternalBackend() || IsRebalancerInternalBackend()) ||
- connection->initilizationState != POOL_STATE_INITIALIZED ||
+ connection->initializationState != POOL_STATE_INITIALIZED ||
cachedConnectionCount >= MaxCachedConnectionsPerWorker ||
connection->forceCloseAtTransactionEnd ||
PQstatus(connection->pgConn) != CONNECTION_OK ||
@@ -1541,7 +1541,7 @@ RestartConnection(MultiConnection *connection)
* Note that we have to do this because ShutdownConnection() sets the
* state to not initialized.
*/
- connection->initilizationState = POOL_STATE_INITIALIZED;
+ connection->initializationState = POOL_STATE_INITIALIZED;
connection->connectionState = MULTI_CONNECTION_CONNECTING;
}
diff --git a/src/backend/distributed/connection/locally_reserved_shared_connections.c b/src/backend/distributed/connection/locally_reserved_shared_connections.c
index 9f703dc65..e3f7cb628 100644
--- a/src/backend/distributed/connection/locally_reserved_shared_connections.c
+++ b/src/backend/distributed/connection/locally_reserved_shared_connections.c
@@ -14,7 +14,7 @@
* (b) Reserving connections, the logic that this
* file implements.
*
- * Finally, as the name already implies, once a node has reserved a shared
+ * Finally, as the name already implies, once a node has reserved a shared
* connection, it is guaranteed to have the right to establish a connection
* to the given remote node when needed.
*
@@ -505,7 +505,7 @@ IsReservationPossible(void)
/*
- * AllocateReservedConectionEntry allocates the required entry in the hash
+ * AllocateOrGetReservedConnectionEntry allocates the required entry in the hash
* map by HASH_ENTER. The function throws an error if it cannot allocate
* the entry.
*/
diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c
index 1dfd51781..15dd985ec 100644
--- a/src/backend/distributed/connection/remote_commands.c
+++ b/src/backend/distributed/connection/remote_commands.c
@@ -716,14 +716,14 @@ PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes)
Assert(PQisnonblocking(pgConn));
int copyState = PQputCopyData(pgConn, buffer, nbytes);
- if (copyState == -1)
+ if (copyState <= 0)
{
return false;
}
/*
* PQputCopyData may have queued up part of the data even if it managed
- * to send some of it succesfully. We provide back pressure by waiting
+ * to send some of it successfully. We provide back pressure by waiting
* until the socket is writable to prevent the internal libpq buffers
* from growing excessively.
*
diff --git a/src/backend/distributed/connection/shared_connection_stats.c b/src/backend/distributed/connection/shared_connection_stats.c
index 82ad26756..fcd396fe4 100644
--- a/src/backend/distributed/connection/shared_connection_stats.c
+++ b/src/backend/distributed/connection/shared_connection_stats.c
@@ -339,7 +339,7 @@ TryToIncrementSharedConnectionCounter(const char *hostname, int port)
LockConnectionSharedMemory(LW_EXCLUSIVE);
/*
- * As the hash map is allocated in shared memory, it doesn't rely on palloc for
+ * As the hash map is allocated in shared memory, it doesn't rely on palloc for
* memory allocation, so we could get NULL via HASH_ENTER_NULL when there is no
* space in the shared memory. That's why we prefer continuing the execution
* instead of throwing an error.
@@ -440,7 +440,7 @@ IncrementSharedConnectionCounter(const char *hostname, int port)
LockConnectionSharedMemory(LW_EXCLUSIVE);
/*
- * As the hash map is allocated in shared memory, it doesn't rely on palloc for
+ * As the hash map is allocated in shared memory, it doesn't rely on palloc for
* memory allocation, so we could get NULL via HASH_ENTER_NULL. That's why we prefer
* continuing the execution instead of throwing an error.
*/
@@ -694,7 +694,7 @@ SharedConnectionStatsShmemInit(void)
ConditionVariableInit(&ConnectionStatsSharedState->waitersConditionVariable);
}
- /* allocate hash table */
+ /* allocate hash table */
SharedConnStatsHash =
ShmemInitHash("Shared Conn. Stats Hash", MaxWorkerNodesTracked,
MaxWorkerNodesTracked, &info, hashFlags);
diff --git a/src/backend/distributed/deparser/citus_grantutils.c b/src/backend/distributed/deparser/citus_grantutils.c
new file mode 100644
index 000000000..8e0dadff2
--- /dev/null
+++ b/src/backend/distributed/deparser/citus_grantutils.c
@@ -0,0 +1,110 @@
+#include "postgres.h"
+#include "lib/stringinfo.h"
+#include "nodes/parsenodes.h"
+#include "distributed/deparser.h"
+#include "distributed/citus_ruleutils.h"
+
+/*
+ * Append the 'WITH GRANT OPTION' clause to the given buffer if the given
+ * statement is a 'GRANT' statement and the grant option is specified.
+ */
+void
+AppendWithGrantOption(StringInfo buf, GrantStmt *stmt)
+{
+ if (stmt->is_grant && stmt->grant_option)
+ {
+ appendStringInfo(buf, " WITH GRANT OPTION");
+ }
+}
+
+
+/*
+ * Append the 'GRANT OPTION FOR' clause to the given buffer if the given
+ * statement is a 'REVOKE' statement and the grant option is specified.
+ */
+void
+AppendGrantOptionFor(StringInfo buf, GrantStmt *stmt)
+{
+ if (!stmt->is_grant && stmt->grant_option)
+ {
+ appendStringInfo(buf, "GRANT OPTION FOR ");
+ }
+}
+
+
+/*
+ * Append the 'RESTRICT' or 'CASCADE' clause to the given buffer if the given
+ * statement is a 'REVOKE' statement and the behavior is specified.
+ */
+void
+AppendGrantRestrictAndCascadeForRoleSpec(StringInfo buf, DropBehavior behavior, bool
+ isGrant)
+{
+ if (!isGrant)
+ {
+ if (behavior == DROP_RESTRICT)
+ {
+ appendStringInfo(buf, " RESTRICT");
+ }
+ else if (behavior == DROP_CASCADE)
+ {
+ appendStringInfo(buf, " CASCADE");
+ }
+ }
+}
+
+
+/*
+ * Append the 'RESTRICT' or 'CASCADE' clause to the given buffer using 'GrantStmt',
+ * if the given statement is a 'REVOKE' statement and the behavior is specified.
+ */
+void
+AppendGrantRestrictAndCascade(StringInfo buf, GrantStmt *stmt)
+{
+ AppendGrantRestrictAndCascadeForRoleSpec(buf, stmt->behavior, stmt->is_grant);
+}
+
+
+/*
+ * Append the 'GRANTED BY' clause to the given buffer if the given statement is a
+ * 'GRANT' statement and the grantor is specified.
+ */
+void
+AppendGrantedByInGrantForRoleSpec(StringInfo buf, RoleSpec *grantor, bool isGrant)
+{
+ if (isGrant && grantor)
+ {
+ appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(grantor, true));
+ }
+}
+
+
+/*
+ * Append the 'GRANTED BY' clause to the given buffer using 'GrantStmt',
+ * if the given statement is a 'GRANT' statement and the grantor is specified.
+ */
+void
+AppendGrantedByInGrant(StringInfo buf, GrantStmt *stmt)
+{
+ AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant);
+}
+
+
+void
+AppendGrantSharedPrefix(StringInfo buf, GrantStmt *stmt)
+{
+ appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");
+ AppendGrantOptionFor(buf, stmt);
+ AppendGrantPrivileges(buf, stmt);
+}
+
+
+void
+AppendGrantSharedSuffix(StringInfo buf, GrantStmt *stmt)
+{
+ AppendGrantGrantees(buf, stmt);
+ AppendWithGrantOption(buf, stmt);
+ AppendGrantRestrictAndCascade(buf, stmt);
+ AppendGrantedByInGrant(buf, stmt);
+ appendStringInfo(buf, ";");
+}
diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c
index 6b865e061..220ea3ec7 100644
--- a/src/backend/distributed/deparser/citus_ruleutils.c
+++ b/src/backend/distributed/deparser/citus_ruleutils.c
@@ -818,7 +818,7 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
* Switch to empty search_path to deparse_index_columns to produce fully-
* qualified names in expressions.
*/
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
/* index column or expression list begins here */
appendStringInfoChar(buffer, '(');
@@ -855,7 +855,7 @@ deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid,
}
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
}
diff --git a/src/backend/distributed/deparser/citus_setutils.c b/src/backend/distributed/deparser/citus_setutils.c
new file mode 100644
index 000000000..481a2860b
--- /dev/null
+++ b/src/backend/distributed/deparser/citus_setutils.c
@@ -0,0 +1,174 @@
+#include "postgres.h"
+
+#include "pg_version_compat.h"
+
+#include "catalog/namespace.h"
+#include "lib/stringinfo.h"
+#include "nodes/parsenodes.h"
+#include "utils/builtins.h"
+
+#include "distributed/deparser.h"
+#include "distributed/citus_ruleutils.h"
+#include "commands/defrem.h"
+#include "distributed/log_utils.h"
+#include "parser/parse_type.h"
+#include "nodes/print.h"
+
+
+void AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt);
+
+/*
+ * AppendVarSetValue deparses a VariableSetStmt with VAR_SET_VALUE kind.
+ * It is adapted from flatten_set_variable_args in postgres's utils/misc/guc.c;
+ * however, flatten_set_variable_args does not apply correct quoting.
+ */
+void
+AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt)
+{
+ ListCell *varArgCell = NULL;
+ ListCell *firstCell = list_head(setStmt->args);
+
+ Assert(setStmt->kind == VAR_SET_VALUE);
+
+ foreach(varArgCell, setStmt->args)
+ {
+ Node *varArgNode = lfirst(varArgCell);
+ A_Const *varArgConst = NULL;
+ TypeName *typeName = NULL;
+
+ if (IsA(varArgNode, A_Const))
+ {
+ varArgConst = (A_Const *) varArgNode;
+ }
+ else if (IsA(varArgNode, TypeCast))
+ {
+ TypeCast *varArgTypeCast = (TypeCast *) varArgNode;
+
+ varArgConst = castNode(A_Const, varArgTypeCast->arg);
+ typeName = varArgTypeCast->typeName;
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d", varArgNode->type);
+ }
+
+ /* don't know how to start SET until we inspect first arg */
+ if (varArgCell != firstCell)
+ {
+ appendStringInfoChar(buf, ',');
+ }
+ else if (typeName != NULL)
+ {
+ appendStringInfoString(buf, " SET TIME ZONE");
+ }
+ else
+ {
+ appendStringInfo(buf, " SET %s =", quote_identifier(setStmt->name));
+ }
+
+ Node *value = (Node *) &varArgConst->val;
+ switch (value->type)
+ {
+ case T_Integer:
+ {
+ appendStringInfo(buf, " %d", intVal(value));
+ break;
+ }
+
+ case T_Float:
+ {
+ appendStringInfo(buf, " %s", nodeToString(value));
+ break;
+ }
+
+ case T_String:
+ {
+ if (typeName != NULL)
+ {
+ /*
+ * Must be a ConstInterval argument for TIME ZONE. Coerce
+ * to interval and back to normalize the value and account
+ * for any typmod.
+ */
+ Oid typoid = InvalidOid;
+ int32 typmod = -1;
+
+ typenameTypeIdAndMod(NULL, typeName, &typoid, &typmod);
+ Assert(typoid == INTERVALOID);
+
+ Datum interval =
+ DirectFunctionCall3(interval_in,
+ CStringGetDatum(strVal(value)),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(typmod));
+
+ char *intervalout =
+ DatumGetCString(DirectFunctionCall1(interval_out,
+ interval));
+ appendStringInfo(buf, " INTERVAL '%s'", intervalout);
+ }
+ else
+ {
+ appendStringInfo(buf, " %s", quote_literal_cstr(strVal(value)));
+ }
+ break;
+ }
+
+ default:
+ {
+ elog(ERROR, "Unexpected Value type in VAR_SET_VALUE arguments.");
+ break;
+ }
+ }
+ }
+}
+
+
+/*
+ * AppendVariableSet appends a string representing the VariableSetStmt to a buffer
+ */
+void
+AppendVariableSet(StringInfo buf, VariableSetStmt *setStmt)
+{
+ switch (setStmt->kind)
+ {
+ case VAR_SET_VALUE:
+ {
+ AppendVarSetValue(buf, setStmt);
+ break;
+ }
+
+ case VAR_SET_CURRENT:
+ {
+ appendStringInfo(buf, " SET %s FROM CURRENT", quote_identifier(
+ setStmt->name));
+ break;
+ }
+
+ case VAR_SET_DEFAULT:
+ {
+ appendStringInfo(buf, " SET %s TO DEFAULT", quote_identifier(setStmt->name));
+ break;
+ }
+
+ case VAR_RESET:
+ {
+ appendStringInfo(buf, " RESET %s", quote_identifier(setStmt->name));
+ break;
+ }
+
+ case VAR_RESET_ALL:
+ {
+ appendStringInfoString(buf, " RESET ALL");
+ break;
+ }
+
+ /* VAR_SET_MULTI is a special case for SET TRANSACTION that should not occur here */
+ case VAR_SET_MULTI:
+ default:
+ {
+ ereport(ERROR, (errmsg("Unable to deparse SET statement")));
+ break;
+ }
+ }
+}
diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c
index b72787993..d3d3ce633 100644
--- a/src/backend/distributed/deparser/deparse_database_stmts.c
+++ b/src/backend/distributed/deparser/deparse_database_stmts.c
@@ -18,11 +18,17 @@
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
-#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
+#include "distributed/citus_ruleutils.h"
+#include "commands/defrem.h"
+#include "distributed/deparser.h"
+#include "distributed/log_utils.h"
+#include "parser/parse_type.h"
+
static void AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt);
-
+static void AppendAlterDatabaseStmt(StringInfo buf, AlterDatabaseStmt *stmt);
+static void AppendDefElemConnLimit(StringInfo buf, DefElem *def);
char *
DeparseAlterDatabaseOwnerStmt(Node *node)
@@ -49,3 +55,153 @@ AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
quote_identifier(strVal((String *) stmt->object)),
RoleSpecString(stmt->newowner, true));
}
+
+
+static void
+AppendGrantDatabases(StringInfo buf, GrantStmt *stmt)
+{
+ ListCell *cell = NULL;
+ appendStringInfo(buf, " ON DATABASE ");
+
+ foreach(cell, stmt->objects)
+ {
+ char *database = strVal(lfirst(cell));
+ appendStringInfoString(buf, quote_identifier(database));
+ if (cell != list_tail(stmt->objects))
+ {
+ appendStringInfo(buf, ", ");
+ }
+ }
+}
+
+
+static void
+AppendGrantOnDatabaseStmt(StringInfo buf, GrantStmt *stmt)
+{
+ Assert(stmt->objtype == OBJECT_DATABASE);
+
+ AppendGrantSharedPrefix(buf, stmt);
+
+ AppendGrantDatabases(buf, stmt);
+
+ AppendGrantSharedSuffix(buf, stmt);
+}
+
+
+static void
+AppendDefElemConnLimit(StringInfo buf, DefElem *def)
+{
+ appendStringInfo(buf, " CONNECTION LIMIT %ld", (long int) defGetNumeric(def));
+}
+
+
+static void
+AppendAlterDatabaseStmt(StringInfo buf, AlterDatabaseStmt *stmt)
+{
+ appendStringInfo(buf, "ALTER DATABASE %s ", quote_identifier(stmt->dbname));
+
+ if (stmt->options)
+ {
+ ListCell *cell = NULL;
+ appendStringInfo(buf, "WITH ");
+ foreach(cell, stmt->options)
+ {
+ DefElem *def = castNode(DefElem, lfirst(cell));
+ if (strcmp(def->defname, "is_template") == 0)
+ {
+ appendStringInfo(buf, "IS_TEMPLATE %s",
+ quote_literal_cstr(strVal(def->arg)));
+ }
+ else if (strcmp(def->defname, "connection_limit") == 0)
+ {
+ AppendDefElemConnLimit(buf, def);
+ }
+ else if (strcmp(def->defname, "allow_connections") == 0)
+ {
+ ereport(ERROR,
+ errmsg("ALLOW_CONNECTIONS is not supported"));
+ }
+ else
+ {
+ ereport(ERROR,
+ errmsg("unrecognized ALTER DATABASE option: %s",
+ def->defname));
+ }
+ }
+ }
+
+ appendStringInfo(buf, ";");
+}
+
+
+char *
+DeparseGrantOnDatabaseStmt(Node *node)
+{
+ GrantStmt *stmt = castNode(GrantStmt, node);
+ Assert(stmt->objtype == OBJECT_DATABASE);
+
+ StringInfoData str = { 0 };
+ initStringInfo(&str);
+
+ AppendGrantOnDatabaseStmt(&str, stmt);
+
+ return str.data;
+}
+
+
+char *
+DeparseAlterDatabaseStmt(Node *node)
+{
+ AlterDatabaseStmt *stmt = castNode(AlterDatabaseStmt, node);
+
+ StringInfoData str = { 0 };
+ initStringInfo(&str);
+
+ AppendAlterDatabaseStmt(&str, stmt);
+
+ return str.data;
+}
+
+
+#if PG_VERSION_NUM >= PG_VERSION_15
+char *
+DeparseAlterDatabaseRefreshCollStmt(Node *node)
+{
+ AlterDatabaseRefreshCollStmt *stmt = (AlterDatabaseRefreshCollStmt *) node;
+
+ StringInfoData str;
+ initStringInfo(&str);
+
+ appendStringInfo(&str, "ALTER DATABASE %s REFRESH COLLATION VERSION;",
+ quote_identifier(
+ stmt->dbname));
+
+ return str.data;
+}
+
+
+#endif
+
+static void
+AppendAlterDatabaseSetStmt(StringInfo buf, AlterDatabaseSetStmt *stmt)
+{
+ appendStringInfo(buf, "ALTER DATABASE %s", quote_identifier(stmt->dbname));
+
+ VariableSetStmt *varSetStmt = castNode(VariableSetStmt, stmt->setstmt);
+
+ AppendVariableSet(buf, varSetStmt);
+}
+
+
+char *
+DeparseAlterDatabaseSetStmt(Node *node)
+{
+ AlterDatabaseSetStmt *stmt = castNode(AlterDatabaseSetStmt, node);
+
+ StringInfoData str = { 0 };
+ initStringInfo(&str);
+
+ AppendAlterDatabaseSetStmt(&str, stmt);
+
+ return str.data;
+}
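
Worked example, not part of the patch: for a parsed "ALTER DATABASE mydb SET work_mem TO '64MB'" (hypothetical database and GUC names), DeparseAlterDatabaseSetStmt() above would produce

	ALTER DATABASE mydb SET work_mem = '64MB'

because the VAR_SET_VALUE branch of AppendVariableSet() always normalizes to the "=" spelling and re-quotes the literal.
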
diff --git a/src/backend/distributed/deparser/deparse_domain_stmts.c b/src/backend/distributed/deparser/deparse_domain_stmts.c
index 9891e0532..e517074ec 100644
--- a/src/backend/distributed/deparser/deparse_domain_stmts.c
+++ b/src/backend/distributed/deparser/deparse_domain_stmts.c
@@ -345,9 +345,9 @@ AppendAlterDomainStmtSetDefault(StringInfo buf, AlterDomainStmt *stmt)
expr = TransformDefaultExpr(expr, stmt->typeName, baseTypeName);
/* deparse while the searchpath is cleared to force qualification of identifiers */
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
char *exprSql = deparse_expression(expr, NIL, true, true);
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
appendStringInfo(buf, "SET DEFAULT %s", exprSql);
}
@@ -443,9 +443,9 @@ AppendConstraint(StringInfo buf, Constraint *constraint, List *domainName,
elog(ERROR, "missing expression for domain constraint");
}
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
char *exprSql = deparse_expression(expr, NIL, true, true);
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
appendStringInfo(buf, " CHECK (%s)", exprSql);
return;
@@ -469,9 +469,9 @@ AppendConstraint(StringInfo buf, Constraint *constraint, List *domainName,
elog(ERROR, "missing expression for domain default");
}
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
char *exprSql = deparse_expression(expr, NIL, true, true);
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
appendStringInfo(buf, " DEFAULT %s", exprSql);
return;
diff --git a/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c b/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c
index b8f2574a4..3f755c905 100644
--- a/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c
+++ b/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c
@@ -21,7 +21,6 @@
static void AppendGrantOnFDWStmt(StringInfo buf, GrantStmt *stmt);
static void AppendGrantOnFDWNames(StringInfo buf, GrantStmt *stmt);
-
char *
DeparseGrantOnFDWStmt(Node *node)
{
@@ -41,36 +40,9 @@ static void
AppendGrantOnFDWStmt(StringInfo buf, GrantStmt *stmt)
{
Assert(stmt->objtype == OBJECT_FDW);
-
- appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");
-
- if (!stmt->is_grant && stmt->grant_option)
- {
- appendStringInfo(buf, "GRANT OPTION FOR ");
- }
-
- AppendGrantPrivileges(buf, stmt);
-
+ AppendGrantSharedPrefix(buf, stmt);
AppendGrantOnFDWNames(buf, stmt);
-
- AppendGrantGrantees(buf, stmt);
-
- if (stmt->is_grant && stmt->grant_option)
- {
- appendStringInfo(buf, " WITH GRANT OPTION");
- }
- if (!stmt->is_grant)
- {
- if (stmt->behavior == DROP_RESTRICT)
- {
- appendStringInfo(buf, " RESTRICT");
- }
- else if (stmt->behavior == DROP_CASCADE)
- {
- appendStringInfo(buf, " CASCADE");
- }
- }
- appendStringInfo(buf, ";");
+ AppendGrantSharedSuffix(buf, stmt);
}
diff --git a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c
index c1d2de7e9..403569b94 100644
--- a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c
+++ b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c
@@ -298,36 +298,9 @@ static void
AppendGrantOnForeignServerStmt(StringInfo buf, GrantStmt *stmt)
{
Assert(stmt->objtype == OBJECT_FOREIGN_SERVER);
-
- appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");
-
- if (!stmt->is_grant && stmt->grant_option)
- {
- appendStringInfo(buf, "GRANT OPTION FOR ");
- }
-
- AppendGrantPrivileges(buf, stmt);
-
+ AppendGrantSharedPrefix(buf, stmt);
AppendGrantOnForeignServerServers(buf, stmt);
-
- AppendGrantGrantees(buf, stmt);
-
- if (stmt->is_grant && stmt->grant_option)
- {
- appendStringInfo(buf, " WITH GRANT OPTION");
- }
- if (!stmt->is_grant)
- {
- if (stmt->behavior == DROP_RESTRICT)
- {
- appendStringInfo(buf, " RESTRICT");
- }
- else if (stmt->behavior == DROP_CASCADE)
- {
- appendStringInfo(buf, " CASCADE");
- }
- }
- appendStringInfo(buf, ";");
+ AppendGrantSharedSuffix(buf, stmt);
}
diff --git a/src/backend/distributed/deparser/deparse_function_stmts.c b/src/backend/distributed/deparser/deparse_function_stmts.c
index 524a1928d..a5bc52e5a 100644
--- a/src/backend/distributed/deparser/deparse_function_stmts.c
+++ b/src/backend/distributed/deparser/deparse_function_stmts.c
@@ -61,7 +61,6 @@ static void AppendDefElemRows(StringInfo buf, DefElem *def);
static void AppendDefElemSet(StringInfo buf, DefElem *def);
static void AppendDefElemSupport(StringInfo buf, DefElem *def);
-static void AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt);
static void AppendRenameFunctionStmt(StringInfo buf, RenameStmt *stmt);
static void AppendAlterFunctionSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt);
static void AppendAlterFunctionOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt);
@@ -300,164 +299,6 @@ AppendDefElemSupport(StringInfo buf, DefElem *def)
}
-/*
- * AppendVariableSet appends a string representing the VariableSetStmt to a buffer
- */
-void
-AppendVariableSet(StringInfo buf, VariableSetStmt *setStmt)
-{
- switch (setStmt->kind)
- {
- case VAR_SET_VALUE:
- {
- AppendVarSetValue(buf, setStmt);
- break;
- }
-
- case VAR_SET_CURRENT:
- {
- appendStringInfo(buf, " SET %s FROM CURRENT", quote_identifier(
- setStmt->name));
- break;
- }
-
- case VAR_SET_DEFAULT:
- {
- appendStringInfo(buf, " SET %s TO DEFAULT", quote_identifier(setStmt->name));
- break;
- }
-
- case VAR_RESET:
- {
- appendStringInfo(buf, " RESET %s", quote_identifier(setStmt->name));
- break;
- }
-
- case VAR_RESET_ALL:
- {
- appendStringInfoString(buf, " RESET ALL");
- break;
- }
-
- /* VAR_SET_MULTI is a special case for SET TRANSACTION that should not occur here */
- case VAR_SET_MULTI:
- default:
- {
- ereport(ERROR, (errmsg("Unable to deparse SET statement")));
- break;
- }
- }
-}
-
-
-/*
- * AppendVarSetValue deparses a VariableSetStmt with VAR_SET_VALUE kind.
- * It takes from flatten_set_variable_args in postgres's utils/misc/guc.c,
- * however flatten_set_variable_args does not apply correct quoting.
- */
-static void
-AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt)
-{
- ListCell *varArgCell = NULL;
- ListCell *firstCell = list_head(setStmt->args);
-
- Assert(setStmt->kind == VAR_SET_VALUE);
-
- foreach(varArgCell, setStmt->args)
- {
- Node *varArgNode = lfirst(varArgCell);
- A_Const *varArgConst = NULL;
- TypeName *typeName = NULL;
-
- if (IsA(varArgNode, A_Const))
- {
- varArgConst = (A_Const *) varArgNode;
- }
- else if (IsA(varArgNode, TypeCast))
- {
- TypeCast *varArgTypeCast = (TypeCast *) varArgNode;
-
- varArgConst = castNode(A_Const, varArgTypeCast->arg);
- typeName = varArgTypeCast->typeName;
- }
- else
- {
- elog(ERROR, "unrecognized node type: %d", varArgNode->type);
- }
-
- /* don't know how to start SET until we inspect first arg */
- if (varArgCell != firstCell)
- {
- appendStringInfoChar(buf, ',');
- }
- else if (typeName != NULL)
- {
- appendStringInfoString(buf, " SET TIME ZONE");
- }
- else
- {
- appendStringInfo(buf, " SET %s =", quote_identifier(setStmt->name));
- }
-
- Node *value = (Node *) &varArgConst->val;
- switch (value->type)
- {
- case T_Integer:
- {
- appendStringInfo(buf, " %d", intVal(value));
- break;
- }
-
- case T_Float:
- {
- appendStringInfo(buf, " %s", strVal(value));
- break;
- }
-
- case T_String:
- {
- if (typeName != NULL)
- {
- /*
- * Must be a ConstInterval argument for TIME ZONE. Coerce
- * to interval and back to normalize the value and account
- * for any typmod.
- */
- Oid typoid = InvalidOid;
- int32 typmod = -1;
-
- typenameTypeIdAndMod(NULL, typeName, &typoid, &typmod);
- Assert(typoid == INTERVALOID);
-
- Datum interval =
- DirectFunctionCall3(interval_in,
- CStringGetDatum(strVal(value)),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(typmod));
-
- char *intervalout =
- DatumGetCString(DirectFunctionCall1(interval_out,
- interval));
- appendStringInfo(buf, " INTERVAL '%s'", intervalout);
- }
- else
- {
- appendStringInfo(buf, " %s", quote_literal_cstr(strVal(
- value)));
- }
- break;
- }
-
- default:
- {
- elog(ERROR, "Unexpected Value type in VAR_SET_VALUE arguments.");
- break;
- }
- }
- }
-}
-
-
/*
* DeparseRenameFunctionStmt builds and returns a string representing the RenameStmt
*/
@@ -749,35 +590,11 @@ AppendGrantOnFunctionStmt(StringInfo buf, GrantStmt *stmt)
"GRANT .. ALL FUNCTIONS/PROCEDURES IN SCHEMA is not supported for formatting.");
}
- appendStringInfoString(buf, stmt->is_grant ? "GRANT " : "REVOKE ");
-
- if (!stmt->is_grant && stmt->grant_option)
- {
- appendStringInfoString(buf, "GRANT OPTION FOR ");
- }
-
- AppendGrantPrivileges(buf, stmt);
+ AppendGrantSharedPrefix(buf, stmt);
AppendGrantOnFunctionFunctions(buf, stmt);
- AppendGrantGrantees(buf, stmt);
-
- if (stmt->is_grant && stmt->grant_option)
- {
- appendStringInfoString(buf, " WITH GRANT OPTION");
- }
- if (!stmt->is_grant)
- {
- if (stmt->behavior == DROP_RESTRICT)
- {
- appendStringInfoString(buf, " RESTRICT");
- }
- else if (stmt->behavior == DROP_CASCADE)
- {
- appendStringInfoString(buf, " CASCADE");
- }
- }
- appendStringInfoString(buf, ";");
+ AppendGrantSharedSuffix(buf, stmt);
}
diff --git a/src/backend/distributed/deparser/deparse_publication_stmts.c b/src/backend/distributed/deparser/deparse_publication_stmts.c
index deb8e7285..e22333146 100644
--- a/src/backend/distributed/deparser/deparse_publication_stmts.c
+++ b/src/backend/distributed/deparser/deparse_publication_stmts.c
@@ -307,11 +307,11 @@ AppendWhereClauseExpression(StringInfo buf, RangeVar *tableName,
List *relationContext = deparse_context_for(tableName->relname, relation->rd_id);
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
char *whereClauseString = deparse_expression(whereClause,
relationContext,
true, true);
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
appendStringInfoString(buf, whereClauseString);
diff --git a/src/backend/distributed/deparser/deparse_role_stmts.c b/src/backend/distributed/deparser/deparse_role_stmts.c
index 0e9b300bb..4d41f8ec4 100644
--- a/src/backend/distributed/deparser/deparse_role_stmts.c
+++ b/src/backend/distributed/deparser/deparse_role_stmts.c
@@ -15,8 +15,10 @@
#include "pg_version_compat.h"
+#include "commands/defrem.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
+#include "distributed/listutils.h"
#include "lib/stringinfo.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
@@ -28,6 +30,8 @@ static void AppendRoleOption(StringInfo buf, ListCell *optionCell);
static void AppendRoleList(StringInfo buf, List *roleList);
static void AppendDropRoleStmt(StringInfo buf, DropRoleStmt *stmt);
static void AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt);
+static void AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt);
+static void AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt);
/*
@@ -340,6 +344,66 @@ DeparseGrantRoleStmt(Node *node)
}
+/*
+ * AppendRevokeAdminOptionFor appends the 'ADMIN OPTION FOR' clause to the
+ * given buffer if the given statement is a 'REVOKE' statement and the admin
+ * option is specified. In PostgreSQL 16 and later, the option is specified
+ * in the 'opt' field of GrantRoleStmt and may have multiple values, so the
+ * compile-time version is checked to support both representations.
+ */
+static void
+AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt)
+{
+#if PG_VERSION_NUM >= PG_VERSION_16
+ if (!stmt->is_grant)
+ {
+ DefElem *opt = NULL;
+ foreach_ptr(opt, stmt->opt)
+ {
+ if (strcmp(opt->defname, "admin") == 0)
+ {
+ appendStringInfo(buf, "ADMIN OPTION FOR ");
+ break;
+ }
+ }
+ }
+#else
+ if (!stmt->is_grant && stmt->admin_opt)
+ {
+ appendStringInfo(buf, "ADMIN OPTION FOR ");
+ }
+#endif
+}
+
+
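+/*
+ * AppendGrantWithAdminOption appends the 'WITH ADMIN OPTION' clause to the
+ * given buffer if the given statement is a 'GRANT' statement and the admin
+ * option is specified, handling both the pre-PG16 admin_opt field and the
+ * PG16 'opt' DefElem list.
+ */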
+static void
+AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt)
+{
+ if (stmt->is_grant)
+ {
+#if PG_VERSION_NUM >= PG_VERSION_16
+ DefElem *opt = NULL;
+ foreach_ptr(opt, stmt->opt)
+ {
+ bool admin_option = false;
+ char *optval = defGetString(opt);
+ if (strcmp(opt->defname, "admin") == 0 &&
+ parse_bool(optval, &admin_option) && admin_option)
+ {
+ appendStringInfo(buf, " WITH ADMIN OPTION");
+ break;
+ }
+ }
+#else
+ if (stmt->admin_opt)
+ {
+ appendStringInfo(buf, " WITH ADMIN OPTION");
+ }
+#endif
+ }
+}
+
+
/*
* AppendGrantRoleStmt generates the string representation of the
* GrantRoleStmt and appends it to the buffer.
@@ -348,41 +412,15 @@ static void
AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt)
{
appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");
-
- if (!stmt->is_grant && stmt->admin_opt)
- {
- appendStringInfo(buf, "ADMIN OPTION FOR ");
- }
-
+ AppendRevokeAdminOptionFor(buf, stmt);
AppendRoleList(buf, stmt->granted_roles);
-
appendStringInfo(buf, "%s ", stmt->is_grant ? " TO " : " FROM ");
-
AppendRoleList(buf, stmt->grantee_roles);
-
- if (stmt->is_grant)
- {
- if (stmt->admin_opt)
- {
- appendStringInfo(buf, " WITH ADMIN OPTION");
- }
-
- if (stmt->grantor)
- {
- appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(stmt->grantor, true));
- }
- }
- else
- {
- if (stmt->behavior == DROP_RESTRICT)
- {
- appendStringInfo(buf, " RESTRICT");
- }
- else if (stmt->behavior == DROP_CASCADE)
- {
- appendStringInfo(buf, " CASCADE");
- }
- }
+ AppendGrantWithAdminOption(buf, stmt);
+ AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant);
+ AppendGrantRestrictAndCascadeForRoleSpec(buf, stmt->behavior, stmt->is_grant);
+ appendStringInfo(buf, ";");
}
diff --git a/src/backend/distributed/deparser/deparse_schema_stmts.c b/src/backend/distributed/deparser/deparse_schema_stmts.c
index cf8bf3418..10317b899 100644
--- a/src/backend/distributed/deparser/deparse_schema_stmts.c
+++ b/src/backend/distributed/deparser/deparse_schema_stmts.c
@@ -178,35 +178,11 @@ AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt)
{
Assert(stmt->objtype == OBJECT_SCHEMA);
- appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");
-
- if (!stmt->is_grant && stmt->grant_option)
- {
- appendStringInfo(buf, "GRANT OPTION FOR ");
- }
-
- AppendGrantPrivileges(buf, stmt);
+ AppendGrantSharedPrefix(buf, stmt);
AppendGrantOnSchemaSchemas(buf, stmt);
- AppendGrantGrantees(buf, stmt);
-
- if (stmt->is_grant && stmt->grant_option)
- {
- appendStringInfo(buf, " WITH GRANT OPTION");
- }
- if (!stmt->is_grant)
- {
- if (stmt->behavior == DROP_RESTRICT)
- {
- appendStringInfo(buf, " RESTRICT");
- }
- else if (stmt->behavior == DROP_CASCADE)
- {
- appendStringInfo(buf, " CASCADE");
- }
- }
- appendStringInfo(buf, ";");
+ AppendGrantSharedSuffix(buf, stmt);
}
diff --git a/src/backend/distributed/deparser/deparse_sequence_stmts.c b/src/backend/distributed/deparser/deparse_sequence_stmts.c
index de2afdeec..98488c160 100644
--- a/src/backend/distributed/deparser/deparse_sequence_stmts.c
+++ b/src/backend/distributed/deparser/deparse_sequence_stmts.c
@@ -389,35 +389,11 @@ AppendGrantOnSequenceStmt(StringInfo buf, GrantStmt *stmt)
"GRANT .. ALL SEQUENCES IN SCHEMA is not supported for formatting.");
}
- appendStringInfoString(buf, stmt->is_grant ? "GRANT " : "REVOKE ");
-
- if (!stmt->is_grant && stmt->grant_option)
- {
- appendStringInfoString(buf, "GRANT OPTION FOR ");
- }
-
- AppendGrantPrivileges(buf, stmt);
+ AppendGrantSharedPrefix(buf, stmt);
AppendGrantOnSequenceSequences(buf, stmt);
- AppendGrantGrantees(buf, stmt);
-
- if (stmt->is_grant && stmt->grant_option)
- {
- appendStringInfoString(buf, " WITH GRANT OPTION");
- }
- if (!stmt->is_grant)
- {
- if (stmt->behavior == DROP_RESTRICT)
- {
- appendStringInfoString(buf, " RESTRICT");
- }
- else if (stmt->behavior == DROP_CASCADE)
- {
- appendStringInfoString(buf, " CASCADE");
- }
- }
- appendStringInfoString(buf, ";");
+ AppendGrantSharedSuffix(buf, stmt);
}
diff --git a/src/backend/distributed/deparser/deparse_table_stmts.c b/src/backend/distributed/deparser/deparse_table_stmts.c
index 1d9ee1739..ff96d7fc3 100644
--- a/src/backend/distributed/deparser/deparse_table_stmts.c
+++ b/src/backend/distributed/deparser/deparse_table_stmts.c
@@ -11,6 +11,8 @@
*/
#include "postgres.h"
+#include "catalog/heap.h"
+#include "commands/defrem.h"
#include "distributed/commands.h"
#include "distributed/deparser.h"
#include "distributed/version_compat.h"
@@ -30,7 +32,8 @@ static void AppendAlterTableSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *st
static void AppendAlterTableStmt(StringInfo buf, AlterTableStmt *stmt);
static void AppendAlterTableCmd(StringInfo buf, AlterTableCmd *alterTableCmd,
AlterTableStmt *stmt);
-static void AppendAlterTableCmdAddColumn(StringInfo buf, AlterTableCmd *alterTableCmd);
+static void AppendAlterTableCmdAddColumn(StringInfo buf, AlterTableCmd *alterTableCmd,
+ AlterTableStmt *stmt);
static void AppendAlterTableCmdDropConstraint(StringInfo buf,
AlterTableCmd *alterTableCmd);
@@ -142,13 +145,19 @@ AppendColumnNameList(StringInfo buf, List *columns)
/*
- * AppendAlterTableCmdAddConstraint builds the add constraint command for index constraints
- * in the ADD CONSTRAINT {PRIMARY KEY, UNIQUE, EXCLUSION} form and appends it to the buf.
+ * AppendAlterTableCmdConstraint builds the string required to create the
+ * given constraint as part of an ADD CONSTRAINT or an ADD COLUMN subcommand,
+ * and appends it to the buf.
*/
static void
-AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
- AlterTableStmt *stmt)
+AppendAlterTableCmdConstraint(StringInfo buf, Constraint *constraint,
+ AlterTableStmt *stmt, AlterTableType subtype)
{
+ if (subtype != AT_AddConstraint && subtype != AT_AddColumn)
+ {
+ ereport(ERROR, (errmsg("Unsupported alter table subtype: %d", (int) subtype)));
+ }
+
/* Need to deparse the alter table constraint command only if we are adding a constraint name.*/
if (constraint->conname == NULL)
{
@@ -156,7 +165,15 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
"Constraint name can not be NULL when deparsing the constraint.")));
}
- appendStringInfoString(buf, " ADD CONSTRAINT ");
+ if (subtype == AT_AddConstraint)
+ {
+ appendStringInfoString(buf, " ADD CONSTRAINT ");
+ }
+ else
+ {
+ appendStringInfoString(buf, " CONSTRAINT ");
+ }
+
appendStringInfo(buf, "%s ", quote_identifier(constraint->conname));
/* postgres version >= PG15
@@ -184,7 +201,10 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
#endif
}
- AppendColumnNameList(buf, constraint->keys);
+ if (subtype == AT_AddConstraint)
+ {
+ AppendColumnNameList(buf, constraint->keys);
+ }
if (constraint->including != NULL)
{
@@ -192,6 +212,24 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
AppendColumnNameList(buf, constraint->including);
}
+
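+ /* deparse WITH (...) storage parameters of the constraint, e.g. fillfactor */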
+ if (constraint->options != NIL)
+ {
+ appendStringInfoString(buf, " WITH(");
+
+ ListCell *defListCell;
+ foreach(defListCell, constraint->options)
+ {
+ DefElem *def = (DefElem *) lfirst(defListCell);
+
+ bool first = (defListCell == list_head(constraint->options));
+ appendStringInfo(buf, "%s%s=%s", first ? "" : ",",
+ quote_identifier(def->defname),
+ quote_literal_cstr(defGetString(def)));
+ }
+
+ appendStringInfoChar(buf, ')');
+ }
}
else if (constraint->contype == CONSTR_EXCLUSION)
{
@@ -240,6 +278,18 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
}
else if (constraint->contype == CONSTR_CHECK)
{
+ if (subtype == AT_AddColumn)
+ {
+ /*
+ * Preprocess should have rejected deparsing such an ALTER TABLE
+ * command, but we check here to be on the safe side.
+ */
+ ereport(ERROR, (errmsg("cannot add check constraint to column by "
+ "using ADD COLUMN command"),
+ errhint("Consider using ALTER TABLE ... ADD CONSTRAINT "
+ "... CHECK command after adding the column")));
+ }
+
LOCKMODE lockmode = AlterTableGetLockLevel(stmt->cmds);
Oid leftRelationId = AlterTableLookupRelation(stmt, lockmode);
@@ -275,9 +325,12 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
}
else if (constraint->contype == CONSTR_FOREIGN)
{
- appendStringInfoString(buf, " FOREIGN KEY");
+ if (subtype == AT_AddConstraint)
+ {
+ appendStringInfoString(buf, " FOREIGN KEY");
- AppendColumnNameList(buf, constraint->fk_attrs);
+ AppendColumnNameList(buf, constraint->fk_attrs);
+ }
appendStringInfoString(buf, " REFERENCES");
@@ -379,12 +432,32 @@ AppendAlterTableCmdAddConstraint(StringInfo buf, Constraint *constraint,
}
}
- /* FOREIGN KEY and CHECK constraints migth have NOT VALID option */
- if (constraint->skip_validation)
+ /*
+ * For the ADD CONSTRAINT subcommand, FOREIGN KEY and CHECK constraints might
+ * have the NOT VALID option.
+ *
+ * Note that skip_validation might be true for an ADD COLUMN too, but not
+ * because Postgres supports that; Citus itself sets this flag to true for
+ * foreign key constraints added via ADD COLUMN. So we don't check
+ * skip_validation for the ADD COLUMN subcommand.
+ */
+ if (subtype == AT_AddConstraint && constraint->skip_validation)
{
appendStringInfoString(buf, " NOT VALID ");
}
+ if (subtype == AT_AddColumn &&
+ (constraint->deferrable || constraint->initdeferred))
+ {
+ /*
+ * For the ADD COLUMN subcommand, whether the given constraint is deferrable
+ * or initially deferred is indicated by a separate Constraint object, not
+ * via the deferrable / initdeferred fields.
+ */
+ ereport(ERROR, (errmsg("unexpected value set for deferrable/initdeferred "
+ "field for an ADD COLUMN subcommand")));
+ }
+
if (constraint->deferrable)
{
appendStringInfoString(buf, " DEFERRABLE");
@@ -409,7 +482,7 @@ AppendAlterTableCmd(StringInfo buf, AlterTableCmd *alterTableCmd, AlterTableStmt
{
case AT_AddColumn:
{
- AppendAlterTableCmdAddColumn(buf, alterTableCmd);
+ AppendAlterTableCmdAddColumn(buf, alterTableCmd, stmt);
break;
}
@@ -429,7 +502,7 @@ AppendAlterTableCmd(StringInfo buf, AlterTableCmd *alterTableCmd, AlterTableStmt
*/
if (ConstrTypeCitusCanDefaultName(constraint->contype))
{
- AppendAlterTableCmdAddConstraint(buf, constraint, stmt);
+ AppendAlterTableCmdConstraint(buf, constraint, stmt, AT_AddConstraint);
break;
}
}
@@ -445,28 +518,81 @@ AppendAlterTableCmd(StringInfo buf, AlterTableCmd *alterTableCmd, AlterTableStmt
}
+/*
+ * GeneratedWhenStr returns the string representation of the given
+ * generated_when value.
+ */
+static const char *
+GeneratedWhenStr(char generatedWhen)
+{
+ switch (generatedWhen)
+ {
+ case 'a':
+ {
+ return "ALWAYS";
+ }
+
+ case 'd':
+ {
+ return "BY DEFAULT";
+ }
+
+ default:
+ ereport(ERROR, (errmsg("unrecognized generated_when: %d",
+ generatedWhen)));
+ }
+}
+
+
+/*
+ * DeparseRawExprForColumnDefault returns the string representation of the
+ * given rawExpr based on the given column type information.
+ */
+static char *
+DeparseRawExprForColumnDefault(Oid relationId, Oid columnTypeId, int32 columnTypeMod,
+ char *columnName, char attgenerated, Node *rawExpr)
+{
+ ParseState *pstate = make_parsestate(NULL);
+ Relation relation = RelationIdGetRelation(relationId);
+ AddRangeTableEntryToQueryCompat(pstate, relation);
+
+ Node *defaultExpr = cookDefault(pstate, rawExpr,
+ columnTypeId, columnTypeMod,
+ columnName, attgenerated);
+
+ List *deparseContext = deparse_context_for(get_rel_name(relationId), relationId);
+
+ int saveNestLevel = PushEmptySearchPath();
+ char *defaultExprStr = deparse_expression(defaultExpr, deparseContext, false, false);
+ PopEmptySearchPath(saveNestLevel);
+
+ RelationClose(relation);
+
+ return defaultExprStr;
+}
+
+
/*
* AppendAlterTableCmd builds and appends to the given buffer an AT_AddColumn command
* from given AlterTableCmd object in the form ADD COLUMN ...
*/
static void
-AppendAlterTableCmdAddColumn(StringInfo buf, AlterTableCmd *alterTableCmd)
+AppendAlterTableCmdAddColumn(StringInfo buf, AlterTableCmd *alterTableCmd,
+ AlterTableStmt *stmt)
{
Assert(alterTableCmd->subtype == AT_AddColumn);
+ Oid relationId = AlterTableLookupRelation(stmt, NoLock);
+
appendStringInfoString(buf, " ADD COLUMN ");
- ColumnDef *columnDefinition = (ColumnDef *) alterTableCmd->def;
-
- /*
- * the way we use the deparser now, constraints are always NULL
- * adding this check for ColumnDef consistency
- */
- if (columnDefinition->constraints != NULL)
+ if (alterTableCmd->missing_ok)
{
- ereport(ERROR, (errmsg("Constraints are not supported for AT_AddColumn")));
+ appendStringInfoString(buf, "IF NOT EXISTS ");
}
+ ColumnDef *columnDefinition = (ColumnDef *) alterTableCmd->def;
+
if (columnDefinition->colname)
{
appendStringInfo(buf, "%s ", quote_identifier(columnDefinition->colname));
@@ -478,23 +604,91 @@ AppendAlterTableCmdAddColumn(StringInfo buf, AlterTableCmd *alterTableCmd)
typenameTypeIdAndMod(NULL, columnDefinition->typeName, &typeOid, &typmod);
appendStringInfo(buf, "%s", format_type_extended(typeOid, typmod,
formatFlags));
- if (columnDefinition->is_not_null)
+
+ if (columnDefinition->compression)
{
- appendStringInfoString(buf, " NOT NULL");
+ appendStringInfo(buf, " COMPRESSION %s",
+ quote_identifier(columnDefinition->compression));
}
- /*
- * the way we use the deparser now, collation is never used
- * since the data type of columns that use sequences for default
- * are only int,smallint and bigint (never text, varchar, char)
- * Adding this part only for ColumnDef consistency
- */
Oid collationOid = GetColumnDefCollation(NULL, columnDefinition, typeOid);
if (OidIsValid(collationOid))
{
const char *identifier = FormatCollateBEQualified(collationOid);
appendStringInfo(buf, " COLLATE %s", identifier);
}
+
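+ /* deparse the column constraints attached to this ADD COLUMN subcommand */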
+ ListCell *constraintCell = NULL;
+ foreach(constraintCell, columnDefinition->constraints)
+ {
+ Constraint *constraint = (Constraint *) lfirst(constraintCell);
+
+ if (constraint->contype == CONSTR_NOTNULL)
+ {
+ appendStringInfoString(buf, " NOT NULL");
+ }
+ else if (constraint->contype == CONSTR_NULL)
+ {
+ appendStringInfoString(buf, " NULL");
+ }
+ else if (constraint->contype == CONSTR_DEFAULT)
+ {
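+ /* a plain DEFAULT expression; '\0' means the column is not generated */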
+ char attgenerated = '\0';
+ appendStringInfo(buf, " DEFAULT %s",
+ DeparseRawExprForColumnDefault(relationId, typeOid, typmod,
+ columnDefinition->colname,
+ attgenerated,
+ constraint->raw_expr));
+ }
+ else if (constraint->contype == CONSTR_IDENTITY)
+ {
+ /*
+ * Citus doesn't support adding identity columns via ALTER TABLE,
+ * so we don't bother teaching the deparser about them.
+ */
+ ereport(ERROR, (errmsg("unexpectedly found identity column "
+ "definition in ALTER TABLE command")));
+ }
+ else if (constraint->contype == CONSTR_GENERATED)
+ {
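+ /* 's' corresponds to ATTRIBUTE_GENERATED_STORED, i.e. a stored generated column */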
+ char attgenerated = 's';
+ appendStringInfo(buf, " GENERATED %s AS (%s) STORED",
+ GeneratedWhenStr(constraint->generated_when),
+ DeparseRawExprForColumnDefault(relationId, typeOid, typmod,
+ columnDefinition->colname,
+ attgenerated,
+ constraint->raw_expr));
+ }
+ else if (constraint->contype == CONSTR_CHECK ||
+ constraint->contype == CONSTR_PRIMARY ||
+ constraint->contype == CONSTR_UNIQUE ||
+ constraint->contype == CONSTR_EXCLUSION ||
+ constraint->contype == CONSTR_FOREIGN)
+ {
+ AppendAlterTableCmdConstraint(buf, constraint, stmt, AT_AddColumn);
+ }
+ else if (constraint->contype == CONSTR_ATTR_DEFERRABLE)
+ {
+ appendStringInfoString(buf, " DEFERRABLE");
+ }
+ else if (constraint->contype == CONSTR_ATTR_NOT_DEFERRABLE)
+ {
+ appendStringInfoString(buf, " NOT DEFERRABLE");
+ }
+ else if (constraint->contype == CONSTR_ATTR_DEFERRED)
+ {
+ appendStringInfoString(buf, " INITIALLY DEFERRED");
+ }
+ else if (constraint->contype == CONSTR_ATTR_IMMEDIATE)
+ {
+ appendStringInfoString(buf, " INITIALLY IMMEDIATE");
+ }
+ else
+ {
+ ereport(ERROR, (errmsg("unsupported constraint type"),
+ errdetail("constraint type: %d", constraint->contype)));
+ }
+ }
}
diff --git a/src/backend/distributed/deparser/ruleutils_14.c b/src/backend/distributed/deparser/ruleutils_14.c
index b364221d8..6ab124537 100644
--- a/src/backend/distributed/deparser/ruleutils_14.c
+++ b/src/backend/distributed/deparser/ruleutils_14.c
@@ -53,6 +53,7 @@
#include "common/keywords.h"
#include "distributed/citus_nodefuncs.h"
#include "distributed/citus_ruleutils.h"
+#include "distributed/namespace_utils.h"
#include "executor/spi.h"
#include "foreign/foreign.h"
#include "funcapi.h"
@@ -610,18 +611,14 @@ pg_get_rule_expr(Node *expression)
{
bool showImplicitCasts = true;
deparse_context context;
- OverrideSearchPath *overridePath = NULL;
StringInfo buffer = makeStringInfo();
/*
* Set search_path to NIL so that all objects outside of pg_catalog will be
* schema-prefixed. pg_catalog will be added automatically when we call
- * PushOverrideSearchPath(), since we set addCatalog to true;
+ * PushEmptySearchPath().
*/
- overridePath = GetOverrideSearchPath(CurrentMemoryContext);
- overridePath->schemas = NIL;
- overridePath->addCatalog = true;
- PushOverrideSearchPath(overridePath);
+ int saveNestLevel = PushEmptySearchPath();
context.buf = buffer;
context.namespaces = NIL;
@@ -638,7 +635,7 @@ pg_get_rule_expr(Node *expression)
get_rule_expr(expression, &context, showImplicitCasts);
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
return buffer->data;
}
@@ -1955,8 +1952,6 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
deparse_context context;
deparse_namespace dpns;
- OverrideSearchPath *overridePath = NULL;
-
/* Guard against excessively long or deeply-nested queries */
CHECK_FOR_INTERRUPTS();
check_stack_depth();
@@ -1975,12 +1970,9 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
/*
* Set search_path to NIL so that all objects outside of pg_catalog will be
* schema-prefixed. pg_catalog will be added automatically when we call
- * PushOverrideSearchPath(), since we set addCatalog to true;
+ * PushEmptySearchPath().
*/
- overridePath = GetOverrideSearchPath(CurrentMemoryContext);
- overridePath->schemas = NIL;
- overridePath->addCatalog = true;
- PushOverrideSearchPath(overridePath);
+ int saveNestLevel = PushEmptySearchPath();
context.buf = buf;
context.namespaces = lcons(&dpns, list_copy(parentnamespace));
@@ -2031,7 +2023,7 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
}
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
}
/* ----------
diff --git a/src/backend/distributed/deparser/ruleutils_15.c b/src/backend/distributed/deparser/ruleutils_15.c
index 2dded9b01..755e0f4cd 100644
--- a/src/backend/distributed/deparser/ruleutils_15.c
+++ b/src/backend/distributed/deparser/ruleutils_15.c
@@ -54,6 +54,7 @@
#include "distributed/citus_nodefuncs.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/multi_router_planner.h"
+#include "distributed/namespace_utils.h"
#include "executor/spi.h"
#include "foreign/foreign.h"
#include "funcapi.h"
@@ -624,18 +625,14 @@ pg_get_rule_expr(Node *expression)
{
bool showImplicitCasts = true;
deparse_context context;
- OverrideSearchPath *overridePath = NULL;
StringInfo buffer = makeStringInfo();
/*
* Set search_path to NIL so that all objects outside of pg_catalog will be
* schema-prefixed. pg_catalog will be added automatically when we call
- * PushOverrideSearchPath(), since we set addCatalog to true;
+ * PushEmptySearchPath().
*/
- overridePath = GetOverrideSearchPath(CurrentMemoryContext);
- overridePath->schemas = NIL;
- overridePath->addCatalog = true;
- PushOverrideSearchPath(overridePath);
+ int saveNestLevel = PushEmptySearchPath();
context.buf = buffer;
context.namespaces = NIL;
@@ -652,7 +649,7 @@ pg_get_rule_expr(Node *expression)
get_rule_expr(expression, &context, showImplicitCasts);
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
return buffer->data;
}
@@ -2038,8 +2035,6 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
deparse_context context;
deparse_namespace dpns;
- OverrideSearchPath *overridePath = NULL;
-
/* Guard against excessively long or deeply-nested queries */
CHECK_FOR_INTERRUPTS();
check_stack_depth();
@@ -2058,12 +2053,9 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
/*
* Set search_path to NIL so that all objects outside of pg_catalog will be
* schema-prefixed. pg_catalog will be added automatically when we call
- * PushOverrideSearchPath(), since we set addCatalog to true;
+ * PushEmptySearchPath().
*/
- overridePath = GetOverrideSearchPath(CurrentMemoryContext);
- overridePath->schemas = NIL;
- overridePath->addCatalog = true;
- PushOverrideSearchPath(overridePath);
+ int saveNestLevel = PushEmptySearchPath();
context.buf = buf;
context.namespaces = lcons(&dpns, list_copy(parentnamespace));
@@ -2118,7 +2110,7 @@ get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
}
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
}
/* ----------
diff --git a/src/backend/distributed/deparser/ruleutils_16.c b/src/backend/distributed/deparser/ruleutils_16.c
new file mode 100644
index 000000000..31e8823b1
--- /dev/null
+++ b/src/backend/distributed/deparser/ruleutils_16.c
@@ -0,0 +1,9185 @@
+/*-------------------------------------------------------------------------
+ *
+ * ruleutils_16.c
+ * Functions to convert stored expressions/querytrees back to
+ * source text
+ *
+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/distributed/deparser/ruleutils_16.c
+ *
+ * This needs to be closely in sync with the core code.
+ *-------------------------------------------------------------------------
+ */
+#include "distributed/pg_version_constants.h"
+
+#include "pg_config.h"
+
+#if (PG_VERSION_NUM >= PG_VERSION_16) && (PG_VERSION_NUM < PG_VERSION_17)
+
+#include "postgres.h"
+
+#include <ctype.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "access/amapi.h"
+#include "access/htup_details.h"
+#include "access/relation.h"
+#include "access/sysattr.h"
+#include "access/table.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_am.h"
+#include "catalog/pg_authid.h"
+#include "catalog/pg_collation.h"
+#include "catalog/pg_constraint.h"
+#include "catalog/pg_depend.h"
+#include "catalog/pg_extension.h"
+#include "catalog/pg_foreign_data_wrapper.h"
+#include "catalog/pg_language.h"
+#include "catalog/pg_opclass.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_partitioned_table.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_statistic_ext.h"
+#include "catalog/pg_trigger.h"
+#include "catalog/pg_type.h"
+#include "commands/defrem.h"
+#include "commands/extension.h"
+#include "commands/tablespace.h"
+#include "common/keywords.h"
+#include "distributed/citus_nodefuncs.h"
+#include "distributed/citus_ruleutils.h"
+#include "distributed/multi_router_planner.h"
+#include "distributed/namespace_utils.h"
+#include "executor/spi.h"
+#include "foreign/foreign.h"
+#include "funcapi.h"
+#include "mb/pg_wchar.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/pathnodes.h"
+#include "optimizer/optimizer.h"
+#include "parser/parse_node.h"
+#include "parser/parse_agg.h"
+#include "parser/parse_func.h"
+#include "parser/parse_node.h"
+#include "parser/parse_oper.h"
+#include "parser/parse_relation.h"
+#include "parser/parser.h"
+#include "parser/parsetree.h"
+#include "rewrite/rewriteHandler.h"
+#include "rewrite/rewriteManip.h"
+#include "rewrite/rewriteSupport.h"
+#include "utils/array.h"
+#include "utils/builtins.h"
+#include "utils/fmgroids.h"
+#include "utils/hsearch.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+#include "utils/ruleutils.h"
+#include "utils/snapmgr.h"
+#include "utils/syscache.h"
+#include "utils/typcache.h"
+#include "utils/varlena.h"
+#include "utils/xml.h"
+
+
+/* ----------
+ * Pretty formatting constants
+ * ----------
+ */
+
+/* Indent counts */
+#define PRETTYINDENT_STD 8
+#define PRETTYINDENT_JOIN 4
+#define PRETTYINDENT_VAR 4
+
+#define PRETTYINDENT_LIMIT 40 /* wrap limit */
+
+/* Pretty flags */
+#define PRETTYFLAG_PAREN 0x0001
+#define PRETTYFLAG_INDENT 0x0002
+
+/* Default line length for pretty-print wrapping: 0 means wrap always */
+#define WRAP_COLUMN_DEFAULT 0
+
+/* macros to test if pretty action needed */
+#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN)
+#define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT)
+
+
+/* ----------
+ * Local data types
+ * ----------
+ */
+
+/* Context info needed for invoking a recursive querytree display routine */
+typedef struct
+{
+ StringInfo buf; /* output buffer to append to */
+ List *namespaces; /* List of deparse_namespace nodes */
+ List *windowClause; /* Current query level's WINDOW clause */
+ List *windowTList; /* targetlist for resolving WINDOW clause */
+ int prettyFlags; /* enabling of pretty-print functions */
+ int wrapColumn; /* max line length, or -1 for no limit */
+ int indentLevel; /* current indent level for prettyprint */
+ bool varprefix; /* true to print prefixes on Vars */
+ Oid distrelid; /* the distributed table being modified, if valid */
+ int64 shardid; /* a distributed table's shardid, if positive */
+ ParseExprKind special_exprkind; /* set only for exprkinds needing special
+ * handling */
+ Bitmapset *appendparents; /* if not null, map child Vars of these relids
+ * back to the parent rel */
+} deparse_context;
+
+/*
+ * Each level of query context around a subtree needs a level of Var namespace.
+ * A Var having varlevelsup=N refers to the N'th item (counting from 0) in
+ * the current context's namespaces list.
+ *
+ * The rangetable is the list of actual RTEs from the query tree, and the
+ * cte list is the list of actual CTEs.
+ *
+ * rtable_names holds the alias name to be used for each RTE (either a C
+ * string, or NULL for nameless RTEs such as unnamed joins).
+ * rtable_columns holds the column alias names to be used for each RTE.
+ *
+ * In some cases we need to make names of merged JOIN USING columns unique
+ * across the whole query, not only per-RTE. If so, unique_using is true
+ * and using_names is a list of C strings representing names already assigned
+ * to USING columns.
+ *
+ * When deparsing plan trees, there is always just a single item in the
+ * deparse_namespace list (since a plan tree never contains Vars with
+ * varlevelsup > 0). We store the PlanState node that is the immediate
+ * parent of the expression to be deparsed, as well as a list of that
+ * PlanState's ancestors. In addition, we store its outer and inner subplan
+ * state nodes, as well as their plan nodes' targetlists, and the index tlist
+ * if the current plan node might contain INDEX_VAR Vars. (These fields could
+ * be derived on-the-fly from the current PlanState, but it seems notationally
+ * clearer to set them up as separate fields.)
+ */
+typedef struct
+{
+ List *rtable; /* List of RangeTblEntry nodes */
+ List *rtable_names; /* Parallel list of names for RTEs */
+ List *rtable_columns; /* Parallel list of deparse_columns structs */
+ List *subplans; /* List of Plan trees for SubPlans */
+ List *ctes; /* List of CommonTableExpr nodes */
+ AppendRelInfo **appendrels; /* Array of AppendRelInfo nodes, or NULL */
+ /* Workspace for column alias assignment: */
+ bool unique_using; /* Are we making USING names globally unique */
+ List *using_names; /* List of assigned names for USING columns */
+ /* Remaining fields are used only when deparsing a Plan tree: */
+ Plan *plan; /* immediate parent of current expression */
+ List *ancestors; /* ancestors of planstate */
+ Plan *outer_plan; /* outer subnode, or NULL if none */
+ Plan *inner_plan; /* inner subnode, or NULL if none */
+ List *outer_tlist; /* referent for OUTER_VAR Vars */
+ List *inner_tlist; /* referent for INNER_VAR Vars */
+ List *index_tlist; /* referent for INDEX_VAR Vars */
+ /* Special namespace representing a function signature: */
+ char *funcname;
+ int numargs;
+ char **argnames;
+} deparse_namespace;
+
+/* Callback signature for resolve_special_varno() */
+typedef void (*rsv_callback) (Node *node, deparse_context *context,
+ void *callback_arg);
+
+/*
+ * Per-relation data about column alias names.
+ *
+ * Selecting aliases is unreasonably complicated because of the need to dump
+ * rules/views whose underlying tables may have had columns added, deleted, or
+ * renamed since the query was parsed. We must nonetheless print the rule/view
+ * in a form that can be reloaded and will produce the same results as before.
+ *
+ * For each RTE used in the query, we must assign column aliases that are
+ * unique within that RTE. SQL does not require this of the original query,
+ * but due to factors such as *-expansion we need to be able to uniquely
+ * reference every column in a decompiled query. As long as we qualify all
+ * column references, per-RTE uniqueness is sufficient for that.
+ *
+ * However, we can't ensure per-column name uniqueness for unnamed join RTEs,
+ * since they just inherit column names from their input RTEs, and we can't
+ * rename the columns at the join level. Most of the time this isn't an issue
+ * because we don't need to reference the join's output columns as such; we
+ * can reference the input columns instead. That approach can fail for merged
+ * JOIN USING columns, however, so when we have one of those in an unnamed
+ * join, we have to make that column's alias globally unique across the whole
+ * query to ensure it can be referenced unambiguously.
+ *
+ * Another problem is that a JOIN USING clause requires the columns to be
+ * merged to have the same aliases in both input RTEs, and that no other
+ * columns in those RTEs or their children conflict with the USING names.
+ * To handle that, we do USING-column alias assignment in a recursive
+ * traversal of the query's jointree. When descending through a JOIN with
+ * USING, we preassign the USING column names to the child columns, overriding
+ * other rules for column alias assignment. We also mark each RTE with a list
+ * of all USING column names selected for joins containing that RTE, so that
+ * when we assign other columns' aliases later, we can avoid conflicts.
+ *
+ * Another problem is that if a JOIN's input tables have had columns added or
+ * deleted since the query was parsed, we must generate a column alias list
+ * for the join that matches the current set of input columns --- otherwise, a
+ * change in the number of columns in the left input would throw off matching
+ * of aliases to columns of the right input. Thus, positions in the printable
+ * column alias list are not necessarily one-for-one with varattnos of the
+ * JOIN, so we need a separate new_colnames[] array for printing purposes.
+ */
+typedef struct
+{
+ /*
+ * colnames is an array containing column aliases to use for columns that
+ * existed when the query was parsed. Dropped columns have NULL entries.
+ * This array can be directly indexed by varattno to get a Var's name.
+ *
+ * Non-NULL entries are guaranteed unique within the RTE, *except* when
+ * this is for an unnamed JOIN RTE. In that case we merely copy up names
+ * from the two input RTEs.
+ *
+ * During the recursive descent in set_using_names(), forcible assignment
+ * of a child RTE's column name is represented by pre-setting that element
+ * of the child's colnames array. So at that stage, NULL entries in this
+ * array just mean that no name has been preassigned, not necessarily that
+ * the column is dropped.
+ */
+ int num_cols; /* length of colnames[] array */
+ char **colnames; /* array of C strings and NULLs */
+
+ /*
+ * new_colnames is an array containing column aliases to use for columns
+ * that would exist if the query was re-parsed against the current
+ * definitions of its base tables. This is what to print as the column
+ * alias list for the RTE. This array does not include dropped columns,
+ * but it will include columns added since original parsing. Indexes in
+ * it therefore have little to do with current varattno values. As above,
+ * entries are unique unless this is for an unnamed JOIN RTE. (In such an
+ * RTE, we never actually print this array, but we must compute it anyway
+ * for possible use in computing column names of upper joins.) The
+ * parallel array is_new_col marks which of these columns are new since
+ * original parsing. Entries with is_new_col false must match the
+ * non-NULL colnames entries one-for-one.
+ */
+ int num_new_cols; /* length of new_colnames[] array */
+ char **new_colnames; /* array of C strings */
+ bool *is_new_col; /* array of bool flags */
+
+ /* This flag tells whether we should actually print a column alias list */
+ bool printaliases;
+
+ /* This list has all names used as USING names in joins above this RTE */
+ List *parentUsing; /* names assigned to parent merged columns */
+
+ /*
+ * If this struct is for a JOIN RTE, we fill these fields during the
+ * set_using_names() pass to describe its relationship to its child RTEs.
+ *
+ * leftattnos and rightattnos are arrays with one entry per existing
+ * output column of the join (hence, indexable by join varattno). For a
+ * simple reference to a column of the left child, leftattnos[i] is the
+ * child RTE's attno and rightattnos[i] is zero; and conversely for a
+ * column of the right child. But for merged columns produced by JOIN
+ * USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero.
+ * Also, if the column has been dropped, both are zero.
+ *
+ * If it's a JOIN USING, usingNames holds the alias names selected for the
+ * merged columns (these might be different from the original USING list,
+ * if we had to modify names to achieve uniqueness).
+ */
+ int leftrti; /* rangetable index of left child */
+ int rightrti; /* rangetable index of right child */
+ int *leftattnos; /* left-child varattnos of join cols, or 0 */
+ int *rightattnos; /* right-child varattnos of join cols, or 0 */
+ List *usingNames; /* names assigned to merged columns */
+} deparse_columns;
+
+/* This macro is analogous to rt_fetch(), but for deparse_columns structs */
+#define deparse_columns_fetch(rangetable_index, dpns) \
+ ((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1))
+
+/*
+ * Entry in set_rtable_names' hash table
+ */
+typedef struct
+{
+ char name[NAMEDATALEN]; /* Hash key --- must be first */
+ int counter; /* Largest addition used so far for name */
+} NameHashEntry;
+
+
+/* ----------
+ * Local functions
+ *
+ * Most of these functions used to use fixed-size buffers to build their
+ * results. Now, they take an (already initialized) StringInfo object
+ * as a parameter, and append their text output to its contents.
+ * ----------
+ */
+static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
+ Bitmapset *rels_used);
+static void set_deparse_for_query(deparse_namespace *dpns, Query *query,
+ List *parent_namespaces);
+static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode);
+static void set_using_names(deparse_namespace *dpns, Node *jtnode,
+ List *parentUsing);
+static void set_relation_column_names(deparse_namespace *dpns,
+ RangeTblEntry *rte,
+ deparse_columns *colinfo);
+static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
+ deparse_columns *colinfo);
+static bool colname_is_unique(const char *colname, deparse_namespace *dpns,
+ deparse_columns *colinfo);
+static char *make_colname_unique(char *colname, deparse_namespace *dpns,
+ deparse_columns *colinfo);
+static void expand_colnames_array_to(deparse_columns *colinfo, int n);
+static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
+ deparse_columns *colinfo);
+static char *get_rtable_name(int rtindex, deparse_context *context);
+static void set_deparse_plan(deparse_namespace *dpns, Plan *plan);
+static Plan *find_recursive_union(deparse_namespace *dpns,
+ WorkTableScan *wtscan);
+static void push_child_plan(deparse_namespace *dpns, Plan *plan,
+ deparse_namespace *save_dpns);
+static void pop_child_plan(deparse_namespace *dpns,
+ deparse_namespace *save_dpns);
+static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell,
+ deparse_namespace *save_dpns);
+static void pop_ancestor_plan(deparse_namespace *dpns,
+ deparse_namespace *save_dpns);
+static void get_query_def(Query *query, StringInfo buf, List *parentnamespace,
+ TupleDesc resultDesc, bool colNamesVisible,
+ int prettyFlags, int wrapColumn, int startIndent);
+static void get_query_def_extended(Query *query, StringInfo buf,
+ List *parentnamespace, Oid distrelid, int64 shardid,
+ TupleDesc resultDesc, bool colNamesVisible,
+ int prettyFlags, int wrapColumn,
+ int startIndent);
+static void get_values_def(List *values_lists, deparse_context *context);
+static void get_with_clause(Query *query, deparse_context *context);
+static void get_select_query_def(Query *query, deparse_context *context,
+ TupleDesc resultDesc, bool colNamesVisible);
+static void get_insert_query_def(Query *query, deparse_context *context,
+ bool colNamesVisible);
+static void get_update_query_def(Query *query, deparse_context *context,
+ bool colNamesVisible);
+static void get_update_query_targetlist_def(Query *query, List *targetList,
+ deparse_context *context,
+ RangeTblEntry *rte);
+static void get_delete_query_def(Query *query, deparse_context *context,
+ bool colNamesVisible);
+static void get_merge_query_def(Query *query, deparse_context *context,
+ bool colNamesVisible);
+static void get_utility_query_def(Query *query, deparse_context *context);
+static void get_basic_select_query(Query *query, deparse_context *context,
+ TupleDesc resultDesc, bool colNamesVisible);
+static void get_target_list(List *targetList, deparse_context *context,
+ TupleDesc resultDesc, bool colNamesVisible);
+static void get_setop_query(Node *setOp, Query *query,
+ deparse_context *context,
+ TupleDesc resultDesc, bool colNamesVisible);
+static Node *get_rule_sortgroupclause(Index ref, List *tlist,
+ bool force_colno,
+ deparse_context *context);
+static void get_rule_groupingset(GroupingSet *gset, List *targetlist,
+ bool omit_parens, deparse_context *context);
+static void get_rule_orderby(List *orderList, List *targetList,
+ bool force_colno, deparse_context *context);
+static void get_rule_windowclause(Query *query, deparse_context *context);
+static void get_rule_windowspec(WindowClause *wc, List *targetList,
+ deparse_context *context);
+static char *get_variable(Var *var, int levelsup, bool istoplevel,
+ deparse_context *context);
+static void get_special_variable(Node *node, deparse_context *context,
+ void *callback_arg);
+static void resolve_special_varno(Node *node, deparse_context *context,
+ rsv_callback callback, void *callback_arg);
+static Node *find_param_referent(Param *param, deparse_context *context,
+ deparse_namespace **dpns_p, ListCell **ancestor_cell_p);
+static void get_parameter(Param *param, deparse_context *context);
+static const char *get_simple_binary_op_name(OpExpr *expr);
+static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags);
+static void appendContextKeyword(deparse_context *context, const char *str,
+ int indentBefore, int indentAfter, int indentPlus);
+static void removeStringInfoSpaces(StringInfo str);
+static void get_rule_expr(Node *node, deparse_context *context,
+ bool showimplicit);
+static void get_rule_expr_toplevel(Node *node, deparse_context *context,
+ bool showimplicit);
+static void get_rule_list_toplevel(List *lst, deparse_context *context,
+ bool showimplicit);
+static void get_rule_expr_funccall(Node *node, deparse_context *context,
+ bool showimplicit);
+static bool looks_like_function(Node *node);
+static void get_oper_expr(OpExpr *expr, deparse_context *context);
+static void get_func_expr(FuncExpr *expr, deparse_context *context,
+ bool showimplicit);
+static void get_proc_expr(CallStmt *stmt, deparse_context *context,
+ bool showimplicit);
+static void get_agg_expr(Aggref *aggref, deparse_context *context,
+ Aggref *original_aggref);
+static void get_agg_expr_helper(Aggref *aggref, deparse_context *context,
+ Aggref *original_aggref, const char *funcname,
+ const char *options, bool is_json_objectagg);
+static void get_agg_combine_expr(Node *node, deparse_context *context,
+ void *callback_arg);
+static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context);
+static void get_windowfunc_expr_helper(WindowFunc *wfunc, deparse_context *context,
+ const char *funcname, const char *options,
+ bool is_json_objectagg);
+static bool get_func_sql_syntax(FuncExpr *expr, deparse_context *context);
+static void get_coercion_expr(Node *arg, deparse_context *context,
+ Oid resulttype, int32 resulttypmod,
+ Node *parentNode);
+static void get_const_expr(Const *constval, deparse_context *context,
+ int showtype);
+static void get_const_collation(Const *constval, deparse_context *context);
+static void get_json_format(JsonFormat *format, StringInfo buf);
+static void get_json_constructor(JsonConstructorExpr *ctor,
+ deparse_context *context, bool showimplicit);
+static void get_json_constructor_options(JsonConstructorExpr *ctor,
+ StringInfo buf);
+static void get_json_agg_constructor(JsonConstructorExpr *ctor,
+ deparse_context *context,
+ const char *funcname,
+ bool is_json_objectagg);
+static void simple_quote_literal(StringInfo buf, const char *val);
+static void get_sublink_expr(SubLink *sublink, deparse_context *context);
+static void get_tablefunc(TableFunc *tf, deparse_context *context,
+ bool showimplicit);
+static void get_from_clause(Query *query, const char *prefix,
+ deparse_context *context);
+static void get_from_clause_item(Node *jtnode, Query *query,
+ deparse_context *context);
+static void get_rte_alias(RangeTblEntry *rte, int varno, bool use_as,
+ deparse_context *context);
+static void get_column_alias_list(deparse_columns *colinfo,
+ deparse_context *context);
+static void get_from_clause_coldeflist(RangeTblFunction *rtfunc,
+ deparse_columns *colinfo,
+ deparse_context *context);
+static void get_tablesample_def(TableSampleClause *tablesample,
+ deparse_context *context);
+static void get_opclass_name(Oid opclass, Oid actual_datatype,
+ StringInfo buf);
+static Node *processIndirection(Node *node, deparse_context *context);
+static void printSubscripts(SubscriptingRef *aref, deparse_context *context);
+static char *get_relation_name(Oid relid);
+static char *generate_relation_or_shard_name(Oid relid, Oid distrelid,
+ int64 shardid, List *namespaces);
+static char *generate_rte_shard_name(RangeTblEntry *rangeTableEntry);
+static char *generate_fragment_name(char *schemaName, char *tableName);
+static char *generate_function_name(Oid funcid, int nargs,
+ List *argnames, Oid *argtypes,
+ bool has_variadic, bool *use_variadic_p,
+ ParseExprKind special_exprkind);
+static List *get_insert_column_names_list(List *targetList, StringInfo buf, deparse_context *context, RangeTblEntry *rte);
+
+#define only_marker(rte) ((rte)->inh ? "" : "ONLY ")
+
+
+
+/*
+ * pg_get_query_def parses back one query tree, and outputs the resulting query
+ * string into given buffer.
+ */
+void
+pg_get_query_def(Query *query, StringInfo buffer)
+{
+ get_query_def(query, buffer, NIL, NULL, false, 0, WRAP_COLUMN_DEFAULT, 0);
+}
+
+/*
+ * get_merged_argument_list merges both the IN and OUT arguments lists into one and
+ * also eliminates the INOUT duplicates(present in both the lists). After merging both
+ * the lists, it returns all the named-arguments in a list(mergedNamedArgList) along
+ * with their types(mergedNamedArgTypes), final argument list(mergedArgumentList), and
+ * the total number of arguments(totalArguments).
+ */
+bool
+get_merged_argument_list(CallStmt *stmt, List **mergedNamedArgList,
+ Oid **mergedNamedArgTypes,
+ List **mergedArgumentList,
+ int *totalArguments)
+{
+
+ Oid functionOid = stmt->funcexpr->funcid;
+ List *namedArgList = NIL;
+ List *finalArgumentList = NIL;
+ Oid *finalArgTypes;
+ Oid *argTypes = NULL;
+ char *argModes = NULL;
+ char **argNames = NULL;
+ int argIndex = 0;
+
+ HeapTuple proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionOid));
+ if (!HeapTupleIsValid(proctup))
+ {
+ elog(ERROR, "cache lookup failed for function %u", functionOid);
+ }
+
+ int defArgs = get_func_arg_info(proctup, &argTypes, &argNames, &argModes);
+ ReleaseSysCache(proctup);
+
+ if (argModes == NULL)
+ {
+ /* No OUT arguments */
+ return false;
+ }
+
+ /*
+ * Passed arguments Includes IN, OUT, INOUT (in both the lists) and VARIADIC arguments,
+ * which means INOUT arguments are double counted.
+ */
+ int numberOfArgs = list_length(stmt->funcexpr->args) + list_length(stmt->outargs);
+ int totalInoutArgs = 0;
+
+ /* Let's count INOUT arguments from the defined number of arguments */
+ for (argIndex=0; argIndex < defArgs; ++argIndex)
+ {
+ if (argModes[argIndex] == PROARGMODE_INOUT)
+ totalInoutArgs++;
+ }
+
+ /* Remove the duplicate INOUT counting */
+ numberOfArgs = numberOfArgs - totalInoutArgs;
+ finalArgTypes = palloc0(sizeof(Oid) * numberOfArgs);
+
+ ListCell *inArgCell = list_head(stmt->funcexpr->args);
+ ListCell *outArgCell = list_head(stmt->outargs);
+
+ for (argIndex=0; argIndex < numberOfArgs; ++argIndex)
+ {
+ switch (argModes[argIndex])
+ {
+ case PROARGMODE_IN:
+ case PROARGMODE_VARIADIC:
+ {
+ Node *arg = (Node *) lfirst(inArgCell);
+
+ if (IsA(arg, NamedArgExpr))
+ namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name);
+ finalArgTypes[argIndex] = exprType(arg);
+ finalArgumentList = lappend(finalArgumentList, arg);
+ inArgCell = lnext(stmt->funcexpr->args, inArgCell);
+ break;
+ }
+
+ case PROARGMODE_OUT:
+ {
+ Node *arg = (Node *) lfirst(outArgCell);
+
+ if (IsA(arg, NamedArgExpr))
+ namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name);
+ finalArgTypes[argIndex] = exprType(arg);
+ finalArgumentList = lappend(finalArgumentList, arg);
+ outArgCell = lnext(stmt->outargs, outArgCell);
+ break;
+ }
+
+ case PROARGMODE_INOUT:
+ {
+ Node *arg = (Node *) lfirst(inArgCell);
+
+ if (IsA(arg, NamedArgExpr))
+ namedArgList = lappend(namedArgList, ((NamedArgExpr *) arg)->name);
+ finalArgTypes[argIndex] = exprType(arg);
+ finalArgumentList = lappend(finalArgumentList, arg);
+ inArgCell = lnext(stmt->funcexpr->args, inArgCell);
+ outArgCell = lnext(stmt->outargs, outArgCell);
+ break;
+ }
+
+ case PROARGMODE_TABLE:
+ default:
+ {
+ elog(ERROR, "Unhandled procedure argument mode[%d]", argModes[argIndex]);
+ break;
+ }
+ }
+ }
+
+ /*
+ * After eliminating INOUT duplicates and merging OUT arguments, we now
+ * have the final list of arguments.
+ */
+ if (defArgs != list_length(finalArgumentList))
+ {
+ elog(ERROR, "Insufficient number of args passed[%d] for function[%s]",
+ list_length(finalArgumentList),
+ get_func_name(functionOid));
+ }
+
+ if (list_length(finalArgumentList) > FUNC_MAX_ARGS)
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_TOO_MANY_ARGUMENTS),
+ errmsg("too many arguments[%d] for function[%s]",
+ list_length(finalArgumentList),
+ get_func_name(functionOid))));
+ }
+
+ *mergedNamedArgList = namedArgList;
+ *mergedNamedArgTypes = finalArgTypes;
+ *mergedArgumentList = finalArgumentList;
+ *totalArguments = numberOfArgs;
+
+ return true;
+}
+
+/*
+ * pg_get_rule_expr deparses an expression and returns the result as a string.
+ */
+char *
+pg_get_rule_expr(Node *expression)
+{
+ bool showImplicitCasts = true;
+ deparse_context context;
+ StringInfo buffer = makeStringInfo();
+
+ /*
+ * Set search_path to NIL so that all objects outside of pg_catalog will be
+ * schema-prefixed. pg_catalog will be added automatically when we call
+ * PushEmptySearchPath().
+ */
+ int saveNestLevel = PushEmptySearchPath();
+
+ context.buf = buffer;
+ context.namespaces = NIL;
+ context.windowClause = NIL;
+ context.windowTList = NIL;
+ context.varprefix = false;
+ context.prettyFlags = 0;
+ context.wrapColumn = WRAP_COLUMN_DEFAULT;
+ context.indentLevel = 0;
+ context.special_exprkind = EXPR_KIND_NONE;
+ context.distrelid = InvalidOid;
+ context.shardid = INVALID_SHARD_ID;
+
+ get_rule_expr(expression, &context, showImplicitCasts);
+
+ /* revert back to original search_path */
+ PopEmptySearchPath(saveNestLevel);
+
+ return buffer->data;
+}
+
+/*
+ * set_rtable_names: select RTE aliases to be used in printing a query
+ *
+ * We fill in dpns->rtable_names with a list of names that is one-for-one with
+ * the already-filled dpns->rtable list. Each RTE name is unique among those
+ * in the new namespace plus any ancestor namespaces listed in
+ * parent_namespaces.
+ *
+ * If rels_used isn't NULL, only RTE indexes listed in it are given aliases.
+ *
+ * Note that this function is only concerned with relation names, not column
+ * names.
+ */
+static void
+set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
+ Bitmapset *rels_used)
+{
+ HASHCTL hash_ctl;
+ HTAB *names_hash;
+ NameHashEntry *hentry;
+ bool found;
+ int rtindex;
+ ListCell *lc;
+
+ dpns->rtable_names = NIL;
+ /* nothing more to do if empty rtable */
+ if (dpns->rtable == NIL)
+ return;
+
+ /*
+ * We use a hash table to hold known names, so that this process is O(N)
+ * not O(N^2) for N names.
+ */
+ hash_ctl.keysize = NAMEDATALEN;
+ hash_ctl.entrysize = sizeof(NameHashEntry);
+ hash_ctl.hcxt = CurrentMemoryContext;
+ names_hash = hash_create("set_rtable_names names",
+ list_length(dpns->rtable),
+ &hash_ctl,
+ HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
+
+ /* Preload the hash table with names appearing in parent_namespaces */
+ foreach(lc, parent_namespaces)
+ {
+ deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc);
+ ListCell *lc2;
+
+ foreach(lc2, olddpns->rtable_names)
+ {
+ char *oldname = (char *) lfirst(lc2);
+
+ if (oldname == NULL)
+ continue;
+ hentry = (NameHashEntry *) hash_search(names_hash,
+ oldname,
+ HASH_ENTER,
+ &found);
+ /* we do not complain about duplicate names in parent namespaces */
+ hentry->counter = 0;
+ }
+ }
+
+ /* Now we can scan the rtable */
+ rtindex = 1;
+ foreach(lc, dpns->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+ char *refname;
+
+ /* Just in case this takes an unreasonable amount of time ... */
+ CHECK_FOR_INTERRUPTS();
+
+ if (rels_used && !bms_is_member(rtindex, rels_used))
+ {
+ /* Ignore unreferenced RTE */
+ refname = NULL;
+ }
+ else if (rte->alias)
+ {
+ /* If RTE has a user-defined alias, prefer that */
+ refname = rte->alias->aliasname;
+ }
+ else if (rte->rtekind == RTE_RELATION)
+ {
+ /* Use the current actual name of the relation */
+ refname = get_rel_name(rte->relid);
+ }
+ else if (rte->rtekind == RTE_JOIN)
+ {
+ /* Unnamed join has no refname */
+ refname = NULL;
+ }
+ else
+ {
+ /* Otherwise use whatever the parser assigned */
+ refname = rte->eref->aliasname;
+ }
+
+ /*
+ * If the selected name isn't unique, append digits to make it so, and
+ * make a new hash entry for it once we've got a unique name. For a
+ * very long input name, we might have to truncate to stay within
+ * NAMEDATALEN.
+ */
+ if (refname)
+ {
+ hentry = (NameHashEntry *) hash_search(names_hash,
+ refname,
+ HASH_ENTER,
+ &found);
+ if (found)
+ {
+ /* Name already in use, must choose a new one */
+ int refnamelen = strlen(refname);
+ char *modname = (char *) palloc(refnamelen + 16);
+ NameHashEntry *hentry2;
+
+ do
+ {
+ hentry->counter++;
+ for (;;)
+ {
+ memcpy(modname, refname, refnamelen);
+ sprintf(modname + refnamelen, "_%d", hentry->counter);
+ if (strlen(modname) < NAMEDATALEN)
+ break;
+ /* drop chars from refname to keep all the digits */
+ refnamelen = pg_mbcliplen(refname, refnamelen,
+ refnamelen - 1);
+ }
+ hentry2 = (NameHashEntry *) hash_search(names_hash,
+ modname,
+ HASH_ENTER,
+ &found);
+ } while (found);
+ hentry2->counter = 0; /* init new hash entry */
+ refname = modname;
+ }
+ else
+ {
+ /* Name not previously used, need only initialize hentry */
+ hentry->counter = 0;
+ }
+ }
+
+ dpns->rtable_names = lappend(dpns->rtable_names, refname);
+ rtindex++;
+ }
+
+ hash_destroy(names_hash);
+}
+
+/*
+ * set_deparse_for_query: set up deparse_namespace for deparsing a Query tree
+ *
+ * For convenience, this is defined to initialize the deparse_namespace struct
+ * from scratch.
+ */
+static void
+set_deparse_for_query(deparse_namespace *dpns, Query *query,
+ List *parent_namespaces)
+{
+ ListCell *lc;
+ ListCell *lc2;
+
+ /* Initialize *dpns and fill rtable/ctes links */
+ memset(dpns, 0, sizeof(deparse_namespace));
+ dpns->rtable = query->rtable;
+ dpns->subplans = NIL;
+ dpns->ctes = query->cteList;
+ dpns->appendrels = NULL;
+
+ /* Assign a unique relation alias to each RTE */
+ set_rtable_names(dpns, parent_namespaces, NULL);
+
+ /* Initialize dpns->rtable_columns to contain zeroed structs */
+ dpns->rtable_columns = NIL;
+ while (list_length(dpns->rtable_columns) < list_length(dpns->rtable))
+ dpns->rtable_columns = lappend(dpns->rtable_columns,
+ palloc0(sizeof(deparse_columns)));
+
+ /* If it's a utility query, it won't have a jointree */
+ if (query->jointree)
+ {
+ /* Detect whether global uniqueness of USING names is needed */
+ dpns->unique_using =
+ has_dangerous_join_using(dpns, (Node *) query->jointree);
+
+ /*
+ * Select names for columns merged by USING, via a recursive pass over
+ * the query jointree.
+ */
+ set_using_names(dpns, (Node *) query->jointree, NIL);
+ }
+
+ /*
+ * Now assign remaining column aliases for each RTE. We do this in a
+ * linear scan of the rtable, so as to process RTEs whether or not they
+ * are in the jointree (we mustn't miss NEW.*, INSERT target relations,
+ * etc). JOIN RTEs must be processed after their children, but this is
+ * okay because they appear later in the rtable list than their children
+ * (cf Asserts in identify_join_columns()).
+ */
+ forboth(lc, dpns->rtable, lc2, dpns->rtable_columns)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+ deparse_columns *colinfo = (deparse_columns *) lfirst(lc2);
+
+ if (rte->rtekind == RTE_JOIN)
+ set_join_column_names(dpns, rte, colinfo);
+ else
+ set_relation_column_names(dpns, rte, colinfo);
+ }
+}
+
+/*
+ * has_dangerous_join_using: search jointree for unnamed JOIN USING
+ *
+ * Merged columns of a JOIN USING may act differently from either of the input
+ * columns, either because they are merged with COALESCE (in a FULL JOIN) or
+ * because an implicit coercion of the underlying input column is required.
+ * In such a case the column must be referenced as a column of the JOIN not as
+ * a column of either input. And this is problematic if the join is unnamed
+ * (alias-less): we cannot qualify the column's name with an RTE name, since
+ * there is none. (Forcibly assigning an alias to the join is not a solution,
+ * since that will prevent legal references to tables below the join.)
+ * To ensure that every column in the query is unambiguously referenceable,
+ * we must assign such merged columns names that are globally unique across
+ * the whole query, aliasing other columns out of the way as necessary.
+ *
+ * Because the ensuing re-aliasing is fairly damaging to the readability of
+ * the query, we don't do this unless we have to. So, we must pre-scan
+ * the join tree to see if we have to, before starting set_using_names().
+ */
+static bool
+has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode)
+{
+ if (IsA(jtnode, RangeTblRef))
+ {
+ /* nothing to do here */
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *lc;
+
+ foreach(lc, f->fromlist)
+ {
+ if (has_dangerous_join_using(dpns, (Node *) lfirst(lc)))
+ return true;
+ }
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+
+ /* Is it an unnamed JOIN with USING? */
+ if (j->alias == NULL && j->usingClause)
+ {
+ /*
+ * Yes, so check each join alias var to see if any of them are not
+ * simple references to underlying columns. If so, we have a
+ * dangerous situation and must pick unique aliases.
+ */
+ RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable);
+
+ /* We need only examine the merged columns */
+ for (int i = 0; i < jrte->joinmergedcols; i++)
+ {
+ Node *aliasvar = list_nth(jrte->joinaliasvars, i);
+
+ if (!IsA(aliasvar, Var))
+ return true;
+ }
+ }
+
+ /* Nope, but inspect children */
+ if (has_dangerous_join_using(dpns, j->larg))
+ return true;
+ if (has_dangerous_join_using(dpns, j->rarg))
+ return true;
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+ return false;
+}
+
+/*
+ * set_using_names: select column aliases to be used for merged USING columns
+ *
+ * We do this during a recursive descent of the query jointree.
+ * dpns->unique_using must already be set to determine the global strategy.
+ *
+ * Column alias info is saved in the dpns->rtable_columns list, which is
+ * assumed to be filled with pre-zeroed deparse_columns structs.
+ *
+ * parentUsing is a list of all USING aliases assigned in parent joins of
+ * the current jointree node. (The passed-in list must not be modified.)
+ */
+static void
+set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing)
+{
+ if (IsA(jtnode, RangeTblRef))
+ {
+ /* nothing to do now */
+ }
+ else if (IsA(jtnode, FromExpr))
+ {
+ FromExpr *f = (FromExpr *) jtnode;
+ ListCell *lc;
+
+ foreach(lc, f->fromlist)
+ set_using_names(dpns, (Node *) lfirst(lc), parentUsing);
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+ RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable);
+ deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns);
+ int *leftattnos;
+ int *rightattnos;
+ deparse_columns *leftcolinfo;
+ deparse_columns *rightcolinfo;
+ int i;
+ ListCell *lc;
+
+ /* Get info about the shape of the join */
+ identify_join_columns(j, rte, colinfo);
+ leftattnos = colinfo->leftattnos;
+ rightattnos = colinfo->rightattnos;
+
+ /* Look up the not-yet-filled-in child deparse_columns structs */
+ leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns);
+ rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns);
+
+ /*
+ * If this join is unnamed, then we cannot substitute new aliases at
+ * this level, so any name requirements pushed down to here must be
+ * pushed down again to the children.
+ */
+ if (rte->alias == NULL)
+ {
+ for (i = 0; i < colinfo->num_cols; i++)
+ {
+ char *colname = colinfo->colnames[i];
+
+ if (colname == NULL)
+ continue;
+
+ /* Push down to left column, unless it's a system column */
+ if (leftattnos[i] > 0)
+ {
+ expand_colnames_array_to(leftcolinfo, leftattnos[i]);
+ leftcolinfo->colnames[leftattnos[i] - 1] = colname;
+ }
+
+ /* Same on the righthand side */
+ if (rightattnos[i] > 0)
+ {
+ expand_colnames_array_to(rightcolinfo, rightattnos[i]);
+ rightcolinfo->colnames[rightattnos[i] - 1] = colname;
+ }
+ }
+ }
+
+ /*
+ * If there's a USING clause, select the USING column names and push
+ * those names down to the children. We have two strategies:
+ *
+ * If dpns->unique_using is true, we force all USING names to be
+ * unique across the whole query level. In principle we'd only need
+ * the names of dangerous USING columns to be globally unique, but to
+ * safely assign all USING names in a single pass, we have to enforce
+ * the same uniqueness rule for all of them. However, if a USING
+ * column's name has been pushed down from the parent, we should use
+ * it as-is rather than making a uniqueness adjustment. This is
+ * necessary when we're at an unnamed join, and it creates no risk of
+ * ambiguity. Also, if there's a user-written output alias for a
+ * merged column, we prefer to use that rather than the input name;
+ * this simplifies the logic and seems likely to lead to less aliasing
+ * overall.
+ *
+ * If dpns->unique_using is false, we only need USING names to be
+ * unique within their own join RTE. We still need to honor
+ * pushed-down names, though.
+ *
+ * Though significantly different in results, these two strategies are
+ * implemented by the same code, with only the difference of whether
+ * to put assigned names into dpns->using_names.
+ */
+ if (j->usingClause)
+ {
+ /* Copy the input parentUsing list so we don't modify it */
+ parentUsing = list_copy(parentUsing);
+
+ /* USING names must correspond to the first join output columns */
+ expand_colnames_array_to(colinfo, list_length(j->usingClause));
+ i = 0;
+ foreach(lc, j->usingClause)
+ {
+ char *colname = strVal(lfirst(lc));
+
+ /* Assert it's a merged column */
+ Assert(leftattnos[i] != 0 && rightattnos[i] != 0);
+
+ /* Adopt passed-down name if any, else select unique name */
+ if (colinfo->colnames[i] != NULL)
+ colname = colinfo->colnames[i];
+ else
+ {
+ /* Prefer user-written output alias if any */
+ if (rte->alias && i < list_length(rte->alias->colnames))
+ colname = strVal(list_nth(rte->alias->colnames, i));
+ /* Make it appropriately unique */
+ colname = make_colname_unique(colname, dpns, colinfo);
+ if (dpns->unique_using)
+ dpns->using_names = lappend(dpns->using_names,
+ colname);
+ /* Save it as output column name, too */
+ colinfo->colnames[i] = colname;
+ }
+
+ /* Remember selected names for use later */
+ colinfo->usingNames = lappend(colinfo->usingNames, colname);
+ parentUsing = lappend(parentUsing, colname);
+
+ /* Push down to left column, unless it's a system column */
+ if (leftattnos[i] > 0)
+ {
+ expand_colnames_array_to(leftcolinfo, leftattnos[i]);
+ leftcolinfo->colnames[leftattnos[i] - 1] = colname;
+ }
+
+ /* Same on the righthand side */
+ if (rightattnos[i] > 0)
+ {
+ expand_colnames_array_to(rightcolinfo, rightattnos[i]);
+ rightcolinfo->colnames[rightattnos[i] - 1] = colname;
+ }
+
+ i++;
+ }
+ }
+
+ /* Mark child deparse_columns structs with correct parentUsing info */
+ leftcolinfo->parentUsing = parentUsing;
+ rightcolinfo->parentUsing = parentUsing;
+
+ /* Now recursively assign USING column names in children */
+ set_using_names(dpns, j->larg, parentUsing);
+ set_using_names(dpns, j->rarg, parentUsing);
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+}
+
+/*
+ * set_relation_column_names: select column aliases for a non-join RTE
+ *
+ * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed.
+ * If any colnames entries are already filled in, those override local
+ * choices.
+ */
+static void
+set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
+ deparse_columns *colinfo)
+{
+ int ncolumns;
+ char **real_colnames;
+ bool changed_any;
+ bool has_anonymous;
+ int noldcolumns;
+ int i;
+ int j;
+
+ /*
+ * Construct an array of the current "real" column names of the RTE.
+ * real_colnames[] will be indexed by physical column number, with NULL
+ * entries for dropped columns.
+ */
+ if (rte->rtekind == RTE_RELATION ||
+ GetRangeTblKind(rte) == CITUS_RTE_SHARD)
+ {
+ /* Relation or Citus shard --- look to the system catalogs for up-to-date info */
+ Relation rel;
+ TupleDesc tupdesc;
+
+ rel = relation_open(rte->relid, AccessShareLock);
+ tupdesc = RelationGetDescr(rel);
+
+ ncolumns = tupdesc->natts;
+ real_colnames = (char **) palloc(ncolumns * sizeof(char *));
+
+ for (i = 0; i < ncolumns; i++)
+ {
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
+
+ if (attr->attisdropped)
+ real_colnames[i] = NULL;
+ else
+ real_colnames[i] = pstrdup(NameStr(attr->attname));
+ }
+ relation_close(rel, AccessShareLock);
+ }
+ else
+ {
+ /* Otherwise get the column names from eref or expandRTE() */
+ List *colnames;
+ ListCell *lc;
+
+ /*
+ * Functions returning composites have the annoying property that some
+ * of the composite type's columns might have been dropped since the
+ * query was parsed. If possible, use expandRTE() to handle that
+ * case, since it has the tedious logic needed to find out about
+ * dropped columns. However, if we're explaining a plan, then we
+ * don't have rte->functions because the planner thinks that won't be
+ * needed later, and that breaks expandRTE(). So in that case we have
+ * to rely on rte->eref, which may lead us to report a dropped
+ * column's old name; that seems close enough for EXPLAIN's purposes.
+ *
+ * For non-RELATION, non-FUNCTION RTEs, we can just look at rte->eref,
+ * which should be sufficiently up-to-date: no other RTE types can
+ * have columns get dropped from under them after parsing.
+ */
+ if (rte->rtekind == RTE_FUNCTION && rte->functions != NIL)
+ {
+ /* Since we're not creating Vars, rtindex etc. don't matter */
+ expandRTE(rte, 1, 0, -1, true /* include dropped */ ,
+ &colnames, NULL);
+ }
+ else
+ colnames = rte->eref->colnames;
+
+ ncolumns = list_length(colnames);
+ real_colnames = (char **) palloc(ncolumns * sizeof(char *));
+
+ i = 0;
+ foreach(lc, colnames)
+ {
+ /*
+ * If the column name we find here is an empty string, then it's a
+ * dropped column, so change to NULL.
+ */
+ char *cname = strVal(lfirst(lc));
+
+ if (cname[0] == '\0')
+ cname = NULL;
+ real_colnames[i] = cname;
+ i++;
+ }
+ }
+
+ /*
+ * Ensure colinfo->colnames has a slot for each column. (It could be long
+ * enough already, if we pushed down a name for the last column.) Note:
+ * it's possible that there are now more columns than there were when the
+ * query was parsed, ie colnames could be longer than rte->eref->colnames.
+ * We must assign unique aliases to the new columns too, else there could
+ * be unresolved conflicts when the view/rule is reloaded.
+ */
+ expand_colnames_array_to(colinfo, ncolumns);
+ Assert(colinfo->num_cols == ncolumns);
+
+ /*
+ * Make sufficiently large new_colnames and is_new_col arrays, too.
+ *
+ * Note: because we leave colinfo->num_new_cols zero until after the loop,
+ * colname_is_unique will not consult that array, which is fine because it
+ * would only be duplicate effort.
+ */
+ colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *));
+ colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool));
+
+ /*
+ * Scan the columns, select a unique alias for each one, and store it in
+ * colinfo->colnames and colinfo->new_colnames. The former array has NULL
+ * entries for dropped columns, the latter omits them. Also mark
+ * new_colnames entries as to whether they are new since parse time; this
+ * is the case for entries beyond the length of rte->eref->colnames.
+ */
+ noldcolumns = list_length(rte->eref->colnames);
+ changed_any = false;
+ has_anonymous = false;
+ j = 0;
+ for (i = 0; i < ncolumns; i++)
+ {
+ char *real_colname = real_colnames[i];
+ char *colname = colinfo->colnames[i];
+
+ /* Skip dropped columns */
+ if (real_colname == NULL)
+ {
+ Assert(colname == NULL); /* colnames[i] is already NULL */
+ continue;
+ }
+
+ /* If alias already assigned, that's what to use */
+ if (colname == NULL)
+ {
+ /* If user wrote an alias, prefer that over real column name */
+ if (rte->alias && i < list_length(rte->alias->colnames))
+ colname = strVal(list_nth(rte->alias->colnames, i));
+ else
+ colname = real_colname;
+
+ /* Unique-ify and insert into colinfo */
+ colname = make_colname_unique(colname, dpns, colinfo);
+
+ colinfo->colnames[i] = colname;
+ }
+
+ /* Put names of non-dropped columns in new_colnames[] too */
+ colinfo->new_colnames[j] = colname;
+ /* And mark them as new or not */
+ colinfo->is_new_col[j] = (i >= noldcolumns);
+ j++;
+
+ /* Remember if any assigned aliases differ from "real" name */
+ if (!changed_any && strcmp(colname, real_colname) != 0)
+ changed_any = true;
+
+ /*
+ * Remember if there is a reference to an anonymous column, i.e. one that
+ * FigureColname() labeled "?column?".
+ */
+ if (!has_anonymous && strcmp(real_colname, "?column?") == 0)
+ has_anonymous = true;
+ }
+
+ /*
+ * Set correct length for new_colnames[] array. (Note: if columns have
+ * been added, colinfo->num_cols includes them, which is not really quite
+ * right but is harmless, since any new columns must be at the end where
+ * they won't affect varattnos of pre-existing columns.)
+ */
+ colinfo->num_new_cols = j;
+
+ /*
+ * For a relation RTE, we need only print the alias column names if any
+ * are different from the underlying "real" names. For a function RTE,
+ * always emit a complete column alias list; this is to protect against
+ * possible instability of the default column names (eg, from altering
+ * parameter names). For tablefunc RTEs, we never print aliases, because
+ * the column names are part of the clause itself. For other RTE types,
+ * print if we changed anything OR if there were user-written column
+ * aliases (since the latter would be part of the underlying "reality").
+ */
+ if (rte->rtekind == RTE_RELATION)
+ colinfo->printaliases = changed_any;
+ else if (rte->rtekind == RTE_FUNCTION)
+ colinfo->printaliases = true;
+ else if (rte->rtekind == RTE_TABLEFUNC)
+ colinfo->printaliases = false;
+ else if (rte->alias && rte->alias->colnames != NIL)
+ colinfo->printaliases = true;
+ else
+ colinfo->printaliases = changed_any || has_anonymous;
+}
+
+/*
+ * set_join_column_names: select column aliases for a join RTE
+ *
+ * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed.
+ * If any colnames entries are already filled in, those override local
+ * choices. Also, names for USING columns were already chosen by
+ * set_using_names(). We further expect that column alias selection has been
+ * completed for both input RTEs.
+ */
+static void
+set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
+ deparse_columns *colinfo)
+{
+ deparse_columns *leftcolinfo;
+ deparse_columns *rightcolinfo;
+ bool changed_any;
+ int noldcolumns;
+ int nnewcolumns;
+ Bitmapset *leftmerged = NULL;
+ Bitmapset *rightmerged = NULL;
+ int i;
+ int j;
+ int ic;
+ int jc;
+
+ /* Look up the previously-filled-in child deparse_columns structs */
+ leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns);
+ rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns);
+
+ /*
+ * Ensure colinfo->colnames has a slot for each column. (It could be long
+ * enough already, if we pushed down a name for the last column.) Note:
+ * it's possible that one or both inputs now have more columns than there
+ * were when the query was parsed, but we'll deal with that below. We
+ * only need entries in colnames for pre-existing columns.
+ */
+ noldcolumns = list_length(rte->eref->colnames);
+ expand_colnames_array_to(colinfo, noldcolumns);
+ Assert(colinfo->num_cols == noldcolumns);
+
+ /*
+ * Scan the join output columns, select an alias for each one, and store
+ * it in colinfo->colnames. If there are USING columns, set_using_names()
+ * already selected their names, so we can start the loop at the first
+ * non-merged column.
+ */
+ changed_any = false;
+ for (i = list_length(colinfo->usingNames); i < noldcolumns; i++)
+ {
+ char *colname = colinfo->colnames[i];
+ char *real_colname;
+
+ /* Join column must refer to at least one input column */
+ Assert(colinfo->leftattnos[i] != 0 || colinfo->rightattnos[i] != 0);
+
+ /* Get the child column name */
+ if (colinfo->leftattnos[i] > 0)
+ real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1];
+ else if (colinfo->rightattnos[i] > 0)
+ real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1];
+ else
+ {
+ /* We're joining system columns --- use eref name */
+ real_colname = strVal(list_nth(rte->eref->colnames, i));
+ }
+ /* If child col has been dropped, no need to assign a join colname */
+ if (real_colname == NULL)
+ {
+ colinfo->colnames[i] = NULL;
+ continue;
+ }
+
+ /* In an unnamed join, just report child column names as-is */
+ if (rte->alias == NULL)
+ {
+ colinfo->colnames[i] = real_colname;
+ continue;
+ }
+
+ /* If alias already assigned, that's what to use */
+ if (colname == NULL)
+ {
+ /* If user wrote an alias, prefer that over real column name */
+ if (rte->alias && i < list_length(rte->alias->colnames))
+ colname = strVal(list_nth(rte->alias->colnames, i));
+ else
+ colname = real_colname;
+
+ /* Unique-ify and insert into colinfo */
+ colname = make_colname_unique(colname, dpns, colinfo);
+
+ colinfo->colnames[i] = colname;
+ }
+
+ /* Remember if any assigned aliases differ from "real" name */
+ if (!changed_any && strcmp(colname, real_colname) != 0)
+ changed_any = true;
+ }
+
+ /*
+ * Calculate number of columns the join would have if it were re-parsed
+ * now, and create storage for the new_colnames and is_new_col arrays.
+ *
+ * Note: colname_is_unique will be consulting new_colnames[] during the
+ * loops below, so its not-yet-filled entries must be zeroes.
+ */
+ nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols -
+ list_length(colinfo->usingNames);
+ colinfo->num_new_cols = nnewcolumns;
+ colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *));
+ colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool));
+
+ /*
+ * Generating the new_colnames array is a bit tricky since any new columns
+ * added since parse time must be inserted in the right places. This code
+ * must match the parser, which will order a join's columns as merged
+ * columns first (in USING-clause order), then non-merged columns from the
+ * left input (in attnum order), then non-merged columns from the right
+ * input (ditto). If one of the inputs is itself a join, its columns will
+ * be ordered according to the same rule, which means newly-added columns
+ * might not be at the end. We can figure out what's what by consulting
+ * the leftattnos and rightattnos arrays plus the input is_new_col arrays.
+ *
+ * In these loops, i indexes leftattnos/rightattnos (so it's join varattno
+ * less one), j indexes new_colnames/is_new_col, and ic/jc have similar
+ * meanings for the current child RTE.
+ */
+
+ /* Handle merged columns; they are first and can't be new */
+ i = j = 0;
+ while (i < noldcolumns &&
+ colinfo->leftattnos[i] != 0 &&
+ colinfo->rightattnos[i] != 0)
+ {
+ /* column name is already determined and known unique */
+ colinfo->new_colnames[j] = colinfo->colnames[i];
+ colinfo->is_new_col[j] = false;
+
+ /* build bitmapsets of child attnums of merged columns */
+ if (colinfo->leftattnos[i] > 0)
+ leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]);
+ if (colinfo->rightattnos[i] > 0)
+ rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]);
+
+ i++, j++;
+ }
+
+ /* Handle non-merged left-child columns */
+ ic = 0;
+ for (jc = 0; jc < leftcolinfo->num_new_cols; jc++)
+ {
+ char *child_colname = leftcolinfo->new_colnames[jc];
+
+ if (!leftcolinfo->is_new_col[jc])
+ {
+ /* Advance ic to next non-dropped old column of left child */
+ while (ic < leftcolinfo->num_cols &&
+ leftcolinfo->colnames[ic] == NULL)
+ ic++;
+ Assert(ic < leftcolinfo->num_cols);
+ ic++;
+ /* If it is a merged column, we already processed it */
+ if (bms_is_member(ic, leftmerged))
+ continue;
+ /* Else, advance i to the corresponding existing join column */
+ while (i < colinfo->num_cols &&
+ colinfo->colnames[i] == NULL)
+ i++;
+ Assert(i < colinfo->num_cols);
+ Assert(ic == colinfo->leftattnos[i]);
+ /* Use the already-assigned name of this column */
+ colinfo->new_colnames[j] = colinfo->colnames[i];
+ i++;
+ }
+ else
+ {
+ /*
+ * Unique-ify the new child column name and assign, unless we're
+ * in an unnamed join, in which case just copy
+ */
+ if (rte->alias != NULL)
+ {
+ colinfo->new_colnames[j] =
+ make_colname_unique(child_colname, dpns, colinfo);
+ if (!changed_any &&
+ strcmp(colinfo->new_colnames[j], child_colname) != 0)
+ changed_any = true;
+ }
+ else
+ colinfo->new_colnames[j] = child_colname;
+ }
+
+ colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc];
+ j++;
+ }
+
+ /* Handle non-merged right-child columns in exactly the same way */
+ ic = 0;
+ for (jc = 0; jc < rightcolinfo->num_new_cols; jc++)
+ {
+ char *child_colname = rightcolinfo->new_colnames[jc];
+
+ if (!rightcolinfo->is_new_col[jc])
+ {
+ /* Advance ic to next non-dropped old column of right child */
+ while (ic < rightcolinfo->num_cols &&
+ rightcolinfo->colnames[ic] == NULL)
+ ic++;
+ Assert(ic < rightcolinfo->num_cols);
+ ic++;
+ /* If it is a merged column, we already processed it */
+ if (bms_is_member(ic, rightmerged))
+ continue;
+ /* Else, advance i to the corresponding existing join column */
+ while (i < colinfo->num_cols &&
+ colinfo->colnames[i] == NULL)
+ i++;
+ Assert(i < colinfo->num_cols);
+ Assert(ic == colinfo->rightattnos[i]);
+ /* Use the already-assigned name of this column */
+ colinfo->new_colnames[j] = colinfo->colnames[i];
+ i++;
+ }
+ else
+ {
+ /*
+ * Unique-ify the new child column name and assign, unless we're
+ * in an unnamed join, in which case just copy
+ */
+ if (rte->alias != NULL)
+ {
+ colinfo->new_colnames[j] =
+ make_colname_unique(child_colname, dpns, colinfo);
+ if (!changed_any &&
+ strcmp(colinfo->new_colnames[j], child_colname) != 0)
+ changed_any = true;
+ }
+ else
+ colinfo->new_colnames[j] = child_colname;
+ }
+
+ colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc];
+ j++;
+ }
+
+ /* Assert we processed the right number of columns */
+#ifdef USE_ASSERT_CHECKING
+ while (i < colinfo->num_cols && colinfo->colnames[i] == NULL)
+ i++;
+ Assert(i == colinfo->num_cols);
+ Assert(j == nnewcolumns);
+#endif
+
+ /*
+ * For a named join, print column aliases if we changed any from the child
+ * names. Unnamed joins cannot print aliases.
+ */
+ if (rte->alias != NULL)
+ colinfo->printaliases = changed_any;
+ else
+ colinfo->printaliases = false;
+}
+
+/*
+ * colname_is_unique: is colname distinct from already-chosen column names?
+ *
+ * dpns is query-wide info, colinfo is for the column's RTE
+ */
+static bool
+colname_is_unique(const char *colname, deparse_namespace *dpns,
+ deparse_columns *colinfo)
+{
+ int i;
+ ListCell *lc;
+
+ /* Check against already-assigned column aliases within RTE */
+ for (i = 0; i < colinfo->num_cols; i++)
+ {
+ char *oldname = colinfo->colnames[i];
+
+ if (oldname && strcmp(oldname, colname) == 0)
+ return false;
+ }
+
+ /*
+ * If we're building a new_colnames array, check that too (this will be
+ * partially but not completely redundant with the previous checks)
+ */
+ for (i = 0; i < colinfo->num_new_cols; i++)
+ {
+ char *oldname = colinfo->new_colnames[i];
+
+ if (oldname && strcmp(oldname, colname) == 0)
+ return false;
+ }
+
+ /* Also check against USING-column names that must be globally unique */
+ foreach(lc, dpns->using_names)
+ {
+ char *oldname = (char *) lfirst(lc);
+
+ if (strcmp(oldname, colname) == 0)
+ return false;
+ }
+
+ /* Also check against names already assigned for parent-join USING cols */
+ foreach(lc, colinfo->parentUsing)
+ {
+ char *oldname = (char *) lfirst(lc);
+
+ if (strcmp(oldname, colname) == 0)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * make_colname_unique: modify colname if necessary to make it unique
+ *
+ * dpns is query-wide info, colinfo is for the column's RTE
+ */
+static char *
+make_colname_unique(char *colname, deparse_namespace *dpns,
+ deparse_columns *colinfo)
+{
+ /*
+ * If the selected name isn't unique, append digits to make it so. For a
+ * very long input name, we might have to truncate to stay within
+ * NAMEDATALEN.
+ */
+ if (!colname_is_unique(colname, dpns, colinfo))
+ {
+ int colnamelen = strlen(colname);
+ char *modname = (char *) palloc(colnamelen + 16);
+ int i = 0;
+
+ do
+ {
+ i++;
+ for (;;)
+ {
+ memcpy(modname, colname, colnamelen);
+ sprintf(modname + colnamelen, "_%d", i);
+ if (strlen(modname) < NAMEDATALEN)
+ break;
+ /* drop chars from colname to keep all the digits */
+ colnamelen = pg_mbcliplen(colname, colnamelen,
+ colnamelen - 1);
+ }
+ } while (!colname_is_unique(modname, dpns, colinfo));
+ colname = modname;
+ }
+ return colname;
+}
+
+/*
+ * expand_colnames_array_to: make colinfo->colnames at least n items long
+ *
+ * Any added array entries are initialized to zero.
+ */
+static void
+expand_colnames_array_to(deparse_columns *colinfo, int n)
+{
+ if (n > colinfo->num_cols)
+ {
+ if (colinfo->colnames == NULL)
+ colinfo->colnames = palloc0_array(char *, n);
+ else
+ {
+ colinfo->colnames = repalloc0_array(colinfo->colnames, char *, colinfo->num_cols, n);
+ }
+ colinfo->num_cols = n;
+ }
+}
+
+/*
+ * identify_join_columns: figure out where columns of a join come from
+ *
+ * Fills the join-specific fields of the colinfo struct, except for
+ * usingNames which is filled later.
+ */
+static void
+identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
+ deparse_columns *colinfo)
+{
+ int numjoincols;
+ int jcolno;
+ int rcolno;
+ ListCell *lc;
+
+ /* Extract left/right child RT indexes */
+ if (IsA(j->larg, RangeTblRef))
+ colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex;
+ else if (IsA(j->larg, JoinExpr))
+ colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex;
+ else
+ elog(ERROR, "unrecognized node type in jointree: %d",
+ (int) nodeTag(j->larg));
+ if (IsA(j->rarg, RangeTblRef))
+ colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex;
+ else if (IsA(j->rarg, JoinExpr))
+ colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex;
+ else
+ elog(ERROR, "unrecognized node type in jointree: %d",
+ (int) nodeTag(j->rarg));
+
+ /* Assert children will be processed earlier than join in second pass */
+ Assert(colinfo->leftrti < j->rtindex);
+ Assert(colinfo->rightrti < j->rtindex);
+
+ /* Initialize result arrays with zeroes */
+ numjoincols = list_length(jrte->joinaliasvars);
+ Assert(numjoincols == list_length(jrte->eref->colnames));
+ colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int));
+ colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int));
+
+ /*
+ * Deconstruct RTE's joinleftcols/joinrightcols into desired format.
+ * Recall that the column(s) merged due to USING are the first column(s)
+ * of the join output. We need not do anything special while scanning
+ * joinleftcols, but while scanning joinrightcols we must distinguish
+ * merged from unmerged columns.
+ */
+ jcolno = 0;
+ foreach(lc, jrte->joinleftcols)
+ {
+ int leftattno = lfirst_int(lc);
+
+ colinfo->leftattnos[jcolno++] = leftattno;
+ }
+ rcolno = 0;
+ foreach(lc, jrte->joinrightcols)
+ {
+ int rightattno = lfirst_int(lc);
+
+ if (rcolno < jrte->joinmergedcols) /* merged column? */
+ colinfo->rightattnos[rcolno] = rightattno;
+ else
+ colinfo->rightattnos[jcolno++] = rightattno;
+ rcolno++;
+ }
+ Assert(jcolno == numjoincols);
+}
+
+/*
+ * get_rtable_name: convenience function to get a previously assigned RTE alias
+ *
+ * The RTE must belong to the topmost namespace level in "context".
+ */
+static char *
+get_rtable_name(int rtindex, deparse_context *context)
+{
+ deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces);
+
+ Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names));
+ return (char *) list_nth(dpns->rtable_names, rtindex - 1);
+}
+
+/*
+ * set_deparse_plan: set up deparse_namespace to parse subexpressions
+ * of a given Plan node
+ *
+ * This sets the plan, outer_plan, inner_plan, outer_tlist,
+ * inner_tlist, and index_tlist fields. Caller is responsible for adjusting
+ * the ancestors list if necessary. Note that the rtable and ctes fields do
+ * not need to change when shifting attention to different plan nodes in a
+ * single plan tree.
+ */
+static void
+set_deparse_plan(deparse_namespace *dpns, Plan *plan)
+{
+ dpns->plan = plan;
+
+ /*
+ * We special-case Append and MergeAppend to pretend that the first child
+ * plan is the OUTER referent; we have to interpret OUTER Vars in their
+ * tlists according to one of the children, and the first one is the most
+ * natural choice.
+ */
+ if (IsA(plan, Append))
+ dpns->outer_plan = linitial(((Append *) plan)->appendplans);
+ else if (IsA(plan, MergeAppend))
+ dpns->outer_plan = linitial(((MergeAppend *) plan)->mergeplans);
+ else
+ dpns->outer_plan = outerPlan(plan);
+
+ if (dpns->outer_plan)
+ dpns->outer_tlist = dpns->outer_plan->targetlist;
+ else
+ dpns->outer_tlist = NIL;
+
+ /*
+ * For a SubqueryScan, pretend the subplan is INNER referent. (We don't
+ * use OUTER because that could someday conflict with the normal meaning.)
+ * Likewise, for a CteScan, pretend the subquery's plan is INNER referent.
+ * For a WorkTableScan, locate the parent RecursiveUnion plan node and use
+ * that as INNER referent.
+ *
+ * For MERGE, make the inner tlist point to the merge source tlist, which
+ * is the same as the targetlist that the ModifyTable's source plan provides.
+ * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the
+ * excluded expression's tlist. (Similar to the SubqueryScan we don't want
+ * to reuse OUTER, it's used for RETURNING in some modify table cases,
+ * although not INSERT .. CONFLICT).
+ */
+ if (IsA(plan, SubqueryScan))
+ dpns->inner_plan = ((SubqueryScan *) plan)->subplan;
+ else if (IsA(plan, CteScan))
+ dpns->inner_plan = list_nth(dpns->subplans,
+ ((CteScan *) plan)->ctePlanId - 1);
+ else if (IsA(plan, WorkTableScan))
+ dpns->inner_plan = find_recursive_union(dpns,
+ (WorkTableScan *) plan);
+ else if (IsA(plan, ModifyTable))
+ dpns->inner_plan = plan;
+ else
+ dpns->inner_plan = innerPlan(plan);
+
+ if (IsA(plan, ModifyTable))
+ {
+ if (((ModifyTable *) plan)->operation == CMD_MERGE)
+ dpns->inner_tlist = dpns->outer_tlist;
+ else
+ dpns->inner_tlist = ((ModifyTable *) plan)->exclRelTlist;
+ }
+ else if (dpns->inner_plan)
+ dpns->inner_tlist = dpns->inner_plan->targetlist;
+ else
+ dpns->inner_tlist = NIL;
+
+ /* Set up referent for INDEX_VAR Vars, if needed */
+ if (IsA(plan, IndexOnlyScan))
+ dpns->index_tlist = ((IndexOnlyScan *) plan)->indextlist;
+ else if (IsA(plan, ForeignScan))
+ dpns->index_tlist = ((ForeignScan *) plan)->fdw_scan_tlist;
+ else if (IsA(plan, CustomScan))
+ dpns->index_tlist = ((CustomScan *) plan)->custom_scan_tlist;
+ else
+ dpns->index_tlist = NIL;
+}
+
+/*
+ * Locate the ancestor plan node that is the RecursiveUnion generating
+ * the WorkTableScan's work table. We can match on wtParam, since that
+ * should be unique within the plan tree.
+ */
+static Plan *
+find_recursive_union(deparse_namespace *dpns, WorkTableScan *wtscan)
+{
+ ListCell *lc;
+
+ foreach(lc, dpns->ancestors)
+ {
+ Plan *ancestor = (Plan *) lfirst(lc);
+
+ if (IsA(ancestor, RecursiveUnion) &&
+ ((RecursiveUnion *) ancestor)->wtParam == wtscan->wtParam)
+ return ancestor;
+ }
+ elog(ERROR, "could not find RecursiveUnion for WorkTableScan with wtParam %d",
+ wtscan->wtParam);
+ return NULL;
+}
+
+/*
+ * push_child_plan: temporarily transfer deparsing attention to a child plan
+ *
+ * When expanding an OUTER_VAR or INNER_VAR reference, we must adjust the
+ * deparse context in case the referenced expression itself uses
+ * OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid
+ * affecting levelsup issues (although in a Plan tree there really shouldn't
+ * be any).
+ *
+ * Caller must provide a local deparse_namespace variable to save the
+ * previous state for pop_child_plan.
+ */
+static void
+push_child_plan(deparse_namespace *dpns, Plan *plan,
+ deparse_namespace *save_dpns)
+{
+ /* Save state for restoration later */
+ *save_dpns = *dpns;
+
+ /* Link current plan node into ancestors list */
+ dpns->ancestors = lcons(dpns->plan, dpns->ancestors);
+
+ /* Set attention on selected child */
+ set_deparse_plan(dpns, plan);
+}
+
+/*
+ * pop_child_plan: undo the effects of push_child_plan
+ */
+static void
+pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns)
+{
+ List *ancestors;
+
+ /* Get rid of ancestors list cell added by push_child_plan */
+ ancestors = list_delete_first(dpns->ancestors);
+
+ /* Restore fields changed by push_child_plan */
+ *dpns = *save_dpns;
+
+ /* Make sure dpns->ancestors is right (may be unnecessary) */
+ dpns->ancestors = ancestors;
+}
+
+/*
+ * push_ancestor_plan: temporarily transfer deparsing attention to an
+ * ancestor plan
+ *
+ * When expanding a Param reference, we must adjust the deparse context
+ * to match the plan node that contains the expression being printed;
+ * otherwise we'd fail if that expression itself contains a Param or
+ * OUTER_VAR/INNER_VAR/INDEX_VAR variable.
+ *
+ * The target ancestor is conveniently identified by the ListCell holding it
+ * in dpns->ancestors.
+ *
+ * Caller must provide a local deparse_namespace variable to save the
+ * previous state for pop_ancestor_plan.
+ */
+static void
+push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell,
+ deparse_namespace *save_dpns)
+{
+ Plan *plan = (Plan *) lfirst(ancestor_cell);
+
+ /* Save state for restoration later */
+ *save_dpns = *dpns;
+
+ /* Build a new ancestor list with just this node's ancestors */
+ dpns->ancestors =
+ list_copy_tail(dpns->ancestors,
+ list_cell_number(dpns->ancestors, ancestor_cell) + 1);
+
+ /* Set attention on selected ancestor */
+ set_deparse_plan(dpns, plan);
+}
+
+/*
+ * pop_ancestor_plan: undo the effects of push_ancestor_plan
+ */
+static void
+pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns)
+{
+ /* Free the ancestor list made in push_ancestor_plan */
+ list_free(dpns->ancestors);
+
+ /* Restore fields changed by push_ancestor_plan */
+ *dpns = *save_dpns;
+}
+
+/* ----------
+ * deparse_shard_query - Parse back a query for execution on a shard
+ *
+ * Builds an SQL string to perform the provided query on a specific shard and
+ * places this string into the provided buffer.
+ * ----------
+ */
+void
+deparse_shard_query(Query *query, Oid distrelid, int64 shardid,
+ StringInfo buffer)
+{
+ get_query_def_extended(query, buffer, NIL, distrelid, shardid, NULL,
+ false,
+ 0, WRAP_COLUMN_DEFAULT, 0);
+}
+
+/* ----------
+ * get_query_def - Parse back one query parsetree
+ *
+ * query: parsetree to be displayed
+ * buf: output text is appended to buf
+ * parentnamespace: list (initially empty) of outer-level deparse_namespace's
+ * resultDesc: if not NULL, the output tuple descriptor for the view
+ * represented by a SELECT query. We use the column names from it
+ * to label SELECT output columns, in preference to names in the query
+ * colNamesVisible: true if the surrounding context cares about the output
+ * column names at all (as, for example, an EXISTS() context does not);
+ * when false, we can suppress dummy column labels such as "?column?"
+ * prettyFlags: bitmask of PRETTYFLAG_XXX options
+ * wrapColumn: maximum line length, or -1 to disable wrapping
+ * startIndent: initial indentation amount
+ * ----------
+ */
+static void
+get_query_def(Query *query, StringInfo buf, List *parentnamespace,
+ TupleDesc resultDesc, bool colNamesVisible,
+ int prettyFlags, int wrapColumn, int startIndent)
+{
+ get_query_def_extended(query, buf, parentnamespace, InvalidOid, 0, resultDesc,
+ colNamesVisible,
+ prettyFlags, wrapColumn, startIndent);
+}
+
+/* ----------
+ * get_query_def_extended - Parse back one query parsetree, optionally
+ * with extension using a shard identifier.
+ *
+ * If distrelid is valid and shardid is positive, the provided shardid is added
+ * any time the provided relid is deparsed, so that the query may be executed
+ * on a placement for the given shard.
+ * ----------
+ */
+static void
+get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace,
+ Oid distrelid, int64 shardid, TupleDesc resultDesc,
+ bool colNamesVisible,
+ int prettyFlags, int wrapColumn, int startIndent)
+{
+ deparse_context context;
+ deparse_namespace dpns;
+
+ /* Guard against excessively long or deeply-nested queries */
+ CHECK_FOR_INTERRUPTS();
+ check_stack_depth();
+
+ /*
+ * Before we begin to examine the query, acquire locks on referenced
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
+ * querytree!
+ *
+ * We are only deparsing the query (we are not about to execute it), so we
+ * only need AccessShareLock on the relations it mentions.
+ */
+ AcquireRewriteLocks(query, false, false);
+
+ /*
+ * Set search_path to NIL so that all objects outside of pg_catalog will be
+ * schema-prefixed. pg_catalog will be added automatically when we call
+ * PushEmptySearchPath().
+ */
+ int saveNestLevel = PushEmptySearchPath();
+
+ context.buf = buf;
+ context.namespaces = lcons(&dpns, list_copy(parentnamespace));
+ context.windowClause = NIL;
+ context.windowTList = NIL;
+ context.varprefix = (parentnamespace != NIL ||
+ list_length(query->rtable) != 1);
+ context.prettyFlags = prettyFlags;
+ context.wrapColumn = wrapColumn;
+ context.indentLevel = startIndent;
+ context.special_exprkind = EXPR_KIND_NONE;
+ context.appendparents = NULL;
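+ /* Citus: when distrelid is valid and shardid is positive, the shard id is appended to the deparsed relation name */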
+ context.distrelid = distrelid;
+ context.shardid = shardid;
+
+ set_deparse_for_query(&dpns, query, parentnamespace);
+
+ switch (query->commandType)
+ {
+ case CMD_SELECT:
+ get_select_query_def(query, &context, resultDesc, colNamesVisible);
+ break;
+
+ case CMD_UPDATE:
+ get_update_query_def(query, &context, colNamesVisible);
+ break;
+
+ case CMD_INSERT:
+ get_insert_query_def(query, &context, colNamesVisible);
+ break;
+
+ case CMD_DELETE:
+ get_delete_query_def(query, &context, colNamesVisible);
+ break;
+
+ case CMD_MERGE:
+ get_merge_query_def(query, &context, colNamesVisible);
+ break;
+
+ case CMD_NOTHING:
+ appendStringInfoString(buf, "NOTHING");
+ break;
+
+ case CMD_UTILITY:
+ get_utility_query_def(query, &context);
+ break;
+
+ default:
+ elog(ERROR, "unrecognized query command type: %d",
+ query->commandType);
+ break;
+ }
+
+ /* revert back to original search_path */
+ PopEmptySearchPath(saveNestLevel);
+}
+
+/* ----------
+ * get_values_def - Parse back a VALUES list
+ * ----------
+ */
+static void
+get_values_def(List *values_lists, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ bool first_list = true;
+ ListCell *vtl;
+
+ appendStringInfoString(buf, "VALUES ");
+
+ foreach(vtl, values_lists)
+ {
+ List *sublist = (List *) lfirst(vtl);
+ bool first_col = true;
+ ListCell *lc;
+
+ if (first_list)
+ first_list = false;
+ else
+ appendStringInfoString(buf, ", ");
+
+ appendStringInfoChar(buf, '(');
+ foreach(lc, sublist)
+ {
+ Node *col = (Node *) lfirst(lc);
+
+ if (first_col)
+ first_col = false;
+ else
+ appendStringInfoChar(buf, ',');
+
+ /*
+ * Print the value. Whole-row Vars need special treatment.
+ */
+ get_rule_expr_toplevel(col, context, false);
+ }
+ appendStringInfoChar(buf, ')');
+ }
+}
+
+/* ----------
+ * get_with_clause - Parse back a WITH clause
+ * ----------
+ */
+static void
+get_with_clause(Query *query, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ const char *sep;
+ ListCell *l;
+
+ if (query->cteList == NIL)
+ return;
+
+ if (PRETTY_INDENT(context))
+ {
+ context->indentLevel += PRETTYINDENT_STD;
+ appendStringInfoChar(buf, ' ');
+ }
+
+ if (query->hasRecursive)
+ sep = "WITH RECURSIVE ";
+ else
+ sep = "WITH ";
+ foreach(l, query->cteList)
+ {
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(l);
+
+ appendStringInfoString(buf, sep);
+ appendStringInfoString(buf, quote_identifier(cte->ctename));
+ if (cte->aliascolnames)
+ {
+ bool first = true;
+ ListCell *col;
+
+ appendStringInfoChar(buf, '(');
+ foreach(col, cte->aliascolnames)
+ {
+ if (first)
+ first = false;
+ else
+ appendStringInfoString(buf, ", ");
+ appendStringInfoString(buf,
+ quote_identifier(strVal(lfirst(col))));
+ }
+ appendStringInfoChar(buf, ')');
+ }
+ appendStringInfoString(buf, " AS ");
+ switch (cte->ctematerialized)
+ {
+ case CTEMaterializeDefault:
+ break;
+ case CTEMaterializeAlways:
+ appendStringInfoString(buf, "MATERIALIZED ");
+ break;
+ case CTEMaterializeNever:
+ appendStringInfoString(buf, "NOT MATERIALIZED ");
+ break;
+ }
+ appendStringInfoChar(buf, '(');
+ if (PRETTY_INDENT(context))
+ appendContextKeyword(context, "", 0, 0, 0);
+ get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL,
+ true,
+ context->prettyFlags, context->wrapColumn,
+ context->indentLevel);
+ if (PRETTY_INDENT(context))
+ appendContextKeyword(context, "", 0, 0, 0);
+ appendStringInfoChar(buf, ')');
+
+ if (cte->search_clause)
+ {
+ bool first = true;
+ ListCell *lc;
+
+ appendStringInfo(buf, " SEARCH %s FIRST BY ",
+ cte->search_clause->search_breadth_first ? "BREADTH" : "DEPTH");
+
+ foreach(lc, cte->search_clause->search_col_list)
+ {
+ if (first)
+ first = false;
+ else
+ appendStringInfoString(buf, ", ");
+ appendStringInfoString(buf,
+ quote_identifier(strVal(lfirst(lc))));
+ }
+
+ appendStringInfo(buf, " SET %s", quote_identifier(cte->search_clause->search_seq_column));
+ }
+
+ if (cte->cycle_clause)
+ {
+ bool first = true;
+ ListCell *lc;
+
+ appendStringInfoString(buf, " CYCLE ");
+
+ foreach(lc, cte->cycle_clause->cycle_col_list)
+ {
+ if (first)
+ first = false;
+ else
+ appendStringInfoString(buf, ", ");
+ appendStringInfoString(buf,
+ quote_identifier(strVal(lfirst(lc))));
+ }
+
+ appendStringInfo(buf, " SET %s", quote_identifier(cte->cycle_clause->cycle_mark_column));
+
+ {
+ Const *cmv = castNode(Const, cte->cycle_clause->cycle_mark_value);
+ Const *cmd = castNode(Const, cte->cycle_clause->cycle_mark_default);
+
+ if (!(cmv->consttype == BOOLOID && !cmv->constisnull && DatumGetBool(cmv->constvalue) == true &&
+ cmd->consttype == BOOLOID && !cmd->constisnull && DatumGetBool(cmd->constvalue) == false))
+ {
+ appendStringInfoString(buf, " TO ");
+ get_rule_expr(cte->cycle_clause->cycle_mark_value, context, false);
+ appendStringInfoString(buf, " DEFAULT ");
+ get_rule_expr(cte->cycle_clause->cycle_mark_default, context, false);
+ }
+ }
+
+ appendStringInfo(buf, " USING %s", quote_identifier(cte->cycle_clause->cycle_path_column));
+ }
+
+ sep = ", ";
+ }
+
+ if (PRETTY_INDENT(context))
+ {
+ context->indentLevel -= PRETTYINDENT_STD;
+ appendContextKeyword(context, "", 0, 0, 0);
+ }
+ else
+ appendStringInfoChar(buf, ' ');
+}
+
+/* ----------
+ * get_select_query_def - Parse back a SELECT parsetree
+ * ----------
+ */
+static void
+get_select_query_def(Query *query, deparse_context *context,
+ TupleDesc resultDesc, bool colNamesVisible)
+{
+ StringInfo buf = context->buf;
+ List *save_windowclause;
+ List *save_windowtlist;
+ bool force_colno;
+ ListCell *l;
+
+ /* Insert the WITH clause if given */
+ get_with_clause(query, context);
+
+ /* Set up context for possible window functions */
+ save_windowclause = context->windowClause;
+ context->windowClause = query->windowClause;
+ save_windowtlist = context->windowTList;
+ context->windowTList = query->targetList;
+
+ /*
+ * If the Query node has a setOperations tree, then it's the top level of
+ * a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT
+ * fields are interesting in the top query itself.
+ */
+ if (query->setOperations)
+ {
+ get_setop_query(query->setOperations, query, context, resultDesc,
+ colNamesVisible);
+ /* ORDER BY clauses must be simple in this case */
+ force_colno = true;
+ }
+ else
+ {
+ get_basic_select_query(query, context, resultDesc, colNamesVisible);
+ force_colno = false;
+ }
+
+ /* Add the ORDER BY clause if given */
+ if (query->sortClause != NIL)
+ {
+ appendContextKeyword(context, " ORDER BY ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_rule_orderby(query->sortClause, query->targetList,
+ force_colno, context);
+ }
+
+ /*
+ * Add the LIMIT/OFFSET clauses if given. If non-default options, use the
+ * standard spelling of LIMIT.
+ */
+ if (query->limitOffset != NULL)
+ {
+ appendContextKeyword(context, " OFFSET ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ get_rule_expr(query->limitOffset, context, false);
+ }
+ if (query->limitCount != NULL)
+ {
+ if (query->limitOption == LIMIT_OPTION_WITH_TIES)
+ {
+ /* wrap the count in parentheses; without them, deparsing fails when the expression contains a cast */
+ appendContextKeyword(context, " FETCH FIRST (",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ get_rule_expr(query->limitCount, context, false);
+ appendStringInfoString(buf, ") ROWS WITH TIES");
+ }
+ else
+ {
+ appendContextKeyword(context, " LIMIT ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ if (IsA(query->limitCount, Const) &&
+ ((Const *) query->limitCount)->constisnull)
+ appendStringInfoString(buf, "ALL");
+ else
+ get_rule_expr(query->limitCount, context, false);
+ }
+ }
+
+ /* Add FOR [KEY] UPDATE/SHARE clauses if present */
+ if (query->hasForUpdate)
+ {
+ foreach(l, query->rowMarks)
+ {
+ RowMarkClause *rc = (RowMarkClause *) lfirst(l);
+
+ /* don't print implicit clauses */
+ if (rc->pushedDown)
+ continue;
+
+ switch (rc->strength)
+ {
+ case LCS_NONE:
+ /* we intentionally throw an error for LCS_NONE */
+ elog(ERROR, "unrecognized LockClauseStrength %d",
+ (int) rc->strength);
+ break;
+ case LCS_FORKEYSHARE:
+ appendContextKeyword(context, " FOR KEY SHARE",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ break;
+ case LCS_FORSHARE:
+ appendContextKeyword(context, " FOR SHARE",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ break;
+ case LCS_FORNOKEYUPDATE:
+ appendContextKeyword(context, " FOR NO KEY UPDATE",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ break;
+ case LCS_FORUPDATE:
+ appendContextKeyword(context, " FOR UPDATE",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ break;
+ }
+
+ appendStringInfo(buf, " OF %s",
+ quote_identifier(get_rtable_name(rc->rti,
+ context)));
+ if (rc->waitPolicy == LockWaitError)
+ appendStringInfoString(buf, " NOWAIT");
+ else if (rc->waitPolicy == LockWaitSkip)
+ appendStringInfoString(buf, " SKIP LOCKED");
+ }
+ }
+
+ context->windowClause = save_windowclause;
+ context->windowTList = save_windowtlist;
+}
+
+/*
+ * Detect whether query looks like SELECT ... FROM VALUES();
+ * if so, return the VALUES RTE. Otherwise return NULL.
+ */
+static RangeTblEntry *
+get_simple_values_rte(Query *query, TupleDesc resultDesc)
+{
+ RangeTblEntry *result = NULL;
+ ListCell *lc;
+ int colno;
+
+ /*
+ * We want to return true even if the Query also contains OLD or NEW rule
+ * RTEs. So the idea is to scan the rtable and see if there is only one
+ * inFromCl RTE that is a VALUES RTE.
+ */
+ foreach(lc, query->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+
+ if (rte->rtekind == RTE_VALUES && rte->inFromCl)
+ {
+ if (result)
+ return NULL; /* multiple VALUES (probably not possible) */
+ result = rte;
+ }
+ else if (rte->rtekind == RTE_RELATION && !rte->inFromCl)
+ continue; /* ignore rule entries */
+ else
+ return NULL; /* something else -> not simple VALUES */
+ }
+
+ /*
+ * We don't need to check the targetlist in any great detail, because
+ * parser/analyze.c will never generate a "bare" VALUES RTE --- they only
+ * appear inside auto-generated sub-queries with very restricted
+ * structure. However, DefineView might have modified the tlist by
+ * injecting new column aliases; so compare tlist resnames against the
+ * RTE's names to detect that.
+ */
+ if (result)
+ {
+ ListCell *lcn;
+
+ if (list_length(query->targetList) != list_length(result->eref->colnames))
+ return NULL; /* this probably cannot happen */
+ colno = 0;
+ forboth(lc, query->targetList, lcn, result->eref->colnames)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc);
+ char *cname = strVal(lfirst(lcn));
+ char *colname;
+
+ if (tle->resjunk)
+ return NULL; /* this probably cannot happen */
+ /* compute name that get_target_list would use for column */
+ colno++;
+ if (resultDesc && colno <= resultDesc->natts)
+ colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname);
+ else
+ colname = tle->resname;
+
+ /* does it match the VALUES RTE? */
+ if (colname == NULL || strcmp(colname, cname) != 0)
+ return NULL; /* column name has been changed */
+ }
+ }
+
+ return result;
+}
+
+static void
+get_basic_select_query(Query *query, deparse_context *context,
+ TupleDesc resultDesc, bool colNamesVisible)
+{
+ StringInfo buf = context->buf;
+ RangeTblEntry *values_rte;
+ char *sep;
+ ListCell *l;
+
+ if (PRETTY_INDENT(context))
+ {
+ context->indentLevel += PRETTYINDENT_STD;
+ appendStringInfoChar(buf, ' ');
+ }
+
+ /*
+ * If the query looks like SELECT * FROM (VALUES ...), then print just the
+ * VALUES part. This reverses what transformValuesClause() did at parse
+ * time.
+ */
+ values_rte = get_simple_values_rte(query, resultDesc);
+ if (values_rte)
+ {
+ get_values_def(values_rte->values_lists, context);
+ return;
+ }
+
+ /*
+ * Build up the query string - first we say SELECT
+ */
+ if (query->isReturn)
+ appendStringInfoString(buf, "RETURN");
+ else
+ appendStringInfoString(buf, "SELECT");
+
+ /* Add the DISTINCT clause if given */
+ if (query->distinctClause != NIL)
+ {
+ if (query->hasDistinctOn)
+ {
+ appendStringInfoString(buf, " DISTINCT ON (");
+ sep = "";
+ foreach(l, query->distinctClause)
+ {
+ SortGroupClause *srt = (SortGroupClause *) lfirst(l);
+
+ appendStringInfoString(buf, sep);
+ get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList,
+ false, context);
+ sep = ", ";
+ }
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ appendStringInfoString(buf, " DISTINCT");
+ }
+
+ /* Then we tell what to select (the targetlist) */
+ get_target_list(query->targetList, context, resultDesc, colNamesVisible);
+
+ /* Add the FROM clause if needed */
+ get_from_clause(query, " FROM ", context);
+
+ /* Add the WHERE clause if given */
+ if (query->jointree->quals != NULL)
+ {
+ appendContextKeyword(context, " WHERE ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_rule_expr(query->jointree->quals, context, false);
+ }
+
+ /* Add the GROUP BY clause if given */
+ if (query->groupClause != NULL || query->groupingSets != NULL)
+ {
+ ParseExprKind save_exprkind;
+
+ appendContextKeyword(context, " GROUP BY ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ if (query->groupDistinct)
+ appendStringInfoString(buf, "DISTINCT ");
+
+ save_exprkind = context->special_exprkind;
+ context->special_exprkind = EXPR_KIND_GROUP_BY;
+
+ if (query->groupingSets == NIL)
+ {
+ sep = "";
+ foreach(l, query->groupClause)
+ {
+ SortGroupClause *grp = (SortGroupClause *) lfirst(l);
+
+ appendStringInfoString(buf, sep);
+ get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList,
+ false, context);
+ sep = ", ";
+ }
+ }
+ else
+ {
+ sep = "";
+ foreach(l, query->groupingSets)
+ {
+ GroupingSet *grp = lfirst(l);
+
+ appendStringInfoString(buf, sep);
+ get_rule_groupingset(grp, query->targetList, true, context);
+ sep = ", ";
+ }
+ }
+
+ context->special_exprkind = save_exprkind;
+ }
+
+ /* Add the HAVING clause if given */
+ if (query->havingQual != NULL)
+ {
+ appendContextKeyword(context, " HAVING ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ get_rule_expr(query->havingQual, context, false);
+ }
+
+ /* Add the WINDOW clause if needed */
+ if (query->windowClause != NIL)
+ get_rule_windowclause(query, context);
+}
+
+/* ----------
+ * get_target_list - Parse back a SELECT target list
+ *
+ * This is also used for RETURNING lists in INSERT/UPDATE/DELETE.
+ *
+ * resultDesc and colNamesVisible are as for get_query_def()
+ * ----------
+ */
+static void
+get_target_list(List *targetList, deparse_context *context,
+ TupleDesc resultDesc, bool colNamesVisible)
+{
+ StringInfo buf = context->buf;
+ StringInfoData targetbuf;
+ bool last_was_multiline = false;
+ char *sep;
+ int colno;
+ ListCell *l;
+
+ /* we use targetbuf to hold each TLE's text temporarily */
+ initStringInfo(&targetbuf);
+
+ sep = " ";
+ colno = 0;
+ foreach(l, targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+ char *colname;
+ char *attname;
+
+ if (tle->resjunk)
+ continue; /* ignore junk entries */
+
+ appendStringInfoString(buf, sep);
+ sep = ", ";
+ colno++;
+
+ /*
+ * Put the new field text into targetbuf so we can decide after we've
+ * got it whether or not it needs to go on a new line.
+ */
+ resetStringInfo(&targetbuf);
+ context->buf = &targetbuf;
+
+ /*
+ * We special-case Var nodes rather than using get_rule_expr. This is
+ * needed because get_rule_expr will display a whole-row Var as
+ * "foo.*", which is the preferred notation in most contexts, but at
+ * the top level of a SELECT list it's not right (the parser will
+ * expand that notation into multiple columns, yielding behavior
+ * different from a whole-row Var). We need to call get_variable
+ * directly so that we can tell it to do the right thing, and so that
+ * we can get the attribute name which is the default AS label.
+ */
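+		/*
+		 * For example, "SELECT foo FROM foo" (a whole-row reference) is
+		 * deparsed as "foo.*::foo" rather than "foo.*", since the latter
+		 * would be expanded by the parser into one column per attribute.
+		 */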
+ if (tle->expr && (IsA(tle->expr, Var)))
+ {
+ attname = get_variable((Var *) tle->expr, 0, true, context);
+ }
+ else
+ {
+ get_rule_expr((Node *) tle->expr, context, true);
+
+ /*
+ * When colNamesVisible is true, we should always show the
+ * assigned column name explicitly. Otherwise, show it only if
+ * it's not FigureColname's fallback.
+ */
+ attname = colNamesVisible ? NULL : "?column?";
+ }
+
+ /*
+ * Figure out what the result column should be called. In the context
+ * of a view, use the view's tuple descriptor (so as to pick up the
+ * effects of any column RENAME that's been done on the view).
+ * Otherwise, just use what we can find in the TLE.
+ */
+ if (resultDesc && colno <= resultDesc->natts)
+ colname = NameStr(TupleDescAttr(resultDesc, colno - 1)->attname);
+ else
+ colname = tle->resname;
+
+ /* Show AS unless the column's name is correct as-is */
+ if (colname) /* resname could be NULL */
+ {
+ if (attname == NULL || strcmp(attname, colname) != 0)
+ appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname));
+ }
+
+ /* Restore context's output buffer */
+ context->buf = buf;
+
+ /* Consider line-wrapping if enabled */
+ if (PRETTY_INDENT(context) && context->wrapColumn >= 0)
+ {
+ int leading_nl_pos;
+
+ /* Does the new field start with a new line? */
+ if (targetbuf.len > 0 && targetbuf.data[0] == '\n')
+ leading_nl_pos = 0;
+ else
+ leading_nl_pos = -1;
+
+ /* If so, we shouldn't add anything */
+ if (leading_nl_pos >= 0)
+ {
+ /* instead, remove any trailing spaces currently in buf */
+ removeStringInfoSpaces(buf);
+ }
+ else
+ {
+ char *trailing_nl;
+
+ /* Locate the start of the current line in the output buffer */
+ trailing_nl = strrchr(buf->data, '\n');
+ if (trailing_nl == NULL)
+ trailing_nl = buf->data;
+ else
+ trailing_nl++;
+
+ /*
+ * Add a newline, plus some indentation, if the new field is
+ * not the first and either the new field would cause an
+ * overflow or the last field used more than one line.
+ */
+ if (colno > 1 &&
+ ((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) ||
+ last_was_multiline))
+ appendContextKeyword(context, "", -PRETTYINDENT_STD,
+ PRETTYINDENT_STD, PRETTYINDENT_VAR);
+ }
+
+ /* Remember this field's multiline status for next iteration */
+ last_was_multiline =
+ (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL);
+ }
+
+ /* Add the new field */
+ appendStringInfoString(buf, targetbuf.data);
+ }
+
+ /* clean up */
+ pfree(targetbuf.data);
+}
+
+static void
+get_setop_query(Node *setOp, Query *query, deparse_context *context,
+ TupleDesc resultDesc, bool colNamesVisible)
+{
+ StringInfo buf = context->buf;
+ bool need_paren;
+
+ /* Guard against excessively long or deeply-nested queries */
+ CHECK_FOR_INTERRUPTS();
+ check_stack_depth();
+
+ if (IsA(setOp, RangeTblRef))
+ {
+ RangeTblRef *rtr = (RangeTblRef *) setOp;
+ RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable);
+ Query *subquery = rte->subquery;
+
+ Assert(subquery != NULL);
+ Assert(subquery->setOperations == NULL);
+ /* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */
+ need_paren = (subquery->cteList ||
+ subquery->sortClause ||
+ subquery->rowMarks ||
+ subquery->limitOffset ||
+ subquery->limitCount);
+ if (need_paren)
+ appendStringInfoChar(buf, '(');
+ get_query_def(subquery, buf, context->namespaces, resultDesc,
+ colNamesVisible,
+ context->prettyFlags, context->wrapColumn,
+ context->indentLevel);
+ if (need_paren)
+ appendStringInfoChar(buf, ')');
+ }
+ else if (IsA(setOp, SetOperationStmt))
+ {
+ SetOperationStmt *op = (SetOperationStmt *) setOp;
+ int subindent;
+
+ /*
+ * We force parens when nesting two SetOperationStmts, except when the
+ * lefthand input is another setop of the same kind. Syntactically,
+ * we could omit parens in rather more cases, but it seems best to use
+ * parens to flag cases where the setop operator changes. If we use
+ * parens, we also increase the indentation level for the child query.
+ *
+ * There are some cases in which parens are needed around a leaf query
+ * too, but those are more easily handled at the next level down (see
+ * code above).
+ */
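+		/*
+		 * For example, "(SELECT ... UNION SELECT ...) EXCEPT SELECT ..."
+		 * keeps its parens because the operator changes, whereas a chain of
+		 * UNIONs of the same flavor is printed without extra parens.
+		 */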
+ if (IsA(op->larg, SetOperationStmt))
+ {
+ SetOperationStmt *lop = (SetOperationStmt *) op->larg;
+
+ if (op->op == lop->op && op->all == lop->all)
+ need_paren = false;
+ else
+ need_paren = true;
+ }
+ else
+ need_paren = false;
+
+ if (need_paren)
+ {
+ appendStringInfoChar(buf, '(');
+ subindent = PRETTYINDENT_STD;
+ appendContextKeyword(context, "", subindent, 0, 0);
+ }
+ else
+ subindent = 0;
+
+ get_setop_query(op->larg, query, context, resultDesc, colNamesVisible);
+
+ if (need_paren)
+ appendContextKeyword(context, ") ", -subindent, 0, 0);
+ else if (PRETTY_INDENT(context))
+ appendContextKeyword(context, "", -subindent, 0, 0);
+ else
+ appendStringInfoChar(buf, ' ');
+
+ switch (op->op)
+ {
+ case SETOP_UNION:
+ appendStringInfoString(buf, "UNION ");
+ break;
+ case SETOP_INTERSECT:
+ appendStringInfoString(buf, "INTERSECT ");
+ break;
+ case SETOP_EXCEPT:
+ appendStringInfoString(buf, "EXCEPT ");
+ break;
+ default:
+ elog(ERROR, "unrecognized set op: %d",
+ (int) op->op);
+ }
+ if (op->all)
+ appendStringInfoString(buf, "ALL ");
+
+ /* Always parenthesize if RHS is another setop */
+ need_paren = IsA(op->rarg, SetOperationStmt);
+
+ /*
+ * The indentation code here is deliberately a bit different from that
+ * for the lefthand input, because we want the line breaks in
+ * different places.
+ */
+ if (need_paren)
+ {
+ appendStringInfoChar(buf, '(');
+ subindent = PRETTYINDENT_STD;
+ }
+ else
+ subindent = 0;
+ appendContextKeyword(context, "", subindent, 0, 0);
+
+ get_setop_query(op->rarg, query, context, resultDesc, false);
+
+ if (PRETTY_INDENT(context))
+ context->indentLevel -= subindent;
+ if (need_paren)
+ appendContextKeyword(context, ")", 0, 0, 0);
+ }
+ else
+ {
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(setOp));
+ }
+}
+
+/*
+ * Display a sort/group clause.
+ *
+ * Also returns the expression tree, so caller need not find it again.
+ */
+static Node *
+get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
+ deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ TargetEntry *tle;
+ Node *expr;
+
+ tle = get_sortgroupref_tle(ref, tlist);
+ expr = (Node *) tle->expr;
+
+ /*
+ * Use column-number form if requested by caller. Otherwise, if
+ * expression is a constant, force it to be dumped with an explicit cast
+ * as decoration --- this is because a simple integer constant is
+ * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
+ * dump it without any decoration. If it's anything more complex than a
+ * simple Var, then force extra parens around it, to ensure it can't be
+ * misinterpreted as a cube() or rollup() construct.
+ */
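+	/*
+	 * For example, in "ORDER BY 2" the bare integer refers to the second
+	 * output column, so a genuine constant is printed with a cast, e.g.
+	 * "2::integer", to keep it from being read back as a column number.
+	 */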
+ if (force_colno)
+ {
+ Assert(!tle->resjunk);
+ appendStringInfo(buf, "%d", tle->resno);
+ }
+ else if (expr && IsA(expr, Const))
+ get_const_expr((Const *) expr, context, 1);
+ else if (!expr || IsA(expr, Var))
+ get_rule_expr(expr, context, true);
+ else
+ {
+ /*
+ * We must force parens for function-like expressions even if
+ * PRETTY_PAREN is off, since those are the ones in danger of
+ * misparsing. For other expressions we need to force them only if
+ * PRETTY_PAREN is on, since otherwise the expression will output them
+ * itself. (We can't skip the parens.)
+ */
+ bool need_paren = (PRETTY_PAREN(context)
+ || IsA(expr, FuncExpr)
+ || IsA(expr, Aggref)
+ || IsA(expr, WindowFunc)
+ || IsA(expr, JsonConstructorExpr));
+
+ if (need_paren)
+ appendStringInfoChar(context->buf, '(');
+ get_rule_expr(expr, context, true);
+ if (need_paren)
+ appendStringInfoChar(context->buf, ')');
+ }
+
+ return expr;
+}
+
+/*
+ * Display a GroupingSet
+ */
+static void
+get_rule_groupingset(GroupingSet *gset, List *targetlist,
+ bool omit_parens, deparse_context *context)
+{
+ ListCell *l;
+ StringInfo buf = context->buf;
+ bool omit_child_parens = true;
+ char *sep = "";
+
+ switch (gset->kind)
+ {
+ case GROUPING_SET_EMPTY:
+ appendStringInfoString(buf, "()");
+ return;
+
+ case GROUPING_SET_SIMPLE:
+ {
+ if (!omit_parens || list_length(gset->content) != 1)
+ appendStringInfoChar(buf, '(');
+
+ foreach(l, gset->content)
+ {
+ Index ref = lfirst_int(l);
+
+ appendStringInfoString(buf, sep);
+ get_rule_sortgroupclause(ref, targetlist,
+ false, context);
+ sep = ", ";
+ }
+
+ if (!omit_parens || list_length(gset->content) != 1)
+ appendStringInfoChar(buf, ')');
+ }
+ return;
+
+ case GROUPING_SET_ROLLUP:
+ appendStringInfoString(buf, "ROLLUP(");
+ break;
+ case GROUPING_SET_CUBE:
+ appendStringInfoString(buf, "CUBE(");
+ break;
+ case GROUPING_SET_SETS:
+ appendStringInfoString(buf, "GROUPING SETS (");
+ omit_child_parens = false;
+ break;
+ }
+
+ foreach(l, gset->content)
+ {
+ appendStringInfoString(buf, sep);
+ get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context);
+ sep = ", ";
+ }
+
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Display an ORDER BY list.
+ */
+static void
+get_rule_orderby(List *orderList, List *targetList,
+ bool force_colno, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ const char *sep;
+ ListCell *l;
+
+ sep = "";
+ foreach(l, orderList)
+ {
+ SortGroupClause *srt = (SortGroupClause *) lfirst(l);
+ Node *sortexpr;
+ Oid sortcoltype;
+ TypeCacheEntry *typentry;
+
+ appendStringInfoString(buf, sep);
+ sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList,
+ force_colno, context);
+ sortcoltype = exprType(sortexpr);
+ /* See whether operator is default < or > for datatype */
+ typentry = lookup_type_cache(sortcoltype,
+ TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+ if (srt->sortop == typentry->lt_opr)
+ {
+ /* ASC is default, so emit nothing for it */
+ if (srt->nulls_first)
+ appendStringInfoString(buf, " NULLS FIRST");
+ }
+ else if (srt->sortop == typentry->gt_opr)
+ {
+ appendStringInfoString(buf, " DESC");
+ /* DESC defaults to NULLS FIRST */
+ if (!srt->nulls_first)
+ appendStringInfoString(buf, " NULLS LAST");
+ }
+ else
+ {
+ appendStringInfo(buf, " USING %s",
+ generate_operator_name(srt->sortop,
+ sortcoltype,
+ sortcoltype));
+ /* be specific to eliminate ambiguity */
+ if (srt->nulls_first)
+ appendStringInfoString(buf, " NULLS FIRST");
+ else
+ appendStringInfoString(buf, " NULLS LAST");
+ }
+ sep = ", ";
+ }
+}
+
+/*
+ * Display a WINDOW clause.
+ *
+ * Note that the windowClause list might contain only anonymous window
+ * specifications, in which case we should print nothing here.
+ */
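+/*
+ * For example, an inline "OVER (ORDER BY x)" specification is anonymous and
+ * is printed with the window function itself, while "OVER w" combined with
+ * "WINDOW w AS (ORDER BY x)" yields a named entry that is printed here.
+ */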
+static void
+get_rule_windowclause(Query *query, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ const char *sep;
+ ListCell *l;
+
+ sep = NULL;
+ foreach(l, query->windowClause)
+ {
+ WindowClause *wc = (WindowClause *) lfirst(l);
+
+ if (wc->name == NULL)
+ continue; /* ignore anonymous windows */
+
+ if (sep == NULL)
+ appendContextKeyword(context, " WINDOW ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ else
+ appendStringInfoString(buf, sep);
+
+ appendStringInfo(buf, "%s AS ", quote_identifier(wc->name));
+
+ get_rule_windowspec(wc, query->targetList, context);
+
+ sep = ", ";
+ }
+}
+
+/*
+ * Display a window definition
+ */
+static void
+get_rule_windowspec(WindowClause *wc, List *targetList,
+ deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ bool needspace = false;
+ const char *sep;
+ ListCell *l;
+
+ appendStringInfoChar(buf, '(');
+ if (wc->refname)
+ {
+ appendStringInfoString(buf, quote_identifier(wc->refname));
+ needspace = true;
+ }
+ /* partition clauses are always inherited, so only print if no refname */
+ if (wc->partitionClause && !wc->refname)
+ {
+ if (needspace)
+ appendStringInfoChar(buf, ' ');
+ appendStringInfoString(buf, "PARTITION BY ");
+ sep = "";
+ foreach(l, wc->partitionClause)
+ {
+ SortGroupClause *grp = (SortGroupClause *) lfirst(l);
+
+ appendStringInfoString(buf, sep);
+ get_rule_sortgroupclause(grp->tleSortGroupRef, targetList,
+ false, context);
+ sep = ", ";
+ }
+ needspace = true;
+ }
+ /* print ordering clause only if not inherited */
+ if (wc->orderClause && !wc->copiedOrder)
+ {
+ if (needspace)
+ appendStringInfoChar(buf, ' ');
+ appendStringInfoString(buf, "ORDER BY ");
+ get_rule_orderby(wc->orderClause, targetList, false, context);
+ needspace = true;
+ }
+ /* framing clause is never inherited, so print unless it's default */
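+	/* e.g. "ROWS BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE TIES" */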
+ if (wc->frameOptions & FRAMEOPTION_NONDEFAULT)
+ {
+ if (needspace)
+ appendStringInfoChar(buf, ' ');
+ if (wc->frameOptions & FRAMEOPTION_RANGE)
+ appendStringInfoString(buf, "RANGE ");
+ else if (wc->frameOptions & FRAMEOPTION_ROWS)
+ appendStringInfoString(buf, "ROWS ");
+ else if (wc->frameOptions & FRAMEOPTION_GROUPS)
+ appendStringInfoString(buf, "GROUPS ");
+ else
+ Assert(false);
+ if (wc->frameOptions & FRAMEOPTION_BETWEEN)
+ appendStringInfoString(buf, "BETWEEN ");
+ if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
+ appendStringInfoString(buf, "UNBOUNDED PRECEDING ");
+ else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW)
+ appendStringInfoString(buf, "CURRENT ROW ");
+ else if (wc->frameOptions & FRAMEOPTION_START_OFFSET)
+ {
+ get_rule_expr(wc->startOffset, context, false);
+ if (wc->frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING)
+ appendStringInfoString(buf, " PRECEDING ");
+ else if (wc->frameOptions & FRAMEOPTION_START_OFFSET_FOLLOWING)
+ appendStringInfoString(buf, " FOLLOWING ");
+ else
+ Assert(false);
+ }
+ else
+ Assert(false);
+ if (wc->frameOptions & FRAMEOPTION_BETWEEN)
+ {
+ appendStringInfoString(buf, "AND ");
+ if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
+ appendStringInfoString(buf, "UNBOUNDED FOLLOWING ");
+ else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW)
+ appendStringInfoString(buf, "CURRENT ROW ");
+ else if (wc->frameOptions & FRAMEOPTION_END_OFFSET)
+ {
+ get_rule_expr(wc->endOffset, context, false);
+ if (wc->frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
+ appendStringInfoString(buf, " PRECEDING ");
+ else if (wc->frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
+ appendStringInfoString(buf, " FOLLOWING ");
+ else
+ Assert(false);
+ }
+ else
+ Assert(false);
+ }
+ if (wc->frameOptions & FRAMEOPTION_EXCLUDE_CURRENT_ROW)
+ appendStringInfoString(buf, "EXCLUDE CURRENT ROW ");
+ else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_GROUP)
+ appendStringInfoString(buf, "EXCLUDE GROUP ");
+ else if (wc->frameOptions & FRAMEOPTION_EXCLUDE_TIES)
+ appendStringInfoString(buf, "EXCLUDE TIES ");
+ /* we will now have a trailing space; remove it */
+ buf->len--;
+ }
+ appendStringInfoChar(buf, ')');
+}
+
+/* ----------
+ * get_insert_query_def - Parse back an INSERT parsetree
+ * ----------
+ */
+static void
+get_insert_query_def(Query *query, deparse_context *context,
+ bool colNamesVisible)
+{
+ StringInfo buf = context->buf;
+ RangeTblEntry *select_rte = NULL;
+ RangeTblEntry *values_rte = NULL;
+ RangeTblEntry *rte;
+ ListCell *l;
+ List *strippedexprs = NIL;
+
+ /* Insert the WITH clause if given */
+ get_with_clause(query, context);
+
+ /*
+ * If it's an INSERT ... SELECT or multi-row VALUES, there will be a
+ * single RTE for the SELECT or VALUES. Plain VALUES has neither.
+ */
+ foreach(l, query->rtable)
+ {
+ rte = (RangeTblEntry *) lfirst(l);
+
+ if (rte->rtekind == RTE_SUBQUERY)
+ {
+ if (select_rte)
+ elog(ERROR, "too many subquery RTEs in INSERT");
+ select_rte = rte;
+ }
+
+ if (rte->rtekind == RTE_VALUES)
+ {
+ if (values_rte)
+ elog(ERROR, "too many values RTEs in INSERT");
+ values_rte = rte;
+ }
+ }
+ if (select_rte && values_rte)
+ elog(ERROR, "both subquery and values RTEs in INSERT");
+
+ /*
+ * Start the query with INSERT INTO relname
+ */
+ rte = rt_fetch(query->resultRelation, query->rtable);
+ Assert(rte->rtekind == RTE_RELATION);
+
+ if (PRETTY_INDENT(context))
+ {
+ context->indentLevel += PRETTYINDENT_STD;
+ appendStringInfoChar(buf, ' ');
+ }
+ appendStringInfo(buf, "INSERT INTO %s",
+ generate_relation_or_shard_name(rte->relid,
+ context->distrelid,
+ context->shardid, NIL));
+
+ /* Print the relation alias, if needed; INSERT requires explicit AS */
+ get_rte_alias(rte, query->resultRelation, true, context);
+
+ /* always want a space here */
+ appendStringInfoChar(buf, ' ');
+
+ /*
+ * Add the insert-column-names list. Any indirection decoration needed on
+ * the column names can be inferred from the top targetlist.
+ */
+ if (query->targetList)
+ {
+ strippedexprs = get_insert_column_names_list(query->targetList,
+ buf, context, rte);
+ }
+
+ if (query->override)
+ {
+ if (query->override == OVERRIDING_SYSTEM_VALUE)
+ appendStringInfoString(buf, "OVERRIDING SYSTEM VALUE ");
+ else if (query->override == OVERRIDING_USER_VALUE)
+ appendStringInfoString(buf, "OVERRIDING USER VALUE ");
+ }
+
+ if (select_rte)
+ {
+ /* Add the SELECT */
+ get_query_def(select_rte->subquery, buf, context->namespaces, NULL,
+ false,
+ context->prettyFlags, context->wrapColumn,
+ context->indentLevel);
+ }
+ else if (values_rte)
+ {
+ /* Add the multi-VALUES expression lists */
+ get_values_def(values_rte->values_lists, context);
+ }
+ else if (strippedexprs)
+ {
+ /* Add the single-VALUES expression list */
+ appendContextKeyword(context, "VALUES (",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
+ get_rule_list_toplevel(strippedexprs, context, false);
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ {
+ /* No expressions, so it must be DEFAULT VALUES */
+ appendStringInfoString(buf, "DEFAULT VALUES");
+ }
+
+ /* Add ON CONFLICT if present */
+ if (query->onConflict)
+ {
+ OnConflictExpr *confl = query->onConflict;
+
+ appendStringInfoString(buf, " ON CONFLICT");
+
+ if (confl->arbiterElems)
+ {
+			/* Add the conflict target (arbiter index column) list */
+ appendStringInfoChar(buf, '(');
+ get_rule_expr((Node *) confl->arbiterElems, context, false);
+ appendStringInfoChar(buf, ')');
+
+ /* Add a WHERE clause (for partial indexes) if given */
+ if (confl->arbiterWhere != NULL)
+ {
+ bool save_varprefix;
+
+ /*
+ * Force non-prefixing of Vars, since parser assumes that they
+ * belong to target relation. WHERE clause does not use
+ * InferenceElem, so this is separately required.
+ */
+ save_varprefix = context->varprefix;
+ context->varprefix = false;
+
+ appendContextKeyword(context, " WHERE ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_rule_expr(confl->arbiterWhere, context, false);
+
+ context->varprefix = save_varprefix;
+ }
+ }
+ else if (OidIsValid(confl->constraint))
+ {
+ char *constraint = get_constraint_name(confl->constraint);
+ int64 shardId = context->shardid;
+
+ if (shardId > 0)
+ {
+ AppendShardIdToName(&constraint, shardId);
+ }
+
+ if (!constraint)
+ elog(ERROR, "cache lookup failed for constraint %u",
+ confl->constraint);
+ appendStringInfo(buf, " ON CONSTRAINT %s",
+ quote_identifier(constraint));
+ }
+
+ if (confl->action == ONCONFLICT_NOTHING)
+ {
+ appendStringInfoString(buf, " DO NOTHING");
+ }
+ else
+ {
+ appendStringInfoString(buf, " DO UPDATE SET ");
+ /* Deparse targetlist */
+ get_update_query_targetlist_def(query, confl->onConflictSet,
+ context, rte);
+
+ /* Add a WHERE clause if given */
+ if (confl->onConflictWhere != NULL)
+ {
+ appendContextKeyword(context, " WHERE ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_rule_expr(confl->onConflictWhere, context, false);
+ }
+ }
+ }
+
+ /* Add RETURNING if present */
+ if (query->returningList)
+ {
+ appendContextKeyword(context, " RETURNING",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_target_list(query->returningList, context, NULL, colNamesVisible);
+ }
+}
+
+/* ----------
+ * get_update_query_def - Parse back an UPDATE parsetree
+ * ----------
+ */
+static void
+get_update_query_def(Query *query, deparse_context *context,
+ bool colNamesVisible)
+{
+ StringInfo buf = context->buf;
+ RangeTblEntry *rte;
+
+ /* Insert the WITH clause if given */
+ get_with_clause(query, context);
+
+ /*
+ * Start the query with UPDATE relname SET
+ */
+ rte = rt_fetch(query->resultRelation, query->rtable);
+
+ if (PRETTY_INDENT(context))
+ {
+ appendStringInfoChar(buf, ' ');
+ context->indentLevel += PRETTYINDENT_STD;
+ }
+
+ /* if it's a shard, do differently */
+ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD)
+ {
+ char *fragmentSchemaName = NULL;
+ char *fragmentTableName = NULL;
+
+ ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL);
+
+ /* use schema and table name from the remote alias */
+ appendStringInfo(buf, "UPDATE %s%s",
+ only_marker(rte),
+ generate_fragment_name(fragmentSchemaName, fragmentTableName));
+
+		if (rte->eref != NULL)
+ appendStringInfo(buf, " %s",
+ quote_identifier(get_rtable_name(query->resultRelation, context)));
+ }
+ else
+ {
+ appendStringInfo(buf, "UPDATE %s%s",
+ only_marker(rte),
+ generate_relation_or_shard_name(rte->relid,
+ context->distrelid,
+ context->shardid, NIL));
+
+ /* Print the relation alias, if needed */
+ get_rte_alias(rte, query->resultRelation, false, context);
+ }
+
+ appendStringInfoString(buf, " SET ");
+
+ /* Deparse targetlist */
+ get_update_query_targetlist_def(query, query->targetList, context, rte);
+
+ /* Add the FROM clause if needed */
+ get_from_clause(query, " FROM ", context);
+
+ /* Add a WHERE clause if given */
+ if (query->jointree->quals != NULL)
+ {
+ appendContextKeyword(context, " WHERE ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_rule_expr(query->jointree->quals, context, false);
+ }
+
+ /* Add RETURNING if present */
+ if (query->returningList)
+ {
+ appendContextKeyword(context, " RETURNING",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_target_list(query->returningList, context, NULL, colNamesVisible);
+ }
+}
+
+/* ----------
+ * get_update_query_targetlist_def - Parse back an UPDATE targetlist
+ * ----------
+ */
+static void
+get_update_query_targetlist_def(Query *query, List *targetList,
+ deparse_context *context, RangeTblEntry *rte)
+{
+ StringInfo buf = context->buf;
+ ListCell *l;
+ ListCell *next_ma_cell;
+ int remaining_ma_columns;
+ const char *sep;
+ SubLink *cur_ma_sublink;
+ List *ma_sublinks;
+
+ /*
+ * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks
+ * into a list. We expect them to appear, in ID order, in resjunk tlist
+ * entries.
+ */
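+	/*
+	 * For example, "UPDATE tab SET (a, b) = (SELECT x, y FROM src)" yields
+	 * one resjunk MULTIEXPR SubLink plus PARAM_MULTIEXPR Params in the
+	 * per-column entries; we pair them back up while printing.
+	 */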
+ ma_sublinks = NIL;
+ if (query->hasSubLinks) /* else there can't be any */
+ {
+ foreach(l, targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->resjunk && IsA(tle->expr, SubLink))
+ {
+ SubLink *sl = (SubLink *) tle->expr;
+
+ if (sl->subLinkType == MULTIEXPR_SUBLINK)
+ {
+ ma_sublinks = lappend(ma_sublinks, sl);
+ Assert(sl->subLinkId == list_length(ma_sublinks));
+ }
+ }
+ }
+ }
+ next_ma_cell = list_head(ma_sublinks);
+ cur_ma_sublink = NULL;
+ remaining_ma_columns = 0;
+
+ /* Add the comma separated list of 'attname = value' */
+ sep = "";
+ foreach(l, targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+ Node *expr;
+
+ if (tle->resjunk)
+ continue; /* ignore junk entries */
+
+ /* Emit separator (OK whether we're in multiassignment or not) */
+ appendStringInfoString(buf, sep);
+ sep = ", ";
+
+ /*
+ * Check to see if we're starting a multiassignment group: if so,
+ * output a left paren.
+ */
+ if (next_ma_cell != NULL && cur_ma_sublink == NULL)
+ {
+ /*
+ * We must dig down into the expr to see if it's a PARAM_MULTIEXPR
+ * Param. That could be buried under FieldStores and
+ * SubscriptingRefs and CoerceToDomains (cf processIndirection()),
+ * and underneath those there could be an implicit type coercion.
+ * Because we would ignore implicit type coercions anyway, we
+ * don't need to be as careful as processIndirection() is about
+ * descending past implicit CoerceToDomains.
+ */
+ expr = (Node *) tle->expr;
+ while (expr)
+ {
+ if (IsA(expr, FieldStore))
+ {
+ FieldStore *fstore = (FieldStore *) expr;
+
+ expr = (Node *) linitial(fstore->newvals);
+ }
+ else if (IsA(expr, SubscriptingRef))
+ {
+ SubscriptingRef *sbsref = (SubscriptingRef *) expr;
+
+ if (sbsref->refassgnexpr == NULL)
+ break;
+ expr = (Node *) sbsref->refassgnexpr;
+ }
+ else if (IsA(expr, CoerceToDomain))
+ {
+ CoerceToDomain *cdomain = (CoerceToDomain *) expr;
+
+ if (cdomain->coercionformat != COERCE_IMPLICIT_CAST)
+ break;
+ expr = (Node *) cdomain->arg;
+ }
+ else
+ break;
+ }
+ expr = strip_implicit_coercions(expr);
+
+ if (expr && IsA(expr, Param) &&
+ ((Param *) expr)->paramkind == PARAM_MULTIEXPR)
+ {
+ cur_ma_sublink = (SubLink *) lfirst(next_ma_cell);
+ next_ma_cell = lnext(ma_sublinks, next_ma_cell);
+ remaining_ma_columns = count_nonjunk_tlist_entries(
+ ((Query *) cur_ma_sublink->subselect)->targetList);
+ Assert(((Param *) expr)->paramid ==
+ ((cur_ma_sublink->subLinkId << 16) | 1));
+ appendStringInfoChar(buf, '(');
+ }
+ }
+
+ /*
+ * Put out name of target column; look in the catalogs, not at
+ * tle->resname, since resname will fail to track RENAME.
+ */
+ appendStringInfoString(buf,
+ quote_identifier(get_attname(rte->relid,
+ tle->resno,
+ false)));
+
+ /*
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
+ */
+ expr = processIndirection((Node *) tle->expr, context);
+
+ /*
+ * If we're in a multiassignment, skip printing anything more, unless
+ * this is the last column; in which case, what we print should be the
+ * sublink, not the Param.
+ */
+ if (cur_ma_sublink != NULL)
+ {
+ if (--remaining_ma_columns > 0)
+ continue; /* not the last column of multiassignment */
+ appendStringInfoChar(buf, ')');
+ expr = (Node *) cur_ma_sublink;
+ cur_ma_sublink = NULL;
+ }
+
+ appendStringInfoString(buf, " = ");
+
+ get_rule_expr(expr, context, false);
+ }
+}
+
+/* ----------
+ * get_delete_query_def - Parse back a DELETE parsetree
+ * ----------
+ */
+static void
+get_delete_query_def(Query *query, deparse_context *context,
+ bool colNamesVisible)
+{
+ StringInfo buf = context->buf;
+ RangeTblEntry *rte;
+
+ /* Insert the WITH clause if given */
+ get_with_clause(query, context);
+
+ /*
+ * Start the query with DELETE FROM relname
+ */
+ rte = rt_fetch(query->resultRelation, query->rtable);
+
+ if (PRETTY_INDENT(context))
+ {
+ appendStringInfoChar(buf, ' ');
+ context->indentLevel += PRETTYINDENT_STD;
+ }
+
+ /* if it's a shard, do differently */
+ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD)
+ {
+ char *fragmentSchemaName = NULL;
+ char *fragmentTableName = NULL;
+
+ ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL);
+
+ /* use schema and table name from the remote alias */
+ appendStringInfo(buf, "DELETE FROM %s%s",
+ only_marker(rte),
+ generate_fragment_name(fragmentSchemaName, fragmentTableName));
+
+		if (rte->eref != NULL)
+ appendStringInfo(buf, " %s",
+ quote_identifier(get_rtable_name(query->resultRelation, context)));
+ }
+ else
+ {
+ appendStringInfo(buf, "DELETE FROM %s%s",
+ only_marker(rte),
+ generate_relation_or_shard_name(rte->relid,
+ context->distrelid,
+ context->shardid, NIL));
+
+ /* Print the relation alias, if needed */
+ get_rte_alias(rte, query->resultRelation, false, context);
+ }
+
+ /* Add the USING clause if given */
+ get_from_clause(query, " USING ", context);
+
+ /* Add a WHERE clause if given */
+ if (query->jointree->quals != NULL)
+ {
+ appendContextKeyword(context, " WHERE ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_rule_expr(query->jointree->quals, context, false);
+ }
+
+ /* Add RETURNING if present */
+ if (query->returningList)
+ {
+ appendContextKeyword(context, " RETURNING",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
+ get_target_list(query->returningList, context, NULL, colNamesVisible);
+ }
+}
+
+
+/* ----------
+ * get_merge_query_def - Parse back a MERGE parsetree
+ * ----------
+ */
+static void
+get_merge_query_def(Query *query, deparse_context *context,
+ bool colNamesVisible)
+{
+ StringInfo buf = context->buf;
+ RangeTblEntry *rte;
+ ListCell *lc;
+
+ /* Insert the WITH clause if given */
+ get_with_clause(query, context);
+
+ /*
+ * Start the query with MERGE INTO relname
+ */
+ rte = ExtractResultRelationRTE(query);
+
+ if (PRETTY_INDENT(context))
+ {
+ appendStringInfoChar(buf, ' ');
+ context->indentLevel += PRETTYINDENT_STD;
+ }
+
+ /* if it's a shard, do differently */
+ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD)
+ {
+ char *fragmentSchemaName = NULL;
+ char *fragmentTableName = NULL;
+
+ ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL);
+
+ /* use schema and table name from the remote alias */
+ appendStringInfo(buf, "MERGE INTO %s%s",
+ only_marker(rte),
+ generate_fragment_name(fragmentSchemaName, fragmentTableName));
+
+		if (rte->eref != NULL)
+ appendStringInfo(buf, " %s",
+ quote_identifier(get_rtable_name(query->resultRelation, context)));
+ }
+ else
+ {
+ appendStringInfo(buf, "MERGE INTO %s%s",
+ only_marker(rte),
+ generate_relation_or_shard_name(rte->relid,
+ context->distrelid,
+ context->shardid, NIL));
+
+ if (rte->alias != NULL)
+ appendStringInfo(buf, " %s",
+ quote_identifier(get_rtable_name(query->resultRelation, context)));
+ }
+
+ /* Print the source relation and join clause */
+ get_from_clause(query, " USING ", context);
+ appendContextKeyword(context, " ON ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
+ get_rule_expr(query->jointree->quals, context, false);
+
+ /* Print each merge action */
+ foreach(lc, query->mergeActionList)
+ {
+ MergeAction *action = lfirst_node(MergeAction, lc);
+
+ appendContextKeyword(context, " WHEN ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
+ appendStringInfo(buf, "%sMATCHED", action->matched ? "" : "NOT ");
+
+ if (action->qual)
+ {
+ appendContextKeyword(context, " AND ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 3);
+ get_rule_expr(action->qual, context, false);
+ }
+ appendContextKeyword(context, " THEN ",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 3);
+
+ if (action->commandType == CMD_INSERT)
+ {
+ /* This generally matches get_insert_query_def() */
+ List *strippedexprs = NIL;
+ const char *sep = "";
+ ListCell *lc2;
+
+ appendStringInfoString(buf, "INSERT");
+
+ if (action->targetList)
+ appendStringInfoString(buf, " (");
+ foreach(lc2, action->targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(lc2);
+
+ Assert(!tle->resjunk);
+
+ appendStringInfoString(buf, sep);
+ sep = ", ";
+
+ appendStringInfoString(buf,
+ quote_identifier(get_attname(rte->relid,
+ tle->resno,
+ false)));
+ strippedexprs = lappend(strippedexprs,
+ processIndirection((Node *) tle->expr,
+ context));
+ }
+ if (action->targetList)
+ appendStringInfoChar(buf, ')');
+
+ if (action->override)
+ {
+ if (action->override == OVERRIDING_SYSTEM_VALUE)
+ appendStringInfoString(buf, " OVERRIDING SYSTEM VALUE");
+ else if (action->override == OVERRIDING_USER_VALUE)
+ appendStringInfoString(buf, " OVERRIDING USER VALUE");
+ }
+
+ if (strippedexprs)
+ {
+ appendContextKeyword(context, " VALUES (",
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 4);
+ get_rule_list_toplevel(strippedexprs, context, false);
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ appendStringInfoString(buf, " DEFAULT VALUES");
+ }
+ else if (action->commandType == CMD_UPDATE)
+ {
+ appendStringInfoString(buf, "UPDATE SET ");
+ get_update_query_targetlist_def(query, action->targetList,
+ context, rte);
+ }
+ else if (action->commandType == CMD_DELETE)
+ appendStringInfoString(buf, "DELETE");
+ else if (action->commandType == CMD_NOTHING)
+ appendStringInfoString(buf, "DO NOTHING");
+ }
+
+ /* No RETURNING support in MERGE yet */
+ Assert(query->returningList == NIL);
+
+	ereport(DEBUG1, (errmsg("%s", buf->data)));
+}
+
+
+/* ----------
+ * get_utility_query_def - Parse back a UTILITY parsetree
+ * ----------
+ */
+static void
+get_utility_query_def(Query *query, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+
+ if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt))
+ {
+ NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt;
+
+ appendContextKeyword(context, "",
+ 0, PRETTYINDENT_STD, 1);
+ appendStringInfo(buf, "NOTIFY %s",
+ quote_identifier(stmt->conditionname));
+ if (stmt->payload)
+ {
+ appendStringInfoString(buf, ", ");
+ simple_quote_literal(buf, stmt->payload);
+ }
+ }
+ else if (query->utilityStmt && IsA(query->utilityStmt, TruncateStmt))
+ {
+ TruncateStmt *stmt = (TruncateStmt *) query->utilityStmt;
+ List *relationList = stmt->relations;
+ ListCell *relationCell = NULL;
+
+ appendContextKeyword(context, "",
+ 0, PRETTYINDENT_STD, 1);
+
+ appendStringInfo(buf, "TRUNCATE TABLE");
+
+ foreach(relationCell, relationList)
+ {
+ RangeVar *relationVar = (RangeVar *) lfirst(relationCell);
+ Oid relationId = RangeVarGetRelid(relationVar, NoLock, false);
+ char *relationName = generate_relation_or_shard_name(relationId,
+ context->distrelid,
+ context->shardid, NIL);
+ appendStringInfo(buf, " %s", relationName);
+
+ if (lnext(relationList, relationCell) != NULL)
+ {
+ appendStringInfo(buf, ",");
+ }
+ }
+
+ if (stmt->restart_seqs)
+ {
+ appendStringInfo(buf, " RESTART IDENTITY");
+ }
+
+ if (stmt->behavior == DROP_CASCADE)
+ {
+ appendStringInfo(buf, " CASCADE");
+ }
+ }
+ else
+ {
+		/* Currently only NOTIFY and TRUNCATE utility commands are supported here */
+ elog(ERROR, "unexpected utility statement type");
+ }
+}
+
+/*
+ * Display a Var appropriately.
+ *
+ * In some cases (currently only when recursing into an unnamed join)
+ * the Var's varlevelsup has to be interpreted with respect to a context
+ * above the current one; levelsup indicates the offset.
+ *
+ * If istoplevel is true, the Var is at the top level of a SELECT's
+ * targetlist, which means we need special treatment of whole-row Vars.
+ * Instead of the normal "tab.*", we'll print "tab.*::typename", which is a
+ * dirty hack to prevent "tab.*" from being expanded into multiple columns.
+ * (The parser will strip the useless coercion, so no inefficiency is added in
+ * dump and reload.) We used to print just "tab" in such cases, but that is
+ * ambiguous and will yield the wrong result if "tab" is also a plain column
+ * name in the query.
+ *
+ * Returns the attname of the Var, or NULL if the Var has no attname (because
+ * it is a whole-row Var or a subplan output reference).
+ */
+static char *
+get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ RangeTblEntry *rte;
+ AttrNumber attnum;
+ int varno;
+ AttrNumber varattno;
+ int netlevelsup;
+ deparse_namespace *dpns;
+ deparse_columns *colinfo;
+ char *refname;
+ char *attname;
+
+ /* Find appropriate nesting depth */
+ netlevelsup = var->varlevelsup + levelsup;
+ if (netlevelsup >= list_length(context->namespaces))
+ elog(ERROR, "bogus varlevelsup: %d offset %d",
+ var->varlevelsup, levelsup);
+ dpns = (deparse_namespace *) list_nth(context->namespaces,
+ netlevelsup);
+
+ varno = var->varno;
+ varattno = var->varattno;
+
+	if (var->varnosyn > 0 && var->varnosyn <= list_length(dpns->rtable) && dpns->plan == NULL)
+	{
+		rte = rt_fetch(var->varnosyn, dpns->rtable);
+
+		/*
+		 * If the RTE that var->varnosyn points to is not a regular table but
+		 * a join, the correct relation name is found via var->varnosyn and
+		 * var->varattnosyn rather than var->varno and var->varattno.
+		 */
+		if (rte->rtekind == RTE_JOIN && rte->relid == 0 && var->varnosyn != var->varno)
+		{
+			varno = var->varnosyn;
+			varattno = var->varattnosyn;
+		}
+	}
+
+ /*
+ * Try to find the relevant RTE in this rtable. In a plan tree, it's
+ * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig
+ * down into the subplans, or INDEX_VAR, which is resolved similarly. Also
+ * find the aliases previously assigned for this RTE.
+ */
+ if (varno >= 1 && varno <= list_length(dpns->rtable))
+ {
+
+ /*
+ * We might have been asked to map child Vars to some parent relation.
+ */
+ if (context->appendparents && dpns->appendrels)
+ {
+
+ int pvarno = varno;
+ AttrNumber pvarattno = varattno;
+ AppendRelInfo *appinfo = dpns->appendrels[pvarno];
+ bool found = false;
+
+ /* Only map up to inheritance parents, not UNION ALL appendrels */
+ while (appinfo &&
+ rt_fetch(appinfo->parent_relid,
+ dpns->rtable)->rtekind == RTE_RELATION)
+ {
+ found = false;
+ if (pvarattno > 0) /* system columns stay as-is */
+ {
+ if (pvarattno > appinfo->num_child_cols)
+ break; /* safety check */
+ pvarattno = appinfo->parent_colnos[pvarattno - 1];
+ if (pvarattno == 0)
+ break; /* Var is local to child */
+ }
+
+ pvarno = appinfo->parent_relid;
+ found = true;
+
+ /* If the parent is itself a child, continue up. */
+ Assert(pvarno > 0 && pvarno <= list_length(dpns->rtable));
+ appinfo = dpns->appendrels[pvarno];
+ }
+
+ /*
+ * If we found an ancestral rel, and that rel is included in
+ * appendparents, print that column not the original one.
+ */
+ if (found && bms_is_member(pvarno, context->appendparents))
+ {
+ varno = pvarno;
+ varattno = pvarattno;
+ }
+ }
+
+ rte = rt_fetch(varno, dpns->rtable);
+ refname = (char *) list_nth(dpns->rtable_names, varno - 1);
+ colinfo = deparse_columns_fetch(varno, dpns);
+ attnum = varattno;
+ }
+ else
+ {
+ resolve_special_varno((Node *) var, context, get_special_variable,
+ NULL);
+ return NULL;
+ }
+
+ /*
+ * The planner will sometimes emit Vars referencing resjunk elements of a
+ * subquery's target list (this is currently only possible if it chooses
+ * to generate a "physical tlist" for a SubqueryScan or CteScan node).
+ * Although we prefer to print subquery-referencing Vars using the
+ * subquery's alias, that's not possible for resjunk items since they have
+ * no alias. So in that case, drill down to the subplan and print the
+ * contents of the referenced tlist item. This works because in a plan
+ * tree, such Vars can only occur in a SubqueryScan or CteScan node, and
+ * we'll have set dpns->inner_plan to reference the child plan node.
+ */
+ if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) &&
+ attnum > list_length(rte->eref->colnames) &&
+ dpns->inner_plan)
+ {
+ TargetEntry *tle;
+ deparse_namespace save_dpns;
+
+ tle = get_tle_by_resno(dpns->inner_tlist, attnum);
+ if (!tle)
+ elog(ERROR, "invalid attnum %d for relation \"%s\"",
+ attnum, rte->eref->aliasname);
+
+ Assert(netlevelsup == 0);
+ push_child_plan(dpns, dpns->inner_plan, &save_dpns);
+
+ /*
+ * Force parentheses because our caller probably assumed a Var is a
+ * simple expression.
+ */
+ if (!IsA(tle->expr, Var))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr((Node *) tle->expr, context, true);
+ if (!IsA(tle->expr, Var))
+ appendStringInfoChar(buf, ')');
+
+ pop_child_plan(dpns, &save_dpns);
+ return NULL;
+ }
+
+ /*
+ * If it's an unnamed join, look at the expansion of the alias variable.
+ * If it's a simple reference to one of the input vars, then recursively
+ * print the name of that var instead. When it's not a simple reference,
+ * we have to just print the unqualified join column name. (This can only
+ * happen with "dangerous" merged columns in a JOIN USING; we took pains
+ * previously to make the unqualified column name unique in such cases.)
+ *
+ * This wouldn't work in decompiling plan trees, because we don't store
+ * joinaliasvars lists after planning; but a plan tree should never
+ * contain a join alias variable.
+ */
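+	/*
+	 * For example, in "SELECT id FROM a FULL JOIN b USING (id)" the merged
+	 * "id" column is a COALESCE of the two inputs rather than a simple Var,
+	 * so only the unqualified column name can be printed for it.
+	 */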
+ if (rte->rtekind == RTE_JOIN && rte->alias == NULL)
+ {
+ if (rte->joinaliasvars == NIL)
+ elog(ERROR, "cannot decompile join alias var in plan tree");
+ if (attnum > 0)
+ {
+ Var *aliasvar;
+
+ aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1);
+ /* we intentionally don't strip implicit coercions here */
+ if (aliasvar && IsA(aliasvar, Var))
+ {
+ return get_variable(aliasvar, var->varlevelsup + levelsup,
+ istoplevel, context);
+ }
+ }
+
+ /*
+ * Unnamed join has no refname. (Note: since it's unnamed, there is
+ * no way the user could have referenced it to create a whole-row Var
+ * for it. So we don't have to cover that case below.)
+ */
+ Assert(refname == NULL);
+ }
+
+ if (attnum == InvalidAttrNumber)
+ attname = NULL;
+ else if (attnum > 0)
+ {
+ /* Get column name to use from the colinfo struct */
+ if (attnum > colinfo->num_cols)
+ elog(ERROR, "invalid attnum %d for relation \"%s\"",
+ attnum, rte->eref->aliasname);
+ attname = colinfo->colnames[attnum - 1];
+
+ /*
+ * If we find a Var referencing a dropped column, it seems better to
+ * print something (anything) than to fail. In general this should
+ * not happen, but it used to be possible for some cases involving
+ * functions returning named composite types, and perhaps there are
+ * still bugs out there.
+ */
+ if (attname == NULL)
+ attname = "?dropped?column?";
+ }
+ else if (GetRangeTblKind(rte) == CITUS_RTE_SHARD)
+ {
+ /* System column on a Citus shard */
+ attname = get_attname(rte->relid, attnum, false);
+ }
+ else
+ {
+ /* System column - name is fixed, get it from the catalog */
+ attname = get_rte_attribute_name(rte, attnum);
+ }
+
+ if (refname && (context->varprefix || attname == NULL))
+ {
+ appendStringInfoString(buf, quote_identifier(refname));
+ appendStringInfoChar(buf, '.');
+ }
+ if (attname)
+ appendStringInfoString(buf, quote_identifier(attname));
+ else
+ {
+ appendStringInfoChar(buf, '*');
+
+ if (istoplevel)
+ {
+ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD)
+ {
+ /* use rel.*::shard_name instead of rel.*::table_name */
+ appendStringInfo(buf, "::%s",
+ generate_rte_shard_name(rte));
+ }
+ else
+ {
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(var->vartype,
+ var->vartypmod));
+ }
+ }
+ }
+
+ return attname;
+}
+
+/*
+ * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This
+ * routine is actually a callback for get_special_varno, which handles finding
+ * the correct TargetEntry. We get the expression contained in that
+ * TargetEntry and just need to deparse it, a job we can throw back on
+ * get_rule_expr.
+ */
+static void
+get_special_variable(Node *node, deparse_context *context, void *callback_arg)
+{
+ StringInfo buf = context->buf;
+
+ /*
+ * For a non-Var referent, force parentheses because our caller probably
+ * assumed a Var is a simple expression.
+ */
+ if (!IsA(node, Var))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr(node, context, true);
+ if (!IsA(node, Var))
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Chase through plan references to special varnos (OUTER_VAR, INNER_VAR,
+ * INDEX_VAR) until we find a real Var or some kind of non-Var node; then,
+ * invoke the callback provided.
+ */
+static void
+resolve_special_varno(Node *node, deparse_context *context, rsv_callback callback, void *callback_arg)
+{
+ Var *var;
+ deparse_namespace *dpns;
+
+ /* This function is recursive, so let's be paranoid. */
+ check_stack_depth();
+
+ /* If it's not a Var, invoke the callback. */
+ if (!IsA(node, Var))
+ {
+ (*callback) (node, context, callback_arg);
+ return;
+ }
+
+ /* Find appropriate nesting depth */
+ var = (Var *) node;
+ dpns = (deparse_namespace *) list_nth(context->namespaces,
+ var->varlevelsup);
+
+ /*
+ * It's a special RTE, so recurse.
+ */
+ if (var->varno == OUTER_VAR && dpns->outer_tlist)
+ {
+ TargetEntry *tle;
+ deparse_namespace save_dpns;
+ Bitmapset *save_appendparents;
+
+ tle = get_tle_by_resno(dpns->outer_tlist, var->varattno);
+ if (!tle)
+ elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno);
+
+		/*
+		 * If we're descending to the first child of an Append or MergeAppend,
+		 * update appendparents. This will affect deparsing of all Vars
+		 * appearing within the eventually-resolved subexpression.
+		 */
+ save_appendparents = context->appendparents;
+
+ if (IsA(dpns->plan, Append))
+ context->appendparents = bms_union(context->appendparents,
+ ((Append *) dpns->plan)->apprelids);
+ else if (IsA(dpns->plan, MergeAppend))
+ context->appendparents = bms_union(context->appendparents,
+ ((MergeAppend *) dpns->plan)->apprelids);
+
+ push_child_plan(dpns, dpns->outer_plan, &save_dpns);
+ resolve_special_varno((Node *) tle->expr, context,
+ callback, callback_arg);
+ pop_child_plan(dpns, &save_dpns);
+ context->appendparents = save_appendparents;
+ return;
+ }
+ else if (var->varno == INNER_VAR && dpns->inner_tlist)
+ {
+ TargetEntry *tle;
+ deparse_namespace save_dpns;
+
+ tle = get_tle_by_resno(dpns->inner_tlist, var->varattno);
+ if (!tle)
+ elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno);
+
+ push_child_plan(dpns, dpns->inner_plan, &save_dpns);
+ resolve_special_varno((Node *) tle->expr, context, callback, callback_arg);
+ pop_child_plan(dpns, &save_dpns);
+ return;
+ }
+ else if (var->varno == INDEX_VAR && dpns->index_tlist)
+ {
+ TargetEntry *tle;
+
+ tle = get_tle_by_resno(dpns->index_tlist, var->varattno);
+ if (!tle)
+ elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno);
+
+ resolve_special_varno((Node *) tle->expr, context, callback, callback_arg);
+ return;
+ }
+ else if (var->varno < 1 || var->varno > list_length(dpns->rtable))
+ elog(ERROR, "bogus varno: %d", var->varno);
+
+ /* Not special. Just invoke the callback. */
+ (*callback) (node, context, callback_arg);
+}
+
+/*
+ * Get the name of a field of an expression of composite type. The
+ * expression is usually a Var, but we handle other cases too.
+ *
+ * levelsup is an extra offset to interpret the Var's varlevelsup correctly.
+ *
+ * This is fairly straightforward when the expression has a named composite
+ * type; we need only look up the type in the catalogs. However, the type
+ * could also be RECORD. Since no actual table or view column is allowed to
+ * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
+ * or to a subquery output. We drill down to find the ultimate defining
+ * expression and attempt to infer the field name from it. We ereport if we
+ * can't determine the name.
+ *
+ * Similarly, a PARAM of type RECORD has to refer to some expression of
+ * a determinable composite type.
+ */
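+/*
+ * For example, for a field selection like "(ss.x).f1" where ss.x is a
+ * RECORD-typed output of a sub-select, we drill into the sub-select's
+ * targetlist to find the expression that produced x and take the field name
+ * from there.
+ */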
+static const char *
+get_name_for_var_field(Var *var, int fieldno,
+ int levelsup, deparse_context *context)
+{
+ RangeTblEntry *rte;
+ AttrNumber attnum;
+ int netlevelsup;
+ deparse_namespace *dpns;
+ int varno;
+ AttrNumber varattno;
+ TupleDesc tupleDesc;
+ Node *expr;
+
+ /*
+ * If it's a RowExpr that was expanded from a whole-row Var, use the
+ * column names attached to it. (We could let get_expr_result_tupdesc()
+ * handle this, but it's much cheaper to just pull out the name we need.)
+ */
+ if (IsA(var, RowExpr))
+ {
+ RowExpr *r = (RowExpr *) var;
+
+ if (fieldno > 0 && fieldno <= list_length(r->colnames))
+ return strVal(list_nth(r->colnames, fieldno - 1));
+ }
+
+ /*
+ * If it's a Param of type RECORD, try to find what the Param refers to.
+ */
+ if (IsA(var, Param))
+ {
+ Param *param = (Param *) var;
+ ListCell *ancestor_cell;
+
+ expr = find_param_referent(param, context, &dpns, &ancestor_cell);
+ if (expr)
+ {
+ /* Found a match, so recurse to decipher the field name */
+ deparse_namespace save_dpns;
+ const char *result;
+
+ push_ancestor_plan(dpns, ancestor_cell, &save_dpns);
+ result = get_name_for_var_field((Var *) expr, fieldno,
+ 0, context);
+ pop_ancestor_plan(dpns, &save_dpns);
+ return result;
+ }
+ }
+
+ /*
+ * If it's a Var of type RECORD, we have to find what the Var refers to;
+ * if not, we can use get_expr_result_tupdesc().
+ */
+ if (!IsA(var, Var) ||
+ var->vartype != RECORDOID)
+ {
+ tupleDesc = get_expr_result_tupdesc((Node *) var, false);
+ /* Got the tupdesc, so we can extract the field name */
+ Assert(fieldno >= 1 && fieldno <= tupleDesc->natts);
+ return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname);
+ }
+
+ /* Find appropriate nesting depth */
+ netlevelsup = var->varlevelsup + levelsup;
+ if (netlevelsup >= list_length(context->namespaces))
+ elog(ERROR, "bogus varlevelsup: %d offset %d",
+ var->varlevelsup, levelsup);
+ dpns = (deparse_namespace *) list_nth(context->namespaces,
+ netlevelsup);
+
+ varno = var->varno;
+ varattno = var->varattno;
+
+	if (var->varnosyn > 0 && var->varnosyn <= list_length(dpns->rtable) && dpns->plan == NULL)
+	{
+		rte = rt_fetch(var->varnosyn, dpns->rtable);
+
+		/*
+		 * If the RTE that var->varnosyn points to is not a regular table but
+		 * a join, the correct relation name is found via var->varnosyn and
+		 * var->varattnosyn rather than var->varno and var->varattno.
+		 */
+		if (rte->rtekind == RTE_JOIN && rte->relid == 0 && var->varnosyn != var->varno)
+		{
+			varno = var->varnosyn;
+			varattno = var->varattnosyn;
+		}
+	}
+
+ /*
+ * Try to find the relevant RTE in this rtable. In a plan tree, it's
+ * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig
+ * down into the subplans, or INDEX_VAR, which is resolved similarly.
+ */
+ if (varno >= 1 && varno <= list_length(dpns->rtable))
+ {
+ rte = rt_fetch(varno, dpns->rtable);
+ attnum = varattno;
+ }
+ else if (varno == OUTER_VAR && dpns->outer_tlist)
+ {
+ TargetEntry *tle;
+ deparse_namespace save_dpns;
+ const char *result;
+
+ tle = get_tle_by_resno(dpns->outer_tlist, varattno);
+ if (!tle)
+ elog(ERROR, "bogus varattno for OUTER_VAR var: %d", varattno);
+
+ Assert(netlevelsup == 0);
+ push_child_plan(dpns, dpns->outer_plan, &save_dpns);
+
+ result = get_name_for_var_field((Var *) tle->expr, fieldno,
+ levelsup, context);
+
+ pop_child_plan(dpns, &save_dpns);
+ return result;
+ }
+ else if (varno == INNER_VAR && dpns->inner_tlist)
+ {
+ TargetEntry *tle;
+ deparse_namespace save_dpns;
+ const char *result;
+
+ tle = get_tle_by_resno(dpns->inner_tlist, varattno);
+ if (!tle)
+ elog(ERROR, "bogus varattno for INNER_VAR var: %d", varattno);
+
+ Assert(netlevelsup == 0);
+ push_child_plan(dpns, dpns->inner_plan, &save_dpns);
+
+ result = get_name_for_var_field((Var *) tle->expr, fieldno,
+ levelsup, context);
+
+ pop_child_plan(dpns, &save_dpns);
+ return result;
+ }
+ else if (varno == INDEX_VAR && dpns->index_tlist)
+ {
+ TargetEntry *tle;
+ const char *result;
+
+ tle = get_tle_by_resno(dpns->index_tlist, varattno);
+ if (!tle)
+ elog(ERROR, "bogus varattno for INDEX_VAR var: %d", varattno);
+
+ Assert(netlevelsup == 0);
+
+ result = get_name_for_var_field((Var *) tle->expr, fieldno,
+ levelsup, context);
+
+ return result;
+ }
+ else
+ {
+ elog(ERROR, "bogus varno: %d", varno);
+ return NULL; /* keep compiler quiet */
+ }
+
+ if (attnum == InvalidAttrNumber)
+ {
+ /* Var is whole-row reference to RTE, so select the right field */
+ return get_rte_attribute_name(rte, fieldno);
+ }
+
+ /*
+ * This part has essentially the same logic as the parser's
+ * expandRecordVariable() function, but we are dealing with a different
+ * representation of the input context, and we only need one field name
+ * not a TupleDesc. Also, we need special cases for finding subquery and
+ * CTE subplans when deparsing Plan trees.
+ */
+ expr = (Node *) var; /* default if we can't drill down */
+
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+ case RTE_VALUES:
+ case RTE_NAMEDTUPLESTORE:
+ case RTE_RESULT:
+
+ /*
+ * This case should not occur: a column of a table or values list
+ * shouldn't have type RECORD. Fall through and fail (most
+ * likely) at the bottom.
+ */
+ break;
+ case RTE_SUBQUERY:
+ /* Subselect-in-FROM: examine sub-select's output expr */
+ {
+ if (rte->subquery)
+ {
+ TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList,
+ attnum);
+
+ if (ste == NULL || ste->resjunk)
+ elog(ERROR, "subquery %s does not have attribute %d",
+ rte->eref->aliasname, attnum);
+ expr = (Node *) ste->expr;
+ if (IsA(expr, Var))
+ {
+ /*
+ * Recurse into the sub-select to see what its Var
+ * refers to. We have to build an additional level of
+ * namespace to keep in step with varlevelsup in the
+ * subselect.
+ */
+ deparse_namespace mydpns;
+ const char *result;
+
+ set_deparse_for_query(&mydpns, rte->subquery,
+ context->namespaces);
+
+ context->namespaces = lcons(&mydpns,
+ context->namespaces);
+
+ result = get_name_for_var_field((Var *) expr, fieldno,
+ 0, context);
+
+ context->namespaces =
+ list_delete_first(context->namespaces);
+
+ return result;
+ }
+ /* else fall through to inspect the expression */
+ }
+ else
+ {
+ /*
+ * We're deparsing a Plan tree so we don't have complete
+ * RTE entries (in particular, rte->subquery is NULL). But
+ * the only place we'd see a Var directly referencing a
+ * SUBQUERY RTE is in a SubqueryScan plan node, and we can
+ * look into the child plan's tlist instead.
+ */
+ TargetEntry *tle;
+ deparse_namespace save_dpns;
+ const char *result;
+
+ if (!dpns->inner_plan)
+ elog(ERROR, "failed to find plan for subquery %s",
+ rte->eref->aliasname);
+ tle = get_tle_by_resno(dpns->inner_tlist, attnum);
+ if (!tle)
+ elog(ERROR, "bogus varattno for subquery var: %d",
+ attnum);
+ Assert(netlevelsup == 0);
+ push_child_plan(dpns, dpns->inner_plan, &save_dpns);
+
+ result = get_name_for_var_field((Var *) tle->expr, fieldno,
+ levelsup, context);
+
+ pop_child_plan(dpns, &save_dpns);
+ return result;
+ }
+ }
+ break;
+ case RTE_JOIN:
+ /* Join RTE --- recursively inspect the alias variable */
+ if (rte->joinaliasvars == NIL)
+ elog(ERROR, "cannot decompile join alias var in plan tree");
+ Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars));
+ expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1);
+ Assert(expr != NULL);
+ /* we intentionally don't strip implicit coercions here */
+ if (IsA(expr, Var))
+ return get_name_for_var_field((Var *) expr, fieldno,
+ var->varlevelsup + levelsup,
+ context);
+ /* else fall through to inspect the expression */
+ break;
+ case RTE_FUNCTION:
+ case RTE_TABLEFUNC:
+
+ /*
+ * We couldn't get here unless a function is declared with one of
+ * its result columns as RECORD, which is not allowed.
+ */
+ break;
+ case RTE_CTE:
+ /* CTE reference: examine subquery's output expr */
+ {
+ CommonTableExpr *cte = NULL;
+ Index ctelevelsup;
+ ListCell *lc;
+
+ /*
+ * Try to find the referenced CTE using the namespace stack.
+ */
+ ctelevelsup = rte->ctelevelsup + netlevelsup;
+ if (ctelevelsup >= list_length(context->namespaces))
+ lc = NULL;
+ else
+ {
+ deparse_namespace *ctedpns;
+
+ ctedpns = (deparse_namespace *)
+ list_nth(context->namespaces, ctelevelsup);
+ foreach(lc, ctedpns->ctes)
+ {
+ cte = (CommonTableExpr *) lfirst(lc);
+ if (strcmp(cte->ctename, rte->ctename) == 0)
+ break;
+ }
+ }
+ if (lc != NULL)
+ {
+ Query *ctequery = (Query *) cte->ctequery;
+ TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte),
+ attnum);
+
+ if (ste == NULL || ste->resjunk)
+ elog(ERROR, "subquery %s does not have attribute %d",
+ rte->eref->aliasname, attnum);
+ expr = (Node *) ste->expr;
+ if (IsA(expr, Var))
+ {
+ /*
+ * Recurse into the CTE to see what its Var refers to.
+ * We have to build an additional level of namespace
+ * to keep in step with varlevelsup in the CTE.
+ * Furthermore it could be an outer CTE, so we may
+ * have to delete some levels of namespace.
+ */
+ List *save_nslist = context->namespaces;
+ List *new_nslist;
+ deparse_namespace mydpns;
+ const char *result;
+
+ set_deparse_for_query(&mydpns, ctequery,
+ context->namespaces);
+
+ new_nslist = list_copy_tail(context->namespaces,
+ ctelevelsup);
+ context->namespaces = lcons(&mydpns, new_nslist);
+
+ result = get_name_for_var_field((Var *) expr, fieldno,
+ 0, context);
+
+ context->namespaces = save_nslist;
+
+ return result;
+ }
+ /* else fall through to inspect the expression */
+ }
+ else
+ {
+ /*
+ * We're deparsing a Plan tree so we don't have a CTE
+ * list. But the only places we'd see a Var directly
+ * referencing a CTE RTE are in CteScan or WorkTableScan
+ * plan nodes. For those cases, set_deparse_plan arranged
+ * for dpns->inner_plan to be the plan node that emits the
+ * CTE or RecursiveUnion result, and we can look at its
+ * tlist instead.
+ */
+ TargetEntry *tle;
+ deparse_namespace save_dpns;
+ const char *result;
+
+ if (!dpns->inner_plan)
+ elog(ERROR, "failed to find plan for CTE %s",
+ rte->eref->aliasname);
+ tle = get_tle_by_resno(dpns->inner_tlist, attnum);
+ if (!tle)
+ elog(ERROR, "bogus varattno for subquery var: %d",
+ attnum);
+ Assert(netlevelsup == 0);
+ push_child_plan(dpns, dpns->inner_plan, &save_dpns);
+
+ result = get_name_for_var_field((Var *) tle->expr, fieldno,
+ levelsup, context);
+
+ pop_child_plan(dpns, &save_dpns);
+ return result;
+ }
+ }
+ break;
+ }
+
+ /*
+ * We now have an expression we can't expand any more, so see if
+ * get_expr_result_tupdesc() can do anything with it.
+ */
+ tupleDesc = get_expr_result_tupdesc(expr, false);
+ /* Got the tupdesc, so we can extract the field name */
+ Assert(fieldno >= 1 && fieldno <= tupleDesc->natts);
+ return NameStr(TupleDescAttr(tupleDesc, fieldno - 1)->attname);
+}
+
+/*
+ * Try to find the referenced expression for a PARAM_EXEC Param that might
+ * reference a parameter supplied by an upper NestLoop or SubPlan plan node.
+ *
+ * If successful, return the expression and set *dpns_p and *ancestor_cell_p
+ * appropriately for calling push_ancestor_plan(). If no referent can be
+ * found, return NULL.
+ */
+static Node *
+find_param_referent(Param *param, deparse_context *context,
+ deparse_namespace **dpns_p, ListCell **ancestor_cell_p)
+{
+ /* Initialize output parameters to prevent compiler warnings */
+ *dpns_p = NULL;
+ *ancestor_cell_p = NULL;
+
+ /*
+ * If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or
+ * SubPlan argument. This will necessarily be in some ancestor of the
+ * current expression's Plan.
+ */
+ if (param->paramkind == PARAM_EXEC)
+ {
+ deparse_namespace *dpns;
+ Plan *child_plan;
+ ListCell *lc;
+
+ dpns = (deparse_namespace *) linitial(context->namespaces);
+ child_plan = dpns->plan;
+
+ foreach(lc, dpns->ancestors)
+ {
+ Node *ancestor = (Node *) lfirst(lc);
+ ListCell *lc2;
+
+ /*
+ * NestLoops transmit params to their inner child only.
+ */
+ if (IsA(ancestor, NestLoop) &&
+ child_plan == innerPlan(ancestor))
+ {
+ NestLoop *nl = (NestLoop *) ancestor;
+
+ foreach(lc2, nl->nestParams)
+ {
+ NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2);
+
+ if (nlp->paramno == param->paramid)
+ {
+ /* Found a match, so return it */
+ *dpns_p = dpns;
+ *ancestor_cell_p = lc;
+ return (Node *) nlp->paramval;
+ }
+ }
+ }
+
+ /*
+ * Check to see if we're crawling up from a subplan.
+ */
+			if (IsA(ancestor, SubPlan))
+ {
+ SubPlan *subplan = (SubPlan *) ancestor;
+ ListCell *lc3;
+ ListCell *lc4;
+
+ /* Matched subplan, so check its arguments */
+ forboth(lc3, subplan->parParam, lc4, subplan->args)
+ {
+ int paramid = lfirst_int(lc3);
+ Node *arg = (Node *) lfirst(lc4);
+
+ if (paramid == param->paramid)
+ {
+ /*
+ * Found a match, so return it. But, since Vars in
+ * the arg are to be evaluated in the surrounding
+ * context, we have to point to the next ancestor item
+ * that is *not* a SubPlan.
+ */
+ ListCell *rest;
+
+ for_each_cell(rest, dpns->ancestors,
+ lnext(dpns->ancestors, lc))
+ {
+ Node *ancestor2 = (Node *) lfirst(rest);
+
+ if (!IsA(ancestor2, SubPlan))
+ {
+ *dpns_p = dpns;
+ *ancestor_cell_p = rest;
+ return arg;
+ }
+ }
+ elog(ERROR, "SubPlan cannot be outermost ancestor");
+ }
+ }
+
+ /* SubPlan isn't a kind of Plan, so skip the rest */
+ continue;
+ }
+
+ /*
+ * We need not consider the ancestor's initPlan list, since
+ * initplans never have any parParams.
+ */
+
+ /* No luck, crawl up to next ancestor */
+ child_plan = (Plan *) ancestor;
+ }
+ }
+
+ /* No referent found */
+ return NULL;
+}
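+
+/*
+ * Illustrative note (not part of the deparsing logic itself): in a plan such
+ * as a NestLoop whose inner index scan carries the qual "b.x = a.x", the
+ * outer value a.x is shipped to the inner side as a PARAM_EXEC parameter.
+ * find_param_referent() is what lets the deparser print that qual as
+ * (b.x = a.x) rather than as an opaque parameter reference like ($0).
+ */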
+
+/*
+ * Display a Param appropriately.
+ */
+static void
+get_parameter(Param *param, deparse_context *context)
+{
+ Node *expr;
+ deparse_namespace *dpns;
+ ListCell *ancestor_cell;
+
+ /*
+ * If it's a PARAM_EXEC parameter, try to locate the expression from which
+ * the parameter was computed. Note that failing to find a referent isn't
+ * an error, since the Param might well be a subplan output rather than an
+ * input.
+ */
+ expr = find_param_referent(param, context, &dpns, &ancestor_cell);
+ if (expr)
+ {
+ /* Found a match, so print it */
+ deparse_namespace save_dpns;
+ bool save_varprefix;
+ bool need_paren;
+
+ /* Switch attention to the ancestor plan node */
+ push_ancestor_plan(dpns, ancestor_cell, &save_dpns);
+
+ /*
+ * Force prefixing of Vars, since they won't belong to the relation
+ * being scanned in the original plan node.
+ */
+ save_varprefix = context->varprefix;
+ context->varprefix = true;
+
+ /*
+ * A Param's expansion is typically a Var, Aggref, GroupingFunc, or
+ * upper-level Param, which wouldn't need extra parentheses.
+ * Otherwise, insert parens to ensure the expression looks atomic.
+ */
+ need_paren = !(IsA(expr, Var) ||
+ IsA(expr, Aggref) ||
+ IsA(expr, GroupingFunc) ||
+ IsA(expr, Param));
+ if (need_paren)
+ appendStringInfoChar(context->buf, '(');
+
+ get_rule_expr(expr, context, false);
+
+ if (need_paren)
+ appendStringInfoChar(context->buf, ')');
+
+ context->varprefix = save_varprefix;
+
+ pop_ancestor_plan(dpns, &save_dpns);
+
+ return;
+ }
+
+ /*
+ * If it's an external parameter, see if the outermost namespace provides
+ * function argument names.
+ */
+ if (param->paramkind == PARAM_EXTERN && context->namespaces != NIL)
+ {
+ dpns = llast(context->namespaces);
+ if (dpns->argnames &&
+ param->paramid > 0 &&
+ param->paramid <= dpns->numargs)
+ {
+ char *argname = dpns->argnames[param->paramid - 1];
+
+ if (argname)
+ {
+ bool should_qualify = false;
+ ListCell *lc;
+
+ /*
+ * Qualify the parameter name if there are any other deparse
+ * namespaces with range tables. This avoids qualifying in
+ * trivial cases like "RETURN a + b", but makes it safe in all
+ * other cases.
+ */
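+				/*
+				 * For example, a SQL-body function such as
+				 *   CREATE FUNCTION add(a int, b int) ... RETURN a + b
+				 * deparses its parameters as just "a + b", while a body whose
+				 * queries reference tables would print "add.a + add.b".
+				 * (Hypothetical function shown for illustration only.)
+				 */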
+ foreach(lc, context->namespaces)
+ {
+ deparse_namespace *depns = lfirst(lc);
+
+ if (depns->rtable_names != NIL)
+ {
+ should_qualify = true;
+ break;
+ }
+ }
+ if (should_qualify)
+ {
+ appendStringInfoString(context->buf, quote_identifier(dpns->funcname));
+ appendStringInfoChar(context->buf, '.');
+ }
+
+ appendStringInfoString(context->buf, quote_identifier(argname));
+ return;
+ }
+ }
+ }
+
+ /*
+	 * Not PARAM_EXEC, or couldn't find referent: for base types just print $N.
+	 * For composite types, add a cast to the parameter so that the remote
+	 * node can detect its type.
+ */
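+	/*
+	 * For instance, a parameter whose type is not a built-in type (say, a
+	 * user-defined composite type; type name here is for illustration only)
+	 * would be printed as "$1::my_type", so the worker node resolves the
+	 * parameter to the intended type.
+	 */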
+ if (param->paramtype >= FirstNormalObjectId)
+ {
+ char *typeName = format_type_with_typemod(param->paramtype, param->paramtypmod);
+
+ appendStringInfo(context->buf, "$%d::%s", param->paramid, typeName);
+ }
+ else
+ {
+ appendStringInfo(context->buf, "$%d", param->paramid);
+ }
+}
+
+/*
+ * get_simple_binary_op_name
+ *
+ * helper function for isSimpleNode
+ * returns the single-character binary operator name, or NULL if it's not one
+ */
+static const char *
+get_simple_binary_op_name(OpExpr *expr)
+{
+ List *args = expr->args;
+
+ if (list_length(args) == 2)
+ {
+ /* binary operator */
+ Node *arg1 = (Node *) linitial(args);
+ Node *arg2 = (Node *) lsecond(args);
+ const char *op;
+
+ op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2));
+ if (strlen(op) == 1)
+ return op;
+ }
+ return NULL;
+}
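+
+/*
+ * For illustration: for an expression like "2 + 3" this returns "+", while
+ * for "'a' || 'b'" the operator name "||" is two characters long, so NULL is
+ * returned and the caller treats the node as not simple.
+ */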
+
+/*
+ * isSimpleNode - check if given node is simple (doesn't need parenthesizing)
+ *
+ * true : simple in the context of parent node's type
+ * false : not simple
+ */
+static bool
+isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
+{
+ if (!node)
+ return false;
+
+ switch (nodeTag(node))
+ {
+ case T_Var:
+ case T_Const:
+ case T_Param:
+ case T_CoerceToDomainValue:
+ case T_SetToDefault:
+ case T_CurrentOfExpr:
+ /* single words: always simple */
+ return true;
+
+ case T_SubscriptingRef:
+ case T_ArrayExpr:
+ case T_RowExpr:
+ case T_CoalesceExpr:
+ case T_MinMaxExpr:
+ case T_SQLValueFunction:
+ case T_XmlExpr:
+ case T_NextValueExpr:
+ case T_NullIfExpr:
+ case T_Aggref:
+ case T_GroupingFunc:
+ case T_WindowFunc:
+ case T_FuncExpr:
+ case T_JsonConstructorExpr:
+ /* function-like: name(..) or name[..] */
+ return true;
+
+ /* CASE keywords act as parentheses */
+ case T_CaseExpr:
+ return true;
+
+ case T_FieldSelect:
+
+ /*
+ * appears simple since . has top precedence, unless parent is
+ * T_FieldSelect itself!
+ */
+ return !IsA(parentNode, FieldSelect);
+
+ case T_FieldStore:
+
+ /*
+ * treat like FieldSelect (probably doesn't matter)
+ */
+ return !IsA(parentNode, FieldStore);
+
+ case T_CoerceToDomain:
+ /* maybe simple, check args */
+ return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg,
+ node, prettyFlags);
+ case T_RelabelType:
+ return isSimpleNode((Node *) ((RelabelType *) node)->arg,
+ node, prettyFlags);
+ case T_CoerceViaIO:
+ return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg,
+ node, prettyFlags);
+ case T_ArrayCoerceExpr:
+ return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg,
+ node, prettyFlags);
+ case T_ConvertRowtypeExpr:
+ return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg,
+ node, prettyFlags);
+
+ case T_OpExpr:
+ {
+ /* depends on parent node type; needs further checking */
+ if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr))
+ {
+ const char *op;
+ const char *parentOp;
+ bool is_lopriop;
+ bool is_hipriop;
+ bool is_lopriparent;
+ bool is_hipriparent;
+
+ op = get_simple_binary_op_name((OpExpr *) node);
+ if (!op)
+ return false;
+
+ /* We know only the basic operators + - and * / % */
+ is_lopriop = (strchr("+-", *op) != NULL);
+ is_hipriop = (strchr("*/%", *op) != NULL);
+ if (!(is_lopriop || is_hipriop))
+ return false;
+
+ parentOp = get_simple_binary_op_name((OpExpr *) parentNode);
+ if (!parentOp)
+ return false;
+
+ is_lopriparent = (strchr("+-", *parentOp) != NULL);
+ is_hipriparent = (strchr("*/%", *parentOp) != NULL);
+ if (!(is_lopriparent || is_hipriparent))
+ return false;
+
+ if (is_hipriop && is_lopriparent)
+ return true; /* op binds tighter than parent */
+
+ if (is_lopriop && is_hipriparent)
+ return false;
+
+ /*
+ * Operators are same priority --- can skip parens only if
+ * we have (a - b) - c, not a - (b - c).
+ */
+ if (node == (Node *) linitial(((OpExpr *) parentNode)->args))
+ return true;
+
+ return false;
+ }
+ /* else do the same stuff as for T_SubLink et al. */
+ }
+ /* FALLTHROUGH */
+
+ case T_SubLink:
+ case T_NullTest:
+ case T_BooleanTest:
+ case T_DistinctExpr:
+ case T_JsonIsPredicate:
+ switch (nodeTag(parentNode))
+ {
+ case T_FuncExpr:
+ {
+ /* special handling for casts and COERCE_SQL_SYNTAX */
+ CoercionForm type = ((FuncExpr *) parentNode)->funcformat;
+
+ if (type == COERCE_EXPLICIT_CAST ||
+ type == COERCE_IMPLICIT_CAST ||
+ type == COERCE_SQL_SYNTAX)
+ return false;
+ return true; /* own parentheses */
+ }
+ case T_BoolExpr: /* lower precedence */
+ case T_SubscriptingRef: /* other separators */
+ case T_ArrayExpr: /* other separators */
+ case T_RowExpr: /* other separators */
+ case T_CoalesceExpr: /* own parentheses */
+ case T_MinMaxExpr: /* own parentheses */
+ case T_XmlExpr: /* own parentheses */
+ case T_NullIfExpr: /* other separators */
+ case T_Aggref: /* own parentheses */
+ case T_GroupingFunc: /* own parentheses */
+ case T_WindowFunc: /* own parentheses */
+ case T_CaseExpr: /* other separators */
+ return true;
+ default:
+ return false;
+ }
+
+ case T_BoolExpr:
+ switch (nodeTag(parentNode))
+ {
+ case T_BoolExpr:
+ if (prettyFlags & PRETTYFLAG_PAREN)
+ {
+ BoolExprType type;
+ BoolExprType parentType;
+
+ type = ((BoolExpr *) node)->boolop;
+ parentType = ((BoolExpr *) parentNode)->boolop;
+ switch (type)
+ {
+ case NOT_EXPR:
+ case AND_EXPR:
+ if (parentType == AND_EXPR || parentType == OR_EXPR)
+ return true;
+ break;
+ case OR_EXPR:
+ if (parentType == OR_EXPR)
+ return true;
+ break;
+ }
+ }
+ return false;
+ case T_FuncExpr:
+ {
+ /* special handling for casts and COERCE_SQL_SYNTAX */
+ CoercionForm type = ((FuncExpr *) parentNode)->funcformat;
+
+ if (type == COERCE_EXPLICIT_CAST ||
+ type == COERCE_IMPLICIT_CAST ||
+ type == COERCE_SQL_SYNTAX)
+ return false;
+ return true; /* own parentheses */
+ }
+ case T_SubscriptingRef: /* other separators */
+ case T_ArrayExpr: /* other separators */
+ case T_RowExpr: /* other separators */
+ case T_CoalesceExpr: /* own parentheses */
+ case T_MinMaxExpr: /* own parentheses */
+ case T_XmlExpr: /* own parentheses */
+ case T_NullIfExpr: /* other separators */
+ case T_Aggref: /* own parentheses */
+ case T_GroupingFunc: /* own parentheses */
+ case T_WindowFunc: /* own parentheses */
+ case T_CaseExpr: /* other separators */
+ return true;
+ default:
+ return false;
+ }
+
+ case T_JsonValueExpr:
+ /* maybe simple, check args */
+ return isSimpleNode((Node *) ((JsonValueExpr *) node)->raw_expr,
+ node, prettyFlags);
+
+ default:
+ break;
+ }
+ /* those we don't know: in dubio complexo */
+ return false;
+}
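+
+/*
+ * For illustration: when pretty-printing "a + b * c", the inner OpExpr
+ * "b * c" is considered simple relative to its "+" parent (higher-priority
+ * operator under a lower-priority parent), so no parentheses are emitted;
+ * in "(a + b) * c" the inner "+" is not simple under "*", so its parentheses
+ * are kept.
+ */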
+
+/*
+ * appendContextKeyword - append a keyword to buffer
+ *
+ * If prettyPrint is enabled, perform a line break, and adjust indentation.
+ * Otherwise, just append the keyword.
+ */
+static void
+appendContextKeyword(deparse_context *context, const char *str,
+ int indentBefore, int indentAfter, int indentPlus)
+{
+ StringInfo buf = context->buf;
+
+ if (PRETTY_INDENT(context))
+ {
+ int indentAmount;
+
+ context->indentLevel += indentBefore;
+
+ /* remove any trailing spaces currently in the buffer ... */
+ removeStringInfoSpaces(buf);
+ /* ... then add a newline and some spaces */
+ appendStringInfoChar(buf, '\n');
+
+ if (context->indentLevel < PRETTYINDENT_LIMIT)
+ indentAmount = Max(context->indentLevel, 0) + indentPlus;
+ else
+ {
+ /*
+ * If we're indented more than PRETTYINDENT_LIMIT characters, try
+ * to conserve horizontal space by reducing the per-level
+ * indentation. For best results the scale factor here should
+ * divide all the indent amounts that get added to indentLevel
+ * (PRETTYINDENT_STD, etc). It's important that the indentation
+ * not grow unboundedly, else deeply-nested trees use O(N^2)
+ * whitespace; so we also wrap modulo PRETTYINDENT_LIMIT.
+ */
+ indentAmount = PRETTYINDENT_LIMIT +
+ (context->indentLevel - PRETTYINDENT_LIMIT) /
+ (PRETTYINDENT_STD / 2);
+ indentAmount %= PRETTYINDENT_LIMIT;
+ /* scale/wrap logic affects indentLevel, but not indentPlus */
+ indentAmount += indentPlus;
+ }
+ appendStringInfoSpaces(buf, indentAmount);
+
+ appendStringInfoString(buf, str);
+
+ context->indentLevel += indentAfter;
+ if (context->indentLevel < 0)
+ context->indentLevel = 0;
+ }
+ else
+ appendStringInfoString(buf, str);
+}
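+
+/*
+ * Worked example (assuming the usual PRETTYINDENT_STD = 8 and
+ * PRETTYINDENT_LIMIT = 40 defined elsewhere in this file): with
+ * indentLevel = 100, indentAmount = (40 + (100 - 40) / 4) % 40 = 15, plus
+ * indentPlus, so deeply nested constructs keep a bounded indentation.
+ */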
+
+/*
+ * removeStringInfoSpaces - delete trailing spaces from a buffer.
+ *
+ * Possibly this should move to stringinfo.c at some point.
+ */
+static void
+removeStringInfoSpaces(StringInfo str)
+{
+ while (str->len > 0 && str->data[str->len - 1] == ' ')
+ str->data[--(str->len)] = '\0';
+}
+
+/*
+ * get_rule_expr_paren - deparse expr using get_rule_expr,
+ * embracing the string with parentheses if necessary for prettyPrint.
+ *
+ * Never embrace if prettyFlags=0, because it's done in the calling node.
+ *
+ * Any node that does *not* embrace its argument node by sql syntax (with
+ * parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should
+ * use get_rule_expr_paren instead of get_rule_expr so parentheses can be
+ * added.
+ */
+static void
+get_rule_expr_paren(Node *node, deparse_context *context,
+ bool showimplicit, Node *parentNode)
+{
+ bool need_paren;
+
+ need_paren = PRETTY_PAREN(context) &&
+ !isSimpleNode(node, parentNode, context->prettyFlags);
+
+ if (need_paren)
+ appendStringInfoChar(context->buf, '(');
+
+ get_rule_expr(node, context, showimplicit);
+
+ if (need_paren)
+ appendStringInfoChar(context->buf, ')');
+}
+
+/* ----------
+ * get_rule_expr - Parse back an expression
+ *
+ * Note: showimplicit determines whether we display any implicit cast that
+ * is present at the top of the expression tree. It is a passed argument,
+ * not a field of the context struct, because we change the value as we
+ * recurse down into the expression. In general we suppress implicit casts
+ * when the result type is known with certainty (eg, the arguments of an
+ * OR must be boolean). We display implicit casts for arguments of functions
+ * and operators, since this is needed to be certain that the same function
+ * or operator will be chosen when the expression is re-parsed.
+ * ----------
+ */
+static void
+get_rule_expr(Node *node, deparse_context *context,
+ bool showimplicit)
+{
+ StringInfo buf = context->buf;
+
+ if (node == NULL)
+ return;
+
+ /* Guard against excessively long or deeply-nested queries */
+ CHECK_FOR_INTERRUPTS();
+ check_stack_depth();
+
+ /*
+ * Each level of get_rule_expr must emit an indivisible term
+ * (parenthesized if necessary) to ensure result is reparsed into the same
+ * expression tree. The only exception is that when the input is a List,
+ * we emit the component items comma-separated with no surrounding
+ * decoration; this is convenient for most callers.
+ */
+ switch (nodeTag(node))
+ {
+ case T_Var:
+ (void) get_variable((Var *) node, 0, false, context);
+ break;
+
+ case T_Const:
+ get_const_expr((Const *) node, context, 0);
+ break;
+
+ case T_Param:
+ get_parameter((Param *) node, context);
+ break;
+
+ case T_Aggref:
+ get_agg_expr((Aggref *) node, context, (Aggref *) node);
+ break;
+
+ case T_GroupingFunc:
+ {
+ GroupingFunc *gexpr = (GroupingFunc *) node;
+
+ appendStringInfoString(buf, "GROUPING(");
+ get_rule_expr((Node *) gexpr->args, context, true);
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_WindowFunc:
+ get_windowfunc_expr((WindowFunc *) node, context);
+ break;
+
+ case T_SubscriptingRef:
+ {
+ SubscriptingRef *sbsref = (SubscriptingRef *) node;
+ bool need_parens;
+
+ /*
+ * If the argument is a CaseTestExpr, we must be inside a
+ * FieldStore, ie, we are assigning to an element of an array
+ * within a composite column. Since we already punted on
+ * displaying the FieldStore's target information, just punt
+ * here too, and display only the assignment source
+ * expression.
+ */
+ if (IsA(sbsref->refexpr, CaseTestExpr))
+ {
+ Assert(sbsref->refassgnexpr);
+ get_rule_expr((Node *) sbsref->refassgnexpr,
+ context, showimplicit);
+ break;
+ }
+
+ /*
+ * Parenthesize the argument unless it's a simple Var or a
+ * FieldSelect. (In particular, if it's another
+ * SubscriptingRef, we *must* parenthesize to avoid
+ * confusion.)
+ */
+ need_parens = !IsA(sbsref->refexpr, Var) &&
+ !IsA(sbsref->refexpr, FieldSelect);
+ if (need_parens)
+ appendStringInfoChar(buf, '(');
+ get_rule_expr((Node *) sbsref->refexpr, context, showimplicit);
+ if (need_parens)
+ appendStringInfoChar(buf, ')');
+
+ /*
+ * If there's a refassgnexpr, we want to print the node in the
+ * format "container[subscripts] := refassgnexpr". This is
+ * not legal SQL, so decompilation of INSERT or UPDATE
+ * statements should always use processIndirection as part of
+ * the statement-level syntax. We should only see this when
+ * EXPLAIN tries to print the targetlist of a plan resulting
+ * from such a statement.
+ */
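+				/*
+				 * For example, EXPLAIN of a plan for
+				 *   UPDATE t SET arr[2] = 5
+				 * may show this targetlist entry as "arr[2] := 5"
+				 * (hypothetical table shown for illustration).
+				 */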
+ if (sbsref->refassgnexpr)
+ {
+ Node *refassgnexpr;
+
+ /*
+ * Use processIndirection to print this node's subscripts
+ * as well as any additional field selections or
+ * subscripting in immediate descendants. It returns the
+ * RHS expr that is actually being "assigned".
+ */
+ refassgnexpr = processIndirection(node, context);
+ appendStringInfoString(buf, " := ");
+ get_rule_expr(refassgnexpr, context, showimplicit);
+ }
+ else
+ {
+ /* Just an ordinary container fetch, so print subscripts */
+ printSubscripts(sbsref, context);
+ }
+ }
+ break;
+
+ case T_FuncExpr:
+ get_func_expr((FuncExpr *) node, context, showimplicit);
+ break;
+
+ case T_NamedArgExpr:
+ {
+ NamedArgExpr *na = (NamedArgExpr *) node;
+
+ appendStringInfo(buf, "%s => ", quote_identifier(na->name));
+ get_rule_expr((Node *) na->arg, context, showimplicit);
+ }
+ break;
+
+ case T_OpExpr:
+ get_oper_expr((OpExpr *) node, context);
+ break;
+
+ case T_DistinctExpr:
+ {
+ DistinctExpr *expr = (DistinctExpr *) node;
+ List *args = expr->args;
+ Node *arg1 = (Node *) linitial(args);
+ Node *arg2 = (Node *) lsecond(args);
+
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren(arg1, context, true, node);
+ appendStringInfoString(buf, " IS DISTINCT FROM ");
+ get_rule_expr_paren(arg2, context, true, node);
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_NullIfExpr:
+ {
+ NullIfExpr *nullifexpr = (NullIfExpr *) node;
+
+ appendStringInfoString(buf, "NULLIF(");
+ get_rule_expr((Node *) nullifexpr->args, context, true);
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_ScalarArrayOpExpr:
+ {
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+ List *args = expr->args;
+ Node *arg1 = (Node *) linitial(args);
+ Node *arg2 = (Node *) lsecond(args);
+
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren(arg1, context, true, node);
+ appendStringInfo(buf, " %s %s (",
+ generate_operator_name(expr->opno,
+ exprType(arg1),
+ get_base_element_type(exprType(arg2))),
+ expr->useOr ? "ANY" : "ALL");
+ get_rule_expr_paren(arg2, context, true, node);
+
+ /*
+ * There's inherent ambiguity in "x op ANY/ALL (y)" when y is
+ * a bare sub-SELECT. Since we're here, the sub-SELECT must
+ * be meant as a scalar sub-SELECT yielding an array value to
+ * be used in ScalarArrayOpExpr; but the grammar will
+ * preferentially interpret such a construct as an ANY/ALL
+ * SubLink. To prevent misparsing the output that way, insert
+ * a dummy coercion (which will be stripped by parse analysis,
+ * so no inefficiency is added in dump and reload). This is
+ * indeed most likely what the user wrote to get the construct
+ * accepted in the first place.
+ */
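+				/*
+				 * For example (integer columns assumed), "x = ANY ((SELECT
+				 * array_agg(y) FROM t))" is emitted with a trailing cast,
+				 * i.e. "... (SELECT array_agg(y) FROM t)::integer[])", so the
+				 * sub-SELECT is re-parsed as a scalar array value rather than
+				 * as an ANY SubLink.
+				 */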
+ if (IsA(arg2, SubLink) &&
+ ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK)
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(exprType(arg2),
+ exprTypmod(arg2)));
+ appendStringInfoChar(buf, ')');
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_BoolExpr:
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+ Node *first_arg = linitial(expr->args);
+ ListCell *arg;
+
+ switch (expr->boolop)
+ {
+ case AND_EXPR:
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren(first_arg, context,
+ false, node);
+ for_each_from(arg, expr->args, 1)
+ {
+ appendStringInfoString(buf, " AND ");
+ get_rule_expr_paren((Node *) lfirst(arg), context,
+ false, node);
+ }
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ break;
+
+ case OR_EXPR:
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren(first_arg, context,
+ false, node);
+ for_each_from(arg, expr->args, 1)
+ {
+ appendStringInfoString(buf, " OR ");
+ get_rule_expr_paren((Node *) lfirst(arg), context,
+ false, node);
+ }
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ break;
+
+ case NOT_EXPR:
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ appendStringInfoString(buf, "NOT ");
+ get_rule_expr_paren(first_arg, context,
+ false, node);
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ break;
+
+ default:
+ elog(ERROR, "unrecognized boolop: %d",
+ (int) expr->boolop);
+ }
+ }
+ break;
+
+ case T_SubLink:
+ get_sublink_expr((SubLink *) node, context);
+ break;
+
+ case T_SubPlan:
+ {
+ SubPlan *subplan = (SubPlan *) node;
+
+ /*
+ * We cannot see an already-planned subplan in rule deparsing,
+ * only while EXPLAINing a query plan. We don't try to
+ * reconstruct the original SQL, just reference the subplan
+ * that appears elsewhere in EXPLAIN's result.
+ */
+ if (subplan->useHashTable)
+ appendStringInfo(buf, "(hashed %s)", subplan->plan_name);
+ else
+ appendStringInfo(buf, "(%s)", subplan->plan_name);
+ }
+ break;
+
+ case T_AlternativeSubPlan:
+ {
+ AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
+ ListCell *lc;
+
+ /*
+ * This case cannot be reached in normal usage, since no
+ * AlternativeSubPlan can appear either in parsetrees or
+ * finished plan trees. We keep it just in case somebody
+ * wants to use this code to print planner data structures.
+ */
+ appendStringInfoString(buf, "(alternatives: ");
+ foreach(lc, asplan->subplans)
+ {
+ SubPlan *splan = lfirst_node(SubPlan, lc);
+
+ if (splan->useHashTable)
+ appendStringInfo(buf, "hashed %s", splan->plan_name);
+ else
+ appendStringInfoString(buf, splan->plan_name);
+ if (lnext(asplan->subplans, lc))
+ appendStringInfoString(buf, " or ");
+ }
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_FieldSelect:
+ {
+ FieldSelect *fselect = (FieldSelect *) node;
+ Node *arg = (Node *) fselect->arg;
+ int fno = fselect->fieldnum;
+ const char *fieldname;
+ bool need_parens;
+
+ /*
+					 * Parenthesize the argument unless it's a SubscriptingRef or
+ * another FieldSelect. Note in particular that it would be
+ * WRONG to not parenthesize a Var argument; simplicity is not
+ * the issue here, having the right number of names is.
+ */
+ need_parens = !IsA(arg, SubscriptingRef) &&
+ !IsA(arg, FieldSelect);
+ if (need_parens)
+ appendStringInfoChar(buf, '(');
+ get_rule_expr(arg, context, true);
+ if (need_parens)
+ appendStringInfoChar(buf, ')');
+
+ /*
+ * Get and print the field name.
+ */
+ fieldname = get_name_for_var_field((Var *) arg, fno,
+ 0, context);
+ appendStringInfo(buf, ".%s", quote_identifier(fieldname));
+ }
+ break;
+
+ case T_FieldStore:
+ {
+ FieldStore *fstore = (FieldStore *) node;
+ bool need_parens;
+
+ /*
+ * There is no good way to represent a FieldStore as real SQL,
+ * so decompilation of INSERT or UPDATE statements should
+ * always use processIndirection as part of the
+ * statement-level syntax. We should only get here when
+ * EXPLAIN tries to print the targetlist of a plan resulting
+ * from such a statement. The plan case is even harder than
+ * ordinary rules would be, because the planner tries to
+ * collapse multiple assignments to the same field or subfield
+ * into one FieldStore; so we can see a list of target fields
+ * not just one, and the arguments could be FieldStores
+ * themselves. We don't bother to try to print the target
+ * field names; we just print the source arguments, with a
+ * ROW() around them if there's more than one. This isn't
+ * terribly complete, but it's probably good enough for
+ * EXPLAIN's purposes; especially since anything more would be
+ * either hopelessly confusing or an even poorer
+ * representation of what the plan is actually doing.
+ */
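+				/*
+				 * For example, a plan for "UPDATE t SET c.f1 = 1, c.f2 = 2"
+				 * (c being a composite column; hypothetical table) may show
+				 * this node as "ROW(1, 2)" rather than naming the target
+				 * fields.
+				 */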
+ need_parens = (list_length(fstore->newvals) != 1);
+ if (need_parens)
+ appendStringInfoString(buf, "ROW(");
+ get_rule_expr((Node *) fstore->newvals, context, showimplicit);
+ if (need_parens)
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_RelabelType:
+ {
+ RelabelType *relabel = (RelabelType *) node;
+ Node *arg = (Node *) relabel->arg;
+
+ if (relabel->relabelformat == COERCE_IMPLICIT_CAST &&
+ !showimplicit)
+ {
+ /* don't show the implicit cast */
+ get_rule_expr_paren(arg, context, false, node);
+ }
+ else
+ {
+ get_coercion_expr(arg, context,
+ relabel->resulttype,
+ relabel->resulttypmod,
+ node);
+ }
+ }
+ break;
+
+ case T_CoerceViaIO:
+ {
+ CoerceViaIO *iocoerce = (CoerceViaIO *) node;
+ Node *arg = (Node *) iocoerce->arg;
+
+ if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST &&
+ !showimplicit)
+ {
+ /* don't show the implicit cast */
+ get_rule_expr_paren(arg, context, false, node);
+ }
+ else
+ {
+ get_coercion_expr(arg, context,
+ iocoerce->resulttype,
+ -1,
+ node);
+ }
+ }
+ break;
+
+ case T_ArrayCoerceExpr:
+ {
+ ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
+ Node *arg = (Node *) acoerce->arg;
+
+ if (acoerce->coerceformat == COERCE_IMPLICIT_CAST &&
+ !showimplicit)
+ {
+ /* don't show the implicit cast */
+ get_rule_expr_paren(arg, context, false, node);
+ }
+ else
+ {
+ get_coercion_expr(arg, context,
+ acoerce->resulttype,
+ acoerce->resulttypmod,
+ node);
+ }
+ }
+ break;
+
+ case T_ConvertRowtypeExpr:
+ {
+ ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node;
+ Node *arg = (Node *) convert->arg;
+
+ if (convert->convertformat == COERCE_IMPLICIT_CAST &&
+ !showimplicit)
+ {
+ /* don't show the implicit cast */
+ get_rule_expr_paren(arg, context, false, node);
+ }
+ else
+ {
+ get_coercion_expr(arg, context,
+ convert->resulttype, -1,
+ node);
+ }
+ }
+ break;
+
+ case T_CollateExpr:
+ {
+ CollateExpr *collate = (CollateExpr *) node;
+ Node *arg = (Node *) collate->arg;
+
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren(arg, context, showimplicit, node);
+ appendStringInfo(buf, " COLLATE %s",
+ generate_collation_name(collate->collOid));
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_CaseExpr:
+ {
+ CaseExpr *caseexpr = (CaseExpr *) node;
+ ListCell *temp;
+
+ appendContextKeyword(context, "CASE",
+ 0, PRETTYINDENT_VAR, 0);
+ if (caseexpr->arg)
+ {
+ appendStringInfoChar(buf, ' ');
+ get_rule_expr((Node *) caseexpr->arg, context, true);
+ }
+ foreach(temp, caseexpr->args)
+ {
+ CaseWhen *when = (CaseWhen *) lfirst(temp);
+ Node *w = (Node *) when->expr;
+
+ if (caseexpr->arg)
+ {
+ /*
+ * The parser should have produced WHEN clauses of the
+ * form "CaseTestExpr = RHS", possibly with an
+ * implicit coercion inserted above the CaseTestExpr.
+ * For accurate decompilation of rules it's essential
+ * that we show just the RHS. However in an
+ * expression that's been through the optimizer, the
+ * WHEN clause could be almost anything (since the
+ * equality operator could have been expanded into an
+ * inline function). If we don't recognize the form
+ * of the WHEN clause, just punt and display it as-is.
+ */
+ if (IsA(w, OpExpr))
+ {
+ List *args = ((OpExpr *) w)->args;
+
+ if (list_length(args) == 2 &&
+ IsA(strip_implicit_coercions(linitial(args)),
+ CaseTestExpr))
+ w = (Node *) lsecond(args);
+ }
+ }
+
+ if (!PRETTY_INDENT(context))
+ appendStringInfoChar(buf, ' ');
+ appendContextKeyword(context, "WHEN ",
+ 0, 0, 0);
+ get_rule_expr(w, context, false);
+ appendStringInfoString(buf, " THEN ");
+ get_rule_expr((Node *) when->result, context, true);
+ }
+ if (!PRETTY_INDENT(context))
+ appendStringInfoChar(buf, ' ');
+ appendContextKeyword(context, "ELSE ",
+ 0, 0, 0);
+ get_rule_expr((Node *) caseexpr->defresult, context, true);
+ if (!PRETTY_INDENT(context))
+ appendStringInfoChar(buf, ' ');
+ appendContextKeyword(context, "END",
+ -PRETTYINDENT_VAR, 0, 0);
+ }
+ break;
+
+ case T_CaseTestExpr:
+ {
+ /*
+ * Normally we should never get here, since for expressions
+ * that can contain this node type we attempt to avoid
+ * recursing to it. But in an optimized expression we might
+ * be unable to avoid that (see comments for CaseExpr). If we
+ * do see one, print it as CASE_TEST_EXPR.
+ */
+ appendStringInfoString(buf, "CASE_TEST_EXPR");
+ }
+ break;
+
+ case T_ArrayExpr:
+ {
+ ArrayExpr *arrayexpr = (ArrayExpr *) node;
+
+ appendStringInfoString(buf, "ARRAY[");
+ get_rule_expr((Node *) arrayexpr->elements, context, true);
+ appendStringInfoChar(buf, ']');
+
+ /*
+ * If the array isn't empty, we assume its elements are
+ * coerced to the desired type. If it's empty, though, we
+ * need an explicit coercion to the array type.
+ */
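+				/*
+				 * E.g. "ARRAY[1, 2, 3]" is printed as-is, while an empty
+				 * array of integers is printed as "ARRAY[]::integer[]".
+				 */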
+ if (arrayexpr->elements == NIL)
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(arrayexpr->array_typeid, -1));
+ }
+ break;
+
+ case T_RowExpr:
+ {
+ RowExpr *rowexpr = (RowExpr *) node;
+ TupleDesc tupdesc = NULL;
+ ListCell *arg;
+ int i;
+ char *sep;
+
+ /*
+ * If it's a named type and not RECORD, we may have to skip
+ * dropped columns and/or claim there are NULLs for added
+ * columns.
+ */
+ if (rowexpr->row_typeid != RECORDOID)
+ {
+ tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1);
+ Assert(list_length(rowexpr->args) <= tupdesc->natts);
+ }
+
+ /*
+ * SQL99 allows "ROW" to be omitted when there is more than
+ * one column, but for simplicity we always print it.
+ */
+ appendStringInfoString(buf, "ROW(");
+ sep = "";
+ i = 0;
+ foreach(arg, rowexpr->args)
+ {
+ Node *e = (Node *) lfirst(arg);
+
+ if (tupdesc == NULL ||
+ !TupleDescAttr(tupdesc, i)->attisdropped)
+ {
+ appendStringInfoString(buf, sep);
+ /* Whole-row Vars need special treatment here */
+ get_rule_expr_toplevel(e, context, true);
+ sep = ", ";
+ }
+ i++;
+ }
+ if (tupdesc != NULL)
+ {
+ while (i < tupdesc->natts)
+ {
+ if (!TupleDescAttr(tupdesc, i)->attisdropped)
+ {
+ appendStringInfoString(buf, sep);
+ appendStringInfoString(buf, "NULL");
+ sep = ", ";
+ }
+ i++;
+ }
+
+ ReleaseTupleDesc(tupdesc);
+ }
+ appendStringInfoChar(buf, ')');
+ if (rowexpr->row_format == COERCE_EXPLICIT_CAST)
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(rowexpr->row_typeid, -1));
+ }
+ break;
+
+ case T_RowCompareExpr:
+ {
+ RowCompareExpr *rcexpr = (RowCompareExpr *) node;
+
+ /*
+ * SQL99 allows "ROW" to be omitted when there is more than
+ * one column, but for simplicity we always print it. Within
+ * a ROW expression, whole-row Vars need special treatment, so
+ * use get_rule_list_toplevel.
+ */
+ appendStringInfoString(buf, "(ROW(");
+ get_rule_list_toplevel(rcexpr->largs, context, true);
+
+ /*
+ * We assume that the name of the first-column operator will
+ * do for all the rest too. This is definitely open to
+ * failure, eg if some but not all operators were renamed
+ * since the construct was parsed, but there seems no way to
+ * be perfect.
+ */
+ appendStringInfo(buf, ") %s ROW(",
+ generate_operator_name(linitial_oid(rcexpr->opnos),
+ exprType(linitial(rcexpr->largs)),
+ exprType(linitial(rcexpr->rargs))));
+ get_rule_list_toplevel(rcexpr->rargs, context, true);
+ appendStringInfoString(buf, "))");
+ }
+ break;
+
+ case T_CoalesceExpr:
+ {
+ CoalesceExpr *coalesceexpr = (CoalesceExpr *) node;
+
+ appendStringInfoString(buf, "COALESCE(");
+ get_rule_expr((Node *) coalesceexpr->args, context, true);
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_MinMaxExpr:
+ {
+ MinMaxExpr *minmaxexpr = (MinMaxExpr *) node;
+
+ switch (minmaxexpr->op)
+ {
+ case IS_GREATEST:
+ appendStringInfoString(buf, "GREATEST(");
+ break;
+ case IS_LEAST:
+ appendStringInfoString(buf, "LEAST(");
+ break;
+ }
+ get_rule_expr((Node *) minmaxexpr->args, context, true);
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_SQLValueFunction:
+ {
+ SQLValueFunction *svf = (SQLValueFunction *) node;
+
+ /*
+ * Note: this code knows that typmod for time, timestamp, and
+ * timestamptz just prints as integer.
+ */
+ switch (svf->op)
+ {
+ case SVFOP_CURRENT_DATE:
+ appendStringInfoString(buf, "CURRENT_DATE");
+ break;
+ case SVFOP_CURRENT_TIME:
+ appendStringInfoString(buf, "CURRENT_TIME");
+ break;
+ case SVFOP_CURRENT_TIME_N:
+ appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod);
+ break;
+ case SVFOP_CURRENT_TIMESTAMP:
+ appendStringInfoString(buf, "CURRENT_TIMESTAMP");
+ break;
+ case SVFOP_CURRENT_TIMESTAMP_N:
+ appendStringInfo(buf, "CURRENT_TIMESTAMP(%d)",
+ svf->typmod);
+ break;
+ case SVFOP_LOCALTIME:
+ appendStringInfoString(buf, "LOCALTIME");
+ break;
+ case SVFOP_LOCALTIME_N:
+ appendStringInfo(buf, "LOCALTIME(%d)", svf->typmod);
+ break;
+ case SVFOP_LOCALTIMESTAMP:
+ appendStringInfoString(buf, "LOCALTIMESTAMP");
+ break;
+ case SVFOP_LOCALTIMESTAMP_N:
+ appendStringInfo(buf, "LOCALTIMESTAMP(%d)",
+ svf->typmod);
+ break;
+ case SVFOP_CURRENT_ROLE:
+ appendStringInfoString(buf, "CURRENT_ROLE");
+ break;
+ case SVFOP_CURRENT_USER:
+ appendStringInfoString(buf, "CURRENT_USER");
+ break;
+ case SVFOP_USER:
+ appendStringInfoString(buf, "USER");
+ break;
+ case SVFOP_SESSION_USER:
+ appendStringInfoString(buf, "SESSION_USER");
+ break;
+ case SVFOP_CURRENT_CATALOG:
+ appendStringInfoString(buf, "CURRENT_CATALOG");
+ break;
+ case SVFOP_CURRENT_SCHEMA:
+ appendStringInfoString(buf, "CURRENT_SCHEMA");
+ break;
+ }
+ }
+ break;
+
+ case T_XmlExpr:
+ {
+ XmlExpr *xexpr = (XmlExpr *) node;
+ bool needcomma = false;
+ ListCell *arg;
+ ListCell *narg;
+ Const *con;
+
+ switch (xexpr->op)
+ {
+ case IS_XMLCONCAT:
+ appendStringInfoString(buf, "XMLCONCAT(");
+ break;
+ case IS_XMLELEMENT:
+ appendStringInfoString(buf, "XMLELEMENT(");
+ break;
+ case IS_XMLFOREST:
+ appendStringInfoString(buf, "XMLFOREST(");
+ break;
+ case IS_XMLPARSE:
+ appendStringInfoString(buf, "XMLPARSE(");
+ break;
+ case IS_XMLPI:
+ appendStringInfoString(buf, "XMLPI(");
+ break;
+ case IS_XMLROOT:
+ appendStringInfoString(buf, "XMLROOT(");
+ break;
+ case IS_XMLSERIALIZE:
+ appendStringInfoString(buf, "XMLSERIALIZE(");
+ break;
+ case IS_DOCUMENT:
+ break;
+ }
+ if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE)
+ {
+ if (xexpr->xmloption == XMLOPTION_DOCUMENT)
+ appendStringInfoString(buf, "DOCUMENT ");
+ else
+ appendStringInfoString(buf, "CONTENT ");
+ }
+ if (xexpr->name)
+ {
+ appendStringInfo(buf, "NAME %s",
+ quote_identifier(map_xml_name_to_sql_identifier(xexpr->name)));
+ needcomma = true;
+ }
+ if (xexpr->named_args)
+ {
+ if (xexpr->op != IS_XMLFOREST)
+ {
+ if (needcomma)
+ appendStringInfoString(buf, ", ");
+ appendStringInfoString(buf, "XMLATTRIBUTES(");
+ needcomma = false;
+ }
+ forboth(arg, xexpr->named_args, narg, xexpr->arg_names)
+ {
+ Node *e = (Node *) lfirst(arg);
+ char *argname = strVal(lfirst(narg));
+
+ if (needcomma)
+ appendStringInfoString(buf, ", ");
+ get_rule_expr((Node *) e, context, true);
+ appendStringInfo(buf, " AS %s",
+ quote_identifier(map_xml_name_to_sql_identifier(argname)));
+ needcomma = true;
+ }
+ if (xexpr->op != IS_XMLFOREST)
+ appendStringInfoChar(buf, ')');
+ }
+ if (xexpr->args)
+ {
+ if (needcomma)
+ appendStringInfoString(buf, ", ");
+ switch (xexpr->op)
+ {
+ case IS_XMLCONCAT:
+ case IS_XMLELEMENT:
+ case IS_XMLFOREST:
+ case IS_XMLPI:
+ case IS_XMLSERIALIZE:
+ /* no extra decoration needed */
+ get_rule_expr((Node *) xexpr->args, context, true);
+ break;
+ case IS_XMLPARSE:
+ Assert(list_length(xexpr->args) == 2);
+
+ get_rule_expr((Node *) linitial(xexpr->args),
+ context, true);
+
+ con = lsecond_node(Const, xexpr->args);
+ Assert(!con->constisnull);
+ if (DatumGetBool(con->constvalue))
+ appendStringInfoString(buf,
+ " PRESERVE WHITESPACE");
+ else
+ appendStringInfoString(buf,
+ " STRIP WHITESPACE");
+ break;
+ case IS_XMLROOT:
+ Assert(list_length(xexpr->args) == 3);
+
+ get_rule_expr((Node *) linitial(xexpr->args),
+ context, true);
+
+ appendStringInfoString(buf, ", VERSION ");
+ con = (Const *) lsecond(xexpr->args);
+ if (IsA(con, Const) &&
+ con->constisnull)
+ appendStringInfoString(buf, "NO VALUE");
+ else
+ get_rule_expr((Node *) con, context, false);
+
+ con = lthird_node(Const, xexpr->args);
+ if (con->constisnull)
+ /* suppress STANDALONE NO VALUE */ ;
+ else
+ {
+ switch (DatumGetInt32(con->constvalue))
+ {
+ case XML_STANDALONE_YES:
+ appendStringInfoString(buf,
+ ", STANDALONE YES");
+ break;
+ case XML_STANDALONE_NO:
+ appendStringInfoString(buf,
+ ", STANDALONE NO");
+ break;
+ case XML_STANDALONE_NO_VALUE:
+ appendStringInfoString(buf,
+ ", STANDALONE NO VALUE");
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case IS_DOCUMENT:
+ get_rule_expr_paren((Node *) xexpr->args, context, false, node);
+ break;
+ }
+ }
+ if (xexpr->op == IS_XMLSERIALIZE)
+ appendStringInfo(buf, " AS %s",
+ format_type_with_typemod(xexpr->type,
+ xexpr->typmod));
+ if (xexpr->op == IS_DOCUMENT)
+ appendStringInfoString(buf, " IS DOCUMENT");
+ else
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_NullTest:
+ {
+ NullTest *ntest = (NullTest *) node;
+
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren((Node *) ntest->arg, context, true, node);
+
+ /*
+ * For scalar inputs, we prefer to print as IS [NOT] NULL,
+ * which is shorter and traditional. If it's a rowtype input
+ * but we're applying a scalar test, must print IS [NOT]
+ * DISTINCT FROM NULL to be semantically correct.
+ */
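+				/*
+				 * E.g. for a composite value r = ROW(NULL, NULL), "r IS NULL"
+				 * is true (all fields are null) while the scalar-style test,
+				 * deparsed as "r IS NOT DISTINCT FROM NULL", is false because
+				 * the datum itself is not null; hence the distinction below.
+				 */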
+ if (ntest->argisrow ||
+ !type_is_rowtype(exprType((Node *) ntest->arg)))
+ {
+ switch (ntest->nulltesttype)
+ {
+ case IS_NULL:
+ appendStringInfoString(buf, " IS NULL");
+ break;
+ case IS_NOT_NULL:
+ appendStringInfoString(buf, " IS NOT NULL");
+ break;
+ default:
+ elog(ERROR, "unrecognized nulltesttype: %d",
+ (int) ntest->nulltesttype);
+ }
+ }
+ else
+ {
+ switch (ntest->nulltesttype)
+ {
+ case IS_NULL:
+ appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL");
+ break;
+ case IS_NOT_NULL:
+ appendStringInfoString(buf, " IS DISTINCT FROM NULL");
+ break;
+ default:
+ elog(ERROR, "unrecognized nulltesttype: %d",
+ (int) ntest->nulltesttype);
+ }
+ }
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_BooleanTest:
+ {
+ BooleanTest *btest = (BooleanTest *) node;
+
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren((Node *) btest->arg, context, false, node);
+ switch (btest->booltesttype)
+ {
+ case IS_TRUE:
+ appendStringInfoString(buf, " IS TRUE");
+ break;
+ case IS_NOT_TRUE:
+ appendStringInfoString(buf, " IS NOT TRUE");
+ break;
+ case IS_FALSE:
+ appendStringInfoString(buf, " IS FALSE");
+ break;
+ case IS_NOT_FALSE:
+ appendStringInfoString(buf, " IS NOT FALSE");
+ break;
+ case IS_UNKNOWN:
+ appendStringInfoString(buf, " IS UNKNOWN");
+ break;
+ case IS_NOT_UNKNOWN:
+ appendStringInfoString(buf, " IS NOT UNKNOWN");
+ break;
+ default:
+ elog(ERROR, "unrecognized booltesttype: %d",
+ (int) btest->booltesttype);
+ }
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_CoerceToDomain:
+ {
+ CoerceToDomain *ctest = (CoerceToDomain *) node;
+ Node *arg = (Node *) ctest->arg;
+
+ if (ctest->coercionformat == COERCE_IMPLICIT_CAST &&
+ !showimplicit)
+ {
+ /* don't show the implicit cast */
+ get_rule_expr(arg, context, false);
+ }
+ else
+ {
+ get_coercion_expr(arg, context,
+ ctest->resulttype,
+ ctest->resulttypmod,
+ node);
+ }
+ }
+ break;
+
+ case T_CoerceToDomainValue:
+ appendStringInfoString(buf, "VALUE");
+ break;
+
+ case T_SetToDefault:
+ appendStringInfoString(buf, "DEFAULT");
+ break;
+
+ case T_CurrentOfExpr:
+ {
+ CurrentOfExpr *cexpr = (CurrentOfExpr *) node;
+
+ if (cexpr->cursor_name)
+ appendStringInfo(buf, "CURRENT OF %s",
+ quote_identifier(cexpr->cursor_name));
+ else
+ appendStringInfo(buf, "CURRENT OF $%d",
+ cexpr->cursor_param);
+ }
+ break;
+
+ case T_NextValueExpr:
+ {
+ NextValueExpr *nvexpr = (NextValueExpr *) node;
+
+ /*
+ * This isn't exactly nextval(), but that seems close enough
+ * for EXPLAIN's purposes.
+ */
+ appendStringInfoString(buf, "nextval(");
+ simple_quote_literal(buf,
+ generate_relation_name(nvexpr->seqid,
+ NIL));
+ appendStringInfoChar(buf, ')');
+ }
+ break;
+
+ case T_InferenceElem:
+ {
+ InferenceElem *iexpr = (InferenceElem *) node;
+ bool save_varprefix;
+ bool need_parens;
+
+ /*
+ * InferenceElem can only refer to target relation, so a
+ * prefix is not useful, and indeed would cause parse errors.
+ */
+ save_varprefix = context->varprefix;
+ context->varprefix = false;
+
+ /*
+ * Parenthesize the element unless it's a simple Var or a bare
+ * function call. Follows pg_get_indexdef_worker().
+ */
+ need_parens = !IsA(iexpr->expr, Var);
+ if (IsA(iexpr->expr, FuncExpr) &&
+ ((FuncExpr *) iexpr->expr)->funcformat ==
+ COERCE_EXPLICIT_CALL)
+ need_parens = false;
+
+ if (need_parens)
+ appendStringInfoChar(buf, '(');
+ get_rule_expr((Node *) iexpr->expr,
+ context, false);
+ if (need_parens)
+ appendStringInfoChar(buf, ')');
+
+ context->varprefix = save_varprefix;
+
+ if (iexpr->infercollid)
+ appendStringInfo(buf, " COLLATE %s",
+ generate_collation_name(iexpr->infercollid));
+
+ /* Add the operator class name, if not default */
+ if (iexpr->inferopclass)
+ {
+ Oid inferopclass = iexpr->inferopclass;
+ Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass);
+
+ get_opclass_name(inferopclass, inferopcinputtype, buf);
+ }
+ }
+ break;
+
+ case T_PartitionBoundSpec:
+ {
+ PartitionBoundSpec *spec = (PartitionBoundSpec *) node;
+ ListCell *cell;
+ char *sep;
+
+ if (spec->is_default)
+ {
+ appendStringInfoString(buf, "DEFAULT");
+ break;
+ }
+
+ switch (spec->strategy)
+ {
+ case PARTITION_STRATEGY_HASH:
+ Assert(spec->modulus > 0 && spec->remainder >= 0);
+ Assert(spec->modulus > spec->remainder);
+
+ appendStringInfoString(buf, "FOR VALUES");
+ appendStringInfo(buf, " WITH (modulus %d, remainder %d)",
+ spec->modulus, spec->remainder);
+ break;
+
+ case PARTITION_STRATEGY_LIST:
+ Assert(spec->listdatums != NIL);
+
+ appendStringInfoString(buf, "FOR VALUES IN (");
+ sep = "";
+ foreach(cell, spec->listdatums)
+ {
+ Const *val = lfirst_node(Const, cell);
+
+ appendStringInfoString(buf, sep);
+ get_const_expr(val, context, -1);
+ sep = ", ";
+ }
+
+ appendStringInfoChar(buf, ')');
+ break;
+
+ case PARTITION_STRATEGY_RANGE:
+ Assert(spec->lowerdatums != NIL &&
+ spec->upperdatums != NIL &&
+ list_length(spec->lowerdatums) ==
+ list_length(spec->upperdatums));
+
+ appendStringInfo(buf, "FOR VALUES FROM %s TO %s",
+ get_range_partbound_string(spec->lowerdatums),
+ get_range_partbound_string(spec->upperdatums));
+ break;
+
+ default:
+ elog(ERROR, "unrecognized partition strategy: %d",
+ (int) spec->strategy);
+ break;
+ }
+ }
+ break;
+
+ case T_JsonValueExpr:
+ {
+ JsonValueExpr *jve = (JsonValueExpr *) node;
+
+ get_rule_expr((Node *) jve->raw_expr, context, false);
+ get_json_format(jve->format, context->buf);
+ }
+ break;
+
+ case T_JsonConstructorExpr:
+ get_json_constructor((JsonConstructorExpr *) node, context, false);
+ break;
+
+ case T_JsonIsPredicate:
+ {
+ JsonIsPredicate *pred = (JsonIsPredicate *) node;
+
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(context->buf, '(');
+
+ get_rule_expr_paren(pred->expr, context, true, node);
+
+ appendStringInfoString(context->buf, " IS JSON");
+
+ /* TODO: handle FORMAT clause */
+
+ switch (pred->item_type)
+ {
+ case JS_TYPE_SCALAR:
+ appendStringInfoString(context->buf, " SCALAR");
+ break;
+ case JS_TYPE_ARRAY:
+ appendStringInfoString(context->buf, " ARRAY");
+ break;
+ case JS_TYPE_OBJECT:
+ appendStringInfoString(context->buf, " OBJECT");
+ break;
+ default:
+ break;
+ }
+
+ if (pred->unique_keys)
+ appendStringInfoString(context->buf, " WITH UNIQUE KEYS");
+
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(context->buf, ')');
+ }
+ break;
+
+ case T_List:
+ {
+ char *sep;
+ ListCell *l;
+
+ sep = "";
+ foreach(l, (List *) node)
+ {
+ appendStringInfoString(buf, sep);
+ get_rule_expr((Node *) lfirst(l), context, showimplicit);
+ sep = ", ";
+ }
+ }
+ break;
+
+ case T_TableFunc:
+ get_tablefunc((TableFunc *) node, context, showimplicit);
+ break;
+
+ case T_CallStmt:
+ get_proc_expr((CallStmt *) node, context, showimplicit);
+ break;
+
+ default:
+ elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node));
+ break;
+ }
+}
+
+/*
+ * get_rule_expr_toplevel - Parse back a toplevel expression
+ *
+ * Same as get_rule_expr(), except that if the expr is just a Var, we pass
+ * istoplevel = true not false to get_variable(). This causes whole-row Vars
+ * to get printed with decoration that will prevent expansion of "*".
+ * We need to use this in contexts such as ROW() and VALUES(), where the
+ * parser would expand "foo.*" appearing at top level. (In principle we'd
+ * use this in get_target_list() too, but that has additional worries about
+ * whether to print AS, so it needs to invoke get_variable() directly anyway.)
+ */
+static void
+get_rule_expr_toplevel(Node *node, deparse_context *context,
+ bool showimplicit)
+{
+ if (node && IsA(node, Var))
+ (void) get_variable((Var *) node, 0, true, context);
+ else
+ get_rule_expr(node, context, showimplicit);
+}
+
+/*
+ * get_rule_list_toplevel - Parse back a list of toplevel expressions
+ *
+ * Apply get_rule_expr_toplevel() to each element of a List.
+ *
+ * This adds commas between the expressions, but caller is responsible
+ * for printing surrounding decoration.
+ */
+static void
+get_rule_list_toplevel(List *lst, deparse_context *context,
+ bool showimplicit)
+{
+ const char *sep;
+ ListCell *lc;
+
+ sep = "";
+ foreach(lc, lst)
+ {
+ Node *e = (Node *) lfirst(lc);
+
+ appendStringInfoString(context->buf, sep);
+ get_rule_expr_toplevel(e, context, showimplicit);
+ sep = ", ";
+ }
+}
+
+/*
+ * get_rule_expr_funccall - Parse back a function-call expression
+ *
+ * Same as get_rule_expr(), except that we guarantee that the output will
+ * look like a function call, or like one of the things the grammar treats as
+ * equivalent to a function call (see the func_expr_windowless production).
+ * This is needed in places where the grammar uses func_expr_windowless and
+ * you can't substitute a parenthesized a_expr. If what we have isn't going
+ * to look like a function call, wrap it in a dummy CAST() expression, which
+ * will satisfy the grammar --- and, indeed, is likely what the user wrote to
+ * produce such a thing.
+ */
+static void
+get_rule_expr_funccall(Node *node, deparse_context *context,
+ bool showimplicit)
+{
+ if (looks_like_function(node))
+ get_rule_expr(node, context, showimplicit);
+ else
+ {
+ StringInfo buf = context->buf;
+
+ appendStringInfoString(buf, "CAST(");
+ /* no point in showing any top-level implicit cast */
+ get_rule_expr(node, context, false);
+ appendStringInfo(buf, " AS %s)",
+ format_type_with_typemod(exprType(node),
+ exprTypmod(node)));
+ }
+}
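+
+/*
+ * For illustration: if the expression here has been simplified to something
+ * that no longer looks like a function call, say the constant 4, it is
+ * emitted as "CAST(4 AS integer)", which still satisfies the
+ * func_expr_windowless production.
+ */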
+
+/*
+ * Helper function to identify node types that satisfy func_expr_windowless.
+ * If in doubt, "false" is always a safe answer.
+ */
+static bool
+looks_like_function(Node *node)
+{
+ if (node == NULL)
+ return false; /* probably shouldn't happen */
+ switch (nodeTag(node))
+ {
+ case T_FuncExpr:
+ /* OK, unless it's going to deparse as a cast */
+ return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL ||
+ ((FuncExpr *) node)->funcformat == COERCE_SQL_SYNTAX);
+ case T_NullIfExpr:
+ case T_CoalesceExpr:
+ case T_MinMaxExpr:
+ case T_SQLValueFunction:
+ case T_XmlExpr:
+ /* these are all accepted by func_expr_common_subexpr */
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/*
+ * get_oper_expr - Parse back an OpExpr node
+ */
+static void
+get_oper_expr(OpExpr *expr, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ Oid opno = expr->opno;
+ List *args = expr->args;
+
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ if (list_length(args) == 2)
+ {
+ /* binary operator */
+ Node *arg1 = (Node *) linitial(args);
+ Node *arg2 = (Node *) lsecond(args);
+
+ get_rule_expr_paren(arg1, context, true, (Node *) expr);
+ appendStringInfo(buf, " %s ",
+ generate_operator_name(opno,
+ exprType(arg1),
+ exprType(arg2)));
+ get_rule_expr_paren(arg2, context, true, (Node *) expr);
+ }
+ else
+ {
+ /* prefix operator */
+ Node *arg = (Node *) linitial(args);
+
+ appendStringInfo(buf, "%s ",
+ generate_operator_name(opno,
+ InvalidOid,
+ exprType(arg)));
+ get_rule_expr_paren(arg, context, true, (Node *) expr);
+ }
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * get_func_expr - Parse back a FuncExpr node
+ */
+static void
+get_func_expr(FuncExpr *expr, deparse_context *context,
+ bool showimplicit)
+{
+ StringInfo buf = context->buf;
+ Oid funcoid = expr->funcid;
+ Oid argtypes[FUNC_MAX_ARGS];
+ int nargs;
+ List *argnames;
+ bool use_variadic;
+ ListCell *l;
+
+ /*
+ * If the function call came from an implicit coercion, then just show the
+ * first argument --- unless caller wants to see implicit coercions.
+ */
+ if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit)
+ {
+ get_rule_expr_paren((Node *) linitial(expr->args), context,
+ false, (Node *) expr);
+ return;
+ }
+
+ /*
+ * If the function call came from a cast, then show the first argument
+ * plus an explicit cast operation.
+ */
+ if (expr->funcformat == COERCE_EXPLICIT_CAST ||
+ expr->funcformat == COERCE_IMPLICIT_CAST)
+ {
+ Node *arg = linitial(expr->args);
+ Oid rettype = expr->funcresulttype;
+ int32 coercedTypmod;
+
+ /* Get the typmod if this is a length-coercion function */
+ (void) exprIsLengthCoercion((Node *) expr, &coercedTypmod);
+
+ get_coercion_expr(arg, context,
+ rettype, coercedTypmod,
+ (Node *) expr);
+
+ return;
+ }
+
+ /*
+ * If the function was called using one of the SQL spec's random special
+ * syntaxes, try to reproduce that. If we don't recognize the function,
+ * fall through.
+ */
+ if (expr->funcformat == COERCE_SQL_SYNTAX)
+ {
+ if (get_func_sql_syntax(expr, context))
+ return;
+ }
+
+ /*
+ * Normal function: display as proname(args). First we need to extract
+ * the argument datatypes.
+ */
+ if (list_length(expr->args) > FUNC_MAX_ARGS)
+ ereport(ERROR,
+ (errcode(ERRCODE_TOO_MANY_ARGUMENTS),
+ errmsg("too many arguments")));
+ nargs = 0;
+ argnames = NIL;
+ foreach(l, expr->args)
+ {
+ Node *arg = (Node *) lfirst(l);
+
+ if (IsA(arg, NamedArgExpr))
+ argnames = lappend(argnames, ((NamedArgExpr *) arg)->name);
+ argtypes[nargs] = exprType(arg);
+ nargs++;
+ }
+
+ appendStringInfo(buf, "%s(",
+ generate_function_name(funcoid, nargs,
+ argnames, argtypes,
+ expr->funcvariadic,
+ &use_variadic,
+ context->special_exprkind));
+ nargs = 0;
+ foreach(l, expr->args)
+ {
+ if (nargs++ > 0)
+ appendStringInfoString(buf, ", ");
+ if (use_variadic && lnext(expr->args, l) == NULL)
+ appendStringInfoString(buf, "VARIADIC ");
+ get_rule_expr((Node *) lfirst(l), context, true);
+ }
+
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * get_proc_expr - Parse back a CallStmt node
+ */
+static void
+get_proc_expr(CallStmt *stmt, deparse_context *context,
+ bool showimplicit)
+{
+ StringInfo buf = context->buf;
+ Oid functionOid = stmt->funcexpr->funcid;
+ bool use_variadic;
+ Oid *argumentTypes;
+ List *finalArgumentList = NIL;
+ ListCell *argumentCell;
+ List *namedArgList = NIL;
+ int numberOfArgs = -1;
+
+ if (!get_merged_argument_list(stmt, &namedArgList, &argumentTypes,
+ &finalArgumentList, &numberOfArgs))
+ {
+ /* Nothing merged i.e. no OUT arguments */
+ get_func_expr((FuncExpr *) stmt->funcexpr, context, showimplicit);
+ return;
+ }
+
+ appendStringInfo(buf, "%s(",
+ generate_function_name(functionOid, numberOfArgs,
+ namedArgList, argumentTypes,
+ stmt->funcexpr->funcvariadic,
+ &use_variadic,
+ context->special_exprkind));
+ int argNumber = 0;
+ foreach(argumentCell, finalArgumentList)
+ {
+ if (argNumber++ > 0)
+ appendStringInfoString(buf, ", ");
+ if (use_variadic && lnext(finalArgumentList, argumentCell) == NULL)
+ appendStringInfoString(buf, "VARIADIC ");
+		get_rule_expr((Node *) lfirst(argumentCell), context, true);
+ }
+
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * get_agg_expr - Parse back an Aggref node
+ */
+static void
+get_agg_expr(Aggref *aggref, deparse_context *context,
+ Aggref *original_aggref)
+{
+ get_agg_expr_helper(aggref, context, original_aggref, NULL, NULL,
+ false);
+}
+
+/*
+ * get_agg_expr_helper - subroutine for get_agg_expr and
+ * get_json_agg_constructor
+ */
+static void
+get_agg_expr_helper(Aggref *aggref, deparse_context *context,
+ Aggref *original_aggref, const char *funcname,
+ const char *options, bool is_json_objectagg)
+{
+ StringInfo buf = context->buf;
+ Oid argtypes[FUNC_MAX_ARGS];
+ int nargs;
+ bool use_variadic = false;
+
+ /*
+ * For a combining aggregate, we look up and deparse the corresponding
+ * partial aggregate instead. This is necessary because our input
+ * argument list has been replaced; the new argument list always has just
+ * one element, which will point to a partial Aggref that supplies us with
+ * transition states to combine.
+ */
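+	/*
+	 * For example, in a parallel plan the Finalize Aggregate's combining
+	 * count(*) is deparsed by following its single argument down to the
+	 * partial Aggref, while the Partial Aggregate node itself shows
+	 * "PARTIAL count(*)" via the aggsplit check below.
+	 */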
+ if (DO_AGGSPLIT_COMBINE(aggref->aggsplit))
+ {
+ TargetEntry *tle;
+
+ Assert(list_length(aggref->args) == 1);
+ tle = linitial_node(TargetEntry, aggref->args);
+ resolve_special_varno((Node *) tle->expr, context,
+ get_agg_combine_expr, original_aggref);
+ return;
+ }
+
+ /*
+ * Mark as PARTIAL, if appropriate. We look to the original aggref so as
+ * to avoid printing this when recursing from the code just above.
+ */
+ if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit))
+ appendStringInfoString(buf, "PARTIAL ");
+
+ /* Extract the argument types as seen by the parser */
+ nargs = get_aggregate_argtypes(aggref, argtypes);
+
+ if (!funcname)
+ funcname = generate_function_name(aggref->aggfnoid, nargs, NIL,
+ argtypes, aggref->aggvariadic,
+ &use_variadic,
+ context->special_exprkind);
+
+ /* Print the aggregate name, schema-qualified if needed */
+ appendStringInfo(buf, "%s(%s", funcname,
+ (aggref->aggdistinct != NIL) ? "DISTINCT " : "");
+
+ if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
+ {
+ /*
+ * Ordered-set aggregates do not use "*" syntax. Also, we needn't
+ * worry about inserting VARIADIC. So we can just dump the direct
+ * args as-is.
+ */
+ Assert(!aggref->aggvariadic);
+ get_rule_expr((Node *) aggref->aggdirectargs, context, true);
+ Assert(aggref->aggorder != NIL);
+ appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY ");
+ get_rule_orderby(aggref->aggorder, aggref->args, false, context);
+ }
+ else
+ {
+ /* aggstar can be set only in zero-argument aggregates */
+ if (aggref->aggstar)
+ appendStringInfoChar(buf, '*');
+ else
+ {
+ ListCell *l;
+ int i;
+
+ i = 0;
+ foreach(l, aggref->args)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+ Node *arg = (Node *) tle->expr;
+
+ Assert(!IsA(arg, NamedArgExpr));
+ if (tle->resjunk)
+ continue;
+ if (i++ > 0)
+ {
+ if (is_json_objectagg)
+ {
+ /*
+ * the ABSENT ON NULL and WITH UNIQUE args are printed
+ * separately, so ignore them here
+ */
+ if (i > 2)
+ break;
+
+ appendStringInfoString(buf, " : ");
+ }
+ else
+ appendStringInfoString(buf, ", ");
+ }
+ if (use_variadic && i == nargs)
+ appendStringInfoString(buf, "VARIADIC ");
+ get_rule_expr(arg, context, true);
+ }
+ }
+
+ if (aggref->aggorder != NIL)
+ {
+ appendStringInfoString(buf, " ORDER BY ");
+ get_rule_orderby(aggref->aggorder, aggref->args, false, context);
+ }
+ }
+
+ if (options)
+ appendStringInfoString(buf, options);
+
+ if (aggref->aggfilter != NULL)
+ {
+ appendStringInfoString(buf, ") FILTER (WHERE ");
+ get_rule_expr((Node *) aggref->aggfilter, context, false);
+ }
+
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * This is a helper function for get_agg_expr(). It's used when we deparse
+ * a combining Aggref; resolve_special_varno locates the corresponding partial
+ * Aggref and then calls this.
+ */
+static void
+get_agg_combine_expr(Node *node, deparse_context *context, void *callback_arg)
+{
+ Aggref *aggref;
+ Aggref *original_aggref = callback_arg;
+
+ if (!IsA(node, Aggref))
+ elog(ERROR, "combining Aggref does not point to an Aggref");
+
+ aggref = (Aggref *) node;
+ get_agg_expr(aggref, context, original_aggref);
+}
+
+/*
+ * get_windowfunc_expr - Parse back a WindowFunc node
+ */
+static void
+get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context)
+{
+ get_windowfunc_expr_helper(wfunc, context, NULL, NULL, false);
+}
+
+/*
+ * get_windowfunc_expr_helper - subroutine for get_windowfunc_expr and
+ * get_json_agg_constructor
+ */
+static void
+get_windowfunc_expr_helper(WindowFunc *wfunc, deparse_context *context,
+ const char *funcname, const char *options,
+ bool is_json_objectagg)
+{
+ StringInfo buf = context->buf;
+ Oid argtypes[FUNC_MAX_ARGS];
+ int nargs;
+ List *argnames;
+ ListCell *l;
+
+ if (list_length(wfunc->args) > FUNC_MAX_ARGS)
+ ereport(ERROR,
+ (errcode(ERRCODE_TOO_MANY_ARGUMENTS),
+ errmsg("too many arguments")));
+ nargs = 0;
+ argnames = NIL;
+ foreach(l, wfunc->args)
+ {
+ Node *arg = (Node *) lfirst(l);
+
+ if (IsA(arg, NamedArgExpr))
+ argnames = lappend(argnames, ((NamedArgExpr *) arg)->name);
+ argtypes[nargs] = exprType(arg);
+ nargs++;
+ }
+
+ if (!funcname)
+ funcname = generate_function_name(wfunc->winfnoid, nargs, argnames,
+ argtypes, false, NULL,
+ context->special_exprkind);
+
+ appendStringInfo(buf, "%s(", funcname);
+
+ /* winstar can be set only in zero-argument aggregates */
+ if (wfunc->winstar)
+ appendStringInfoChar(buf, '*');
+ else
+ {
+ if (is_json_objectagg)
+ {
+ get_rule_expr((Node *) linitial(wfunc->args), context, false);
+ appendStringInfoString(buf, " : ");
+ get_rule_expr((Node *) lsecond(wfunc->args), context, false);
+ }
+ else
+ get_rule_expr((Node *) wfunc->args, context, true);
+ }
+
+ if (options)
+ appendStringInfoString(buf, options);
+
+ if (wfunc->aggfilter != NULL)
+ {
+ appendStringInfoString(buf, ") FILTER (WHERE ");
+ get_rule_expr((Node *) wfunc->aggfilter, context, false);
+ }
+
+ appendStringInfoString(buf, ") OVER ");
+
+ foreach(l, context->windowClause)
+ {
+ WindowClause *wc = (WindowClause *) lfirst(l);
+
+ if (wc->winref == wfunc->winref)
+ {
+ if (wc->name)
+ appendStringInfoString(buf, quote_identifier(wc->name));
+ else
+ get_rule_windowspec(wc, context->windowTList, context);
+ break;
+ }
+ }
+ if (l == NULL)
+ {
+ if (context->windowClause)
+ elog(ERROR, "could not find window clause for winref %u",
+ wfunc->winref);
+
+ /*
+ * In EXPLAIN, we don't have window context information available, so
+ * we have to settle for this:
+ */
+ appendStringInfoString(buf, "(?)");
+ }
+}
+
+/*
+ * get_func_sql_syntax - Parse back a SQL-syntax function call
+ *
+ * Returns true if we successfully deparsed, false if we did not
+ * recognize the function.
+ */
+static bool
+get_func_sql_syntax(FuncExpr *expr, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ Oid funcoid = expr->funcid;
+
+ switch (funcoid)
+ {
+ case F_TIMEZONE_INTERVAL_TIMESTAMP:
+ case F_TIMEZONE_INTERVAL_TIMESTAMPTZ:
+ case F_TIMEZONE_INTERVAL_TIMETZ:
+ case F_TIMEZONE_TEXT_TIMESTAMP:
+ case F_TIMEZONE_TEXT_TIMESTAMPTZ:
+ case F_TIMEZONE_TEXT_TIMETZ:
+ /* AT TIME ZONE ... note reversed argument order */
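+			/* e.g. timezone(zone, ts) comes back out as (ts AT TIME ZONE zone) */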
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren((Node *) lsecond(expr->args), context, false,
+ (Node *) expr);
+ appendStringInfoString(buf, " AT TIME ZONE ");
+ get_rule_expr_paren((Node *) linitial(expr->args), context, false,
+ (Node *) expr);
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_OVERLAPS_TIMESTAMPTZ_INTERVAL_TIMESTAMPTZ_INTERVAL:
+ case F_OVERLAPS_TIMESTAMPTZ_INTERVAL_TIMESTAMPTZ_TIMESTAMPTZ:
+ case F_OVERLAPS_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ_INTERVAL:
+ case F_OVERLAPS_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ_TIMESTAMPTZ:
+ case F_OVERLAPS_TIMESTAMP_INTERVAL_TIMESTAMP_INTERVAL:
+ case F_OVERLAPS_TIMESTAMP_INTERVAL_TIMESTAMP_TIMESTAMP:
+ case F_OVERLAPS_TIMESTAMP_TIMESTAMP_TIMESTAMP_INTERVAL:
+ case F_OVERLAPS_TIMESTAMP_TIMESTAMP_TIMESTAMP_TIMESTAMP:
+ case F_OVERLAPS_TIMETZ_TIMETZ_TIMETZ_TIMETZ:
+ case F_OVERLAPS_TIME_INTERVAL_TIME_INTERVAL:
+ case F_OVERLAPS_TIME_INTERVAL_TIME_TIME:
+ case F_OVERLAPS_TIME_TIME_TIME_INTERVAL:
+ case F_OVERLAPS_TIME_TIME_TIME_TIME:
+ /* (x1, x2) OVERLAPS (y1, y2) */
+ appendStringInfoString(buf, "((");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoString(buf, ", ");
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ appendStringInfoString(buf, ") OVERLAPS (");
+ get_rule_expr((Node *) lthird(expr->args), context, false);
+ appendStringInfoString(buf, ", ");
+ get_rule_expr((Node *) lfourth(expr->args), context, false);
+ appendStringInfoString(buf, "))");
+ return true;
+
+ case F_EXTRACT_TEXT_DATE:
+ case F_EXTRACT_TEXT_TIME:
+ case F_EXTRACT_TEXT_TIMETZ:
+ case F_EXTRACT_TEXT_TIMESTAMP:
+ case F_EXTRACT_TEXT_TIMESTAMPTZ:
+ case F_EXTRACT_TEXT_INTERVAL:
+ /* EXTRACT (x FROM y) */
+ appendStringInfoString(buf, "EXTRACT(");
+ {
+ Const *con = (Const *) linitial(expr->args);
+
+ Assert(IsA(con, Const) &&
+ con->consttype == TEXTOID &&
+ !con->constisnull);
+ appendStringInfoString(buf, TextDatumGetCString(con->constvalue));
+ }
+ appendStringInfoString(buf, " FROM ");
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_IS_NORMALIZED:
+ /* IS xxx NORMALIZED */
+ appendStringInfoString(buf, "(");
+ get_rule_expr_paren((Node *) linitial(expr->args), context, false,
+ (Node *) expr);
+ appendStringInfoString(buf, " IS");
+ if (list_length(expr->args) == 2)
+ {
+ Const *con = (Const *) lsecond(expr->args);
+
+ Assert(IsA(con, Const) &&
+ con->consttype == TEXTOID &&
+ !con->constisnull);
+ appendStringInfo(buf, " %s",
+ TextDatumGetCString(con->constvalue));
+ }
+ appendStringInfoString(buf, " NORMALIZED)");
+ return true;
+
+ case F_PG_COLLATION_FOR:
+ /* COLLATION FOR */
+ appendStringInfoString(buf, "COLLATION FOR (");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_NORMALIZE:
+ /* NORMALIZE() */
+ appendStringInfoString(buf, "NORMALIZE(");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ if (list_length(expr->args) == 2)
+ {
+ Const *con = (Const *) lsecond(expr->args);
+
+ Assert(IsA(con, Const) &&
+ con->consttype == TEXTOID &&
+ !con->constisnull);
+ appendStringInfo(buf, ", %s",
+ TextDatumGetCString(con->constvalue));
+ }
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_OVERLAY_BIT_BIT_INT4:
+ case F_OVERLAY_BIT_BIT_INT4_INT4:
+ case F_OVERLAY_BYTEA_BYTEA_INT4:
+ case F_OVERLAY_BYTEA_BYTEA_INT4_INT4:
+ case F_OVERLAY_TEXT_TEXT_INT4:
+ case F_OVERLAY_TEXT_TEXT_INT4_INT4:
+ /* OVERLAY() */
+ appendStringInfoString(buf, "OVERLAY(");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoString(buf, " PLACING ");
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ appendStringInfoString(buf, " FROM ");
+ get_rule_expr((Node *) lthird(expr->args), context, false);
+ if (list_length(expr->args) == 4)
+ {
+ appendStringInfoString(buf, " FOR ");
+ get_rule_expr((Node *) lfourth(expr->args), context, false);
+ }
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_POSITION_BIT_BIT:
+ case F_POSITION_BYTEA_BYTEA:
+ case F_POSITION_TEXT_TEXT:
+ /* POSITION() ... extra parens since args are b_expr not a_expr */
+ appendStringInfoString(buf, "POSITION((");
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ appendStringInfoString(buf, ") IN (");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoString(buf, "))");
+ return true;
+
+ case F_SUBSTRING_BIT_INT4:
+ case F_SUBSTRING_BIT_INT4_INT4:
+ case F_SUBSTRING_BYTEA_INT4:
+ case F_SUBSTRING_BYTEA_INT4_INT4:
+ case F_SUBSTRING_TEXT_INT4:
+ case F_SUBSTRING_TEXT_INT4_INT4:
+ /* SUBSTRING FROM/FOR (i.e., integer-position variants) */
+ appendStringInfoString(buf, "SUBSTRING(");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoString(buf, " FROM ");
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ if (list_length(expr->args) == 3)
+ {
+ appendStringInfoString(buf, " FOR ");
+ get_rule_expr((Node *) lthird(expr->args), context, false);
+ }
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_SUBSTRING_TEXT_TEXT_TEXT:
+ /* SUBSTRING SIMILAR/ESCAPE */
+ appendStringInfoString(buf, "SUBSTRING(");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoString(buf, " SIMILAR ");
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ appendStringInfoString(buf, " ESCAPE ");
+ get_rule_expr((Node *) lthird(expr->args), context, false);
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_BTRIM_BYTEA_BYTEA:
+ case F_BTRIM_TEXT:
+ case F_BTRIM_TEXT_TEXT:
+ /* TRIM() */
+ appendStringInfoString(buf, "TRIM(BOTH");
+ if (list_length(expr->args) == 2)
+ {
+ appendStringInfoChar(buf, ' ');
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ }
+ appendStringInfoString(buf, " FROM ");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_LTRIM_BYTEA_BYTEA:
+ case F_LTRIM_TEXT:
+ case F_LTRIM_TEXT_TEXT:
+ /* TRIM() */
+ appendStringInfoString(buf, "TRIM(LEADING");
+ if (list_length(expr->args) == 2)
+ {
+ appendStringInfoChar(buf, ' ');
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ }
+ appendStringInfoString(buf, " FROM ");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_RTRIM_BYTEA_BYTEA:
+ case F_RTRIM_TEXT:
+ case F_RTRIM_TEXT_TEXT:
+ /* TRIM() */
+ appendStringInfoString(buf, "TRIM(TRAILING");
+ if (list_length(expr->args) == 2)
+ {
+ appendStringInfoChar(buf, ' ');
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ }
+ appendStringInfoString(buf, " FROM ");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoChar(buf, ')');
+ return true;
+
+ case F_SYSTEM_USER:
+ appendStringInfoString(buf, "SYSTEM_USER");
+ return true;
+
+ case F_XMLEXISTS:
+ /* XMLEXISTS ... extra parens because args are c_expr */
+ appendStringInfoString(buf, "XMLEXISTS((");
+ get_rule_expr((Node *) linitial(expr->args), context, false);
+ appendStringInfoString(buf, ") PASSING (");
+ get_rule_expr((Node *) lsecond(expr->args), context, false);
+ appendStringInfoString(buf, "))");
+ return true;
+ }
+ return false;
+}
+
+/* ----------
+ * get_coercion_expr
+ *
+ * Make a string representation of a value coerced to a specific type
+ * ----------
+ */
+static void
+get_coercion_expr(Node *arg, deparse_context *context,
+ Oid resulttype, int32 resulttypmod,
+ Node *parentNode)
+{
+ StringInfo buf = context->buf;
+
+ /*
+ * Since parse_coerce.c doesn't immediately collapse application of
+ * length-coercion functions to constants, what we'll typically see in
+ * such cases is a Const with typmod -1 and a length-coercion function
+ * right above it. Avoid generating redundant output. However, beware of
+ * suppressing casts when the user actually wrote something like
+ * 'foo'::text::char(3).
+ *
+ * Note: it might seem that we are missing the possibility of needing to
+ * print a COLLATE clause for such a Const. However, a Const could only
+ * have nondefault collation in a post-constant-folding tree, in which the
+ * length coercion would have been folded too. See also the special
+ * handling of CollateExpr in coerce_to_target_type(): any collation
+ * marking will be above the coercion node, not below it.
+ */
+ if (arg && IsA(arg, Const) &&
+ ((Const *) arg)->consttype == resulttype &&
+ ((Const *) arg)->consttypmod == -1)
+ {
+ /* Show the constant without normal ::typename decoration */
+ get_const_expr((Const *) arg, context, -1);
+ }
+ else
+ {
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr_paren(arg, context, false, parentNode);
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ }
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(resulttype, resulttypmod));
+}
+
+/* ----------
+ * get_const_expr
+ *
+ * Make a string representation of a Const
+ *
+ * showtype can be -1 to never show "::typename" decoration, or +1 to always
+ * show it, or 0 to show it only if the constant wouldn't be assumed to be
+ * the right type by default.
+ *
+ * If the Const's collation isn't default for its type, show that too.
+ * We mustn't do this when showtype is -1 (since that means the caller will
+ * print "::typename", and we can't put a COLLATE clause in between). It's
+ * caller's responsibility that collation isn't missed in such cases.
+ * ----------
+ */
+static void
+get_const_expr(Const *constval, deparse_context *context, int showtype)
+{
+ StringInfo buf = context->buf;
+ Oid typoutput;
+ bool typIsVarlena;
+ char *extval;
+ bool needlabel = false;
+
+ if (constval->constisnull)
+ {
+ /*
+ * Always label the type of a NULL constant to prevent misdecisions
+ * about type when reparsing.
+ */
+ appendStringInfoString(buf, "NULL");
+ if (showtype >= 0)
+ {
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(constval->consttype,
+ constval->consttypmod));
+ get_const_collation(constval, context);
+ }
+ return;
+ }
+
+ getTypeOutputInfo(constval->consttype,
+ &typoutput, &typIsVarlena);
+
+ extval = OidOutputFunctionCall(typoutput, constval->constvalue);
+
+ switch (constval->consttype)
+ {
+ case INT4OID:
+
+ /*
+ * INT4 can be printed without any decoration, unless it is
+ * negative; in that case print it as '-nnn'::integer to ensure
+ * that the output will re-parse as a constant, not as a constant
+ * plus operator. In most cases we could get away with printing
+ * (-nnn) instead, because of the way that gram.y handles negative
+ * literals; but that doesn't work for INT_MIN, and it doesn't
+ * seem that much prettier anyway.
+ */
+ if (extval[0] != '-')
+ appendStringInfoString(buf, extval);
+ else
+ {
+ appendStringInfo(buf, "'%s'", extval);
+ needlabel = true; /* we must attach a cast */
+ }
+ break;
+
+ case NUMERICOID:
+
+ /*
+ * NUMERIC can be printed without quotes if it looks like a float
+ * constant (not an integer, and not Infinity or NaN) and doesn't
+ * have a leading sign (for the same reason as for INT4).
+ */
+ if (isdigit((unsigned char) extval[0]) &&
+ strcspn(extval, "eE.") != strlen(extval))
+ {
+ appendStringInfoString(buf, extval);
+ }
+ else
+ {
+ appendStringInfo(buf, "'%s'", extval);
+ needlabel = true; /* we must attach a cast */
+ }
+ break;
+
+ case BITOID:
+ case VARBITOID:
+ appendStringInfo(buf, "B'%s'", extval);
+ break;
+
+ case BOOLOID:
+ if (strcmp(extval, "t") == 0)
+ appendStringInfoString(buf, "true");
+ else
+ appendStringInfoString(buf, "false");
+ break;
+
+ default:
+ simple_quote_literal(buf, extval);
+ break;
+ }
+
+ pfree(extval);
+
+ if (showtype < 0)
+ return;
+
+ /*
+ * For showtype == 0, append ::typename unless the constant will be
+ * implicitly typed as the right type when it is read in.
+ *
+ * XXX this code has to be kept in sync with the behavior of the parser,
+ * especially make_const.
+ */
+ switch (constval->consttype)
+ {
+ case BOOLOID:
+ case UNKNOWNOID:
+ /* These types can be left unlabeled */
+ needlabel = false;
+ break;
+ case INT4OID:
+ /* We determined above whether a label is needed */
+ break;
+ case NUMERICOID:
+
+ /*
+ * Float-looking constants will be typed as numeric, which we
+ * checked above; but if there's a nondefault typmod we need to
+ * show it.
+ */
+ needlabel |= (constval->consttypmod >= 0);
+ break;
+ default:
+ needlabel = true;
+ break;
+ }
+ if (needlabel || showtype > 0)
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(constval->consttype,
+ constval->consttypmod));
+
+ get_const_collation(constval, context);
+}
+
+/*
+ * helper for get_const_expr: append COLLATE if needed
+ */
+static void
+get_const_collation(Const *constval, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+
+ if (OidIsValid(constval->constcollid))
+ {
+ Oid typcollation = get_typcollation(constval->consttype);
+
+ if (constval->constcollid != typcollation)
+ {
+ appendStringInfo(buf, " COLLATE %s",
+ generate_collation_name(constval->constcollid));
+ }
+ }
+}
+
+/*
+ * get_json_format - Parse back a JsonFormat node
+ */
+static void
+get_json_format(JsonFormat *format, StringInfo buf)
+{
+ if (format->format_type == JS_FORMAT_DEFAULT)
+ return;
+
+ appendStringInfoString(buf,
+ format->format_type == JS_FORMAT_JSONB ?
+ " FORMAT JSONB" : " FORMAT JSON");
+
+ if (format->encoding != JS_ENC_DEFAULT)
+ {
+ const char *encoding;
+
+ encoding =
+ format->encoding == JS_ENC_UTF16 ? "UTF16" :
+ format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8";
+
+ appendStringInfo(buf, " ENCODING %s", encoding);
+ }
+}
+
+/*
+ * get_json_returning - Parse back a JsonReturning structure
+ */
+static void
+get_json_returning(JsonReturning *returning, StringInfo buf,
+ bool json_format_by_default)
+{
+ if (!OidIsValid(returning->typid))
+ return;
+
+ appendStringInfo(buf, " RETURNING %s",
+ format_type_with_typemod(returning->typid,
+ returning->typmod));
+
+ if (!json_format_by_default ||
+ returning->format->format_type !=
+ (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON))
+ get_json_format(returning->format, buf);
+}
+
+/*
+ * get_json_constructor - Parse back a JsonConstructorExpr node
+ */
+static void
+get_json_constructor(JsonConstructorExpr *ctor, deparse_context *context,
+ bool showimplicit)
+{
+ StringInfo buf = context->buf;
+ const char *funcname;
+ bool is_json_object;
+ int curridx;
+ ListCell *lc;
+
+ if (ctor->type == JSCTOR_JSON_OBJECTAGG)
+ {
+ get_json_agg_constructor(ctor, context, "JSON_OBJECTAGG", true);
+ return;
+ }
+ else if (ctor->type == JSCTOR_JSON_ARRAYAGG)
+ {
+ get_json_agg_constructor(ctor, context, "JSON_ARRAYAGG", false);
+ return;
+ }
+
+ switch (ctor->type)
+ {
+ case JSCTOR_JSON_OBJECT:
+ funcname = "JSON_OBJECT";
+ break;
+ case JSCTOR_JSON_ARRAY:
+ funcname = "JSON_ARRAY";
+ break;
+ default:
+ elog(ERROR, "invalid JsonConstructorType %d", ctor->type);
+ }
+
+ appendStringInfo(buf, "%s(", funcname);
+
+ is_json_object = ctor->type == JSCTOR_JSON_OBJECT;
+ foreach(lc, ctor->args)
+ {
+ curridx = foreach_current_index(lc);
+ if (curridx > 0)
+ {
+ const char *sep;
+
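+			/*
+			 * JSON_OBJECT arguments alternate key, value, key, value, ...;
+			 * separate a key from its value with " : " and one pair from the
+			 * next with ", ".
+			 */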
+ sep = (is_json_object && (curridx % 2) != 0) ? " : " : ", ";
+ appendStringInfoString(buf, sep);
+ }
+
+ get_rule_expr((Node *) lfirst(lc), context, true);
+ }
+
+ get_json_constructor_options(ctor, buf);
+ appendStringInfo(buf, ")");
+}
+
+/*
+ * Append options, if any, to the JSON constructor being deparsed
+ */
+static void
+get_json_constructor_options(JsonConstructorExpr *ctor, StringInfo buf)
+{
+ if (ctor->absent_on_null)
+ {
+ if (ctor->type == JSCTOR_JSON_OBJECT ||
+ ctor->type == JSCTOR_JSON_OBJECTAGG)
+ appendStringInfoString(buf, " ABSENT ON NULL");
+ }
+ else
+ {
+ if (ctor->type == JSCTOR_JSON_ARRAY ||
+ ctor->type == JSCTOR_JSON_ARRAYAGG)
+ appendStringInfoString(buf, " NULL ON NULL");
+ }
+
+ if (ctor->unique)
+ appendStringInfoString(buf, " WITH UNIQUE KEYS");
+
+ get_json_returning(ctor->returning, buf, true);
+}
+
+/*
+ * get_json_agg_constructor - Parse back an aggregate JsonConstructorExpr node
+ */
+static void
+get_json_agg_constructor(JsonConstructorExpr *ctor, deparse_context *context,
+ const char *funcname, bool is_json_objectagg)
+{
+ StringInfoData options;
+
+ initStringInfo(&options);
+ get_json_constructor_options(ctor, &options);
+
+ if (IsA(ctor->func, Aggref))
+ get_agg_expr_helper((Aggref *) ctor->func, context,
+ (Aggref *) ctor->func,
+ funcname, options.data, is_json_objectagg);
+ else if (IsA(ctor->func, WindowFunc))
+ get_windowfunc_expr_helper((WindowFunc *) ctor->func, context,
+ funcname, options.data,
+ is_json_objectagg);
+ else
+ elog(ERROR, "invalid JsonConstructorExpr underlying node type: %d",
+ nodeTag(ctor->func));
+}
+
+/*
+ * simple_quote_literal - Format a string as a SQL literal, append to buf
+ */
+static void
+simple_quote_literal(StringInfo buf, const char *val)
+{
+ const char *valptr;
+
+ /*
+ * We form the string literal according to the prevailing setting of
+ * standard_conforming_strings; we never use E''. User is responsible for
+ * making sure result is used correctly.
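+	 *
+	 * For example, O'Brien is emitted as 'O''Brien'; backslashes are doubled
+	 * only when standard_conforming_strings is off.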
+ */
+ appendStringInfoChar(buf, '\'');
+ for (valptr = val; *valptr; valptr++)
+ {
+ char ch = *valptr;
+
+ if (SQL_STR_DOUBLE(ch, !standard_conforming_strings))
+ appendStringInfoChar(buf, ch);
+ appendStringInfoChar(buf, ch);
+ }
+ appendStringInfoChar(buf, '\'');
+}
+
+/* ----------
+ * get_sublink_expr - Parse back a sublink
+ * ----------
+ */
+static void
+get_sublink_expr(SubLink *sublink, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ Query *query = (Query *) (sublink->subselect);
+ char *opname = NULL;
+ bool need_paren;
+
+ if (sublink->subLinkType == ARRAY_SUBLINK)
+ appendStringInfoString(buf, "ARRAY(");
+ else
+ appendStringInfoChar(buf, '(');
+
+ /*
+ * Note that we print the name of only the first operator, when there are
+ * multiple combining operators. This is an approximation that could go
+ * wrong in various scenarios (operators in different schemas, renamed
+ * operators, etc) but there is not a whole lot we can do about it, since
+ * the syntax allows only one operator to be shown.
+ */
+ if (sublink->testexpr)
+ {
+ if (IsA(sublink->testexpr, OpExpr))
+ {
+ /* single combining operator */
+ OpExpr *opexpr = (OpExpr *) sublink->testexpr;
+
+ get_rule_expr(linitial(opexpr->args), context, true);
+ opname = generate_operator_name(opexpr->opno,
+ exprType(linitial(opexpr->args)),
+ exprType(lsecond(opexpr->args)));
+ }
+ else if (IsA(sublink->testexpr, BoolExpr))
+ {
+ /* multiple combining operators, = or <> cases */
+ char *sep;
+ ListCell *l;
+
+ appendStringInfoChar(buf, '(');
+ sep = "";
+ foreach(l, ((BoolExpr *) sublink->testexpr)->args)
+ {
+ OpExpr *opexpr = lfirst_node(OpExpr, l);
+
+ appendStringInfoString(buf, sep);
+ get_rule_expr(linitial(opexpr->args), context, true);
+ if (!opname)
+ opname = generate_operator_name(opexpr->opno,
+ exprType(linitial(opexpr->args)),
+ exprType(lsecond(opexpr->args)));
+ sep = ", ";
+ }
+ appendStringInfoChar(buf, ')');
+ }
+ else if (IsA(sublink->testexpr, RowCompareExpr))
+ {
+ /* multiple combining operators, < <= > >= cases */
+ RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr;
+
+ appendStringInfoChar(buf, '(');
+ get_rule_expr((Node *) rcexpr->largs, context, true);
+ opname = generate_operator_name(linitial_oid(rcexpr->opnos),
+ exprType(linitial(rcexpr->largs)),
+ exprType(linitial(rcexpr->rargs)));
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ elog(ERROR, "unrecognized testexpr type: %d",
+ (int) nodeTag(sublink->testexpr));
+ }
+
+ need_paren = true;
+
+ switch (sublink->subLinkType)
+ {
+ case EXISTS_SUBLINK:
+ appendStringInfoString(buf, "EXISTS ");
+ break;
+
+ case ANY_SUBLINK:
+ if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */
+ appendStringInfoString(buf, " IN ");
+ else
+ appendStringInfo(buf, " %s ANY ", opname);
+ break;
+
+ case ALL_SUBLINK:
+ appendStringInfo(buf, " %s ALL ", opname);
+ break;
+
+ case ROWCOMPARE_SUBLINK:
+ appendStringInfo(buf, " %s ", opname);
+ break;
+
+ case EXPR_SUBLINK:
+ case MULTIEXPR_SUBLINK:
+ case ARRAY_SUBLINK:
+ need_paren = false;
+ break;
+
+ case CTE_SUBLINK: /* shouldn't occur in a SubLink */
+ default:
+ elog(ERROR, "unrecognized sublink type: %d",
+ (int) sublink->subLinkType);
+ break;
+ }
+
+ if (need_paren)
+ appendStringInfoChar(buf, '(');
+
+ get_query_def(query, buf, context->namespaces, NULL, false,
+ context->prettyFlags, context->wrapColumn,
+ context->indentLevel);
+
+ if (need_paren)
+ appendStringInfoString(buf, "))");
+ else
+ appendStringInfoChar(buf, ')');
+}
+
+/* ----------
+ * get_tablefunc - Parse back a table function
+ * ----------
+ */
+static void
+get_tablefunc(TableFunc *tf, deparse_context *context, bool showimplicit)
+{
+ StringInfo buf = context->buf;
+
+ /* XMLTABLE is the only existing implementation. */
+
+ appendStringInfoString(buf, "XMLTABLE(");
+
+ if (tf->ns_uris != NIL)
+ {
+ ListCell *lc1,
+ *lc2;
+ bool first = true;
+
+ appendStringInfoString(buf, "XMLNAMESPACES (");
+ forboth(lc1, tf->ns_uris, lc2, tf->ns_names)
+ {
+ Node *expr = (Node *) lfirst(lc1);
+ char *name = strVal(lfirst(lc2));
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ else
+ first = false;
+
+ if (name != NULL)
+ {
+ get_rule_expr(expr, context, showimplicit);
+ appendStringInfo(buf, " AS %s", name);
+ }
+ else
+ {
+ appendStringInfoString(buf, "DEFAULT ");
+ get_rule_expr(expr, context, showimplicit);
+ }
+ }
+ appendStringInfoString(buf, "), ");
+ }
+
+ appendStringInfoChar(buf, '(');
+ get_rule_expr((Node *) tf->rowexpr, context, showimplicit);
+ appendStringInfoString(buf, ") PASSING (");
+ get_rule_expr((Node *) tf->docexpr, context, showimplicit);
+ appendStringInfoChar(buf, ')');
+
+ if (tf->colexprs != NIL)
+ {
+ ListCell *l1;
+ ListCell *l2;
+ ListCell *l3;
+ ListCell *l4;
+ ListCell *l5;
+ int colnum = 0;
+
+ appendStringInfoString(buf, " COLUMNS ");
+ forfive(l1, tf->colnames, l2, tf->coltypes, l3, tf->coltypmods,
+ l4, tf->colexprs, l5, tf->coldefexprs)
+ {
+ char *colname = strVal(lfirst(l1));
+ Oid typid = lfirst_oid(l2);
+ int32 typmod = lfirst_int(l3);
+ Node *colexpr = (Node *) lfirst(l4);
+ Node *coldefexpr = (Node *) lfirst(l5);
+ bool ordinality = (tf->ordinalitycol == colnum);
+ bool notnull = bms_is_member(colnum, tf->notnulls);
+
+ if (colnum > 0)
+ appendStringInfoString(buf, ", ");
+ colnum++;
+
+ appendStringInfo(buf, "%s %s", quote_identifier(colname),
+ ordinality ? "FOR ORDINALITY" :
+ format_type_with_typemod(typid, typmod));
+ if (ordinality)
+ continue;
+
+ if (coldefexpr != NULL)
+ {
+ appendStringInfoString(buf, " DEFAULT (");
+ get_rule_expr((Node *) coldefexpr, context, showimplicit);
+ appendStringInfoChar(buf, ')');
+ }
+ if (colexpr != NULL)
+ {
+ appendStringInfoString(buf, " PATH (");
+ get_rule_expr((Node *) colexpr, context, showimplicit);
+ appendStringInfoChar(buf, ')');
+ }
+ if (notnull)
+ appendStringInfoString(buf, " NOT NULL");
+ }
+ }
+
+ appendStringInfoChar(buf, ')');
+}
+
+/* ----------
+ * get_from_clause - Parse back a FROM clause
+ *
+ * "prefix" is the keyword that denotes the start of the list of FROM
+ * elements. It is FROM when used to parse back SELECT and UPDATE, but
+ * is USING when parsing back DELETE.
+ * ----------
+ */
+static void
+get_from_clause(Query *query, const char *prefix, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ bool first = true;
+ ListCell *l;
+
+ /*
+ * We use the query's jointree as a guide to what to print. However, we
+ * must ignore auto-added RTEs that are marked not inFromCl. (These can
+ * only appear at the top level of the jointree, so it's sufficient to
+ * check here.) This check also ensures we ignore the rule pseudo-RTEs
+ * for NEW and OLD.
+ */
+ foreach(l, query->jointree->fromlist)
+ {
+ Node *jtnode = (Node *) lfirst(l);
+
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+ RangeTblEntry *rte = rt_fetch(varno, query->rtable);
+
+ if (!rte->inFromCl)
+ continue;
+ }
+
+ if (first)
+ {
+ appendContextKeyword(context, prefix,
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 2);
+ first = false;
+
+ get_from_clause_item(jtnode, query, context);
+ }
+ else
+ {
+ StringInfoData itembuf;
+
+ appendStringInfoString(buf, ", ");
+
+ /*
+ * Put the new FROM item's text into itembuf so we can decide
+ * after we've got it whether or not it needs to go on a new line.
+ */
+ initStringInfo(&itembuf);
+ context->buf = &itembuf;
+
+ get_from_clause_item(jtnode, query, context);
+
+ /* Restore context's output buffer */
+ context->buf = buf;
+
+ /* Consider line-wrapping if enabled */
+ if (PRETTY_INDENT(context) && context->wrapColumn >= 0)
+ {
+ /* Does the new item start with a new line? */
+ if (itembuf.len > 0 && itembuf.data[0] == '\n')
+ {
+ /* If so, we shouldn't add anything */
+ /* instead, remove any trailing spaces currently in buf */
+ removeStringInfoSpaces(buf);
+ }
+ else
+ {
+ char *trailing_nl;
+
+ /* Locate the start of the current line in the buffer */
+ trailing_nl = strrchr(buf->data, '\n');
+ if (trailing_nl == NULL)
+ trailing_nl = buf->data;
+ else
+ trailing_nl++;
+
+ /*
+ * Add a newline, plus some indentation, if the new item
+ * would cause an overflow.
+ */
+ if (strlen(trailing_nl) + itembuf.len > context->wrapColumn)
+ appendContextKeyword(context, "", -PRETTYINDENT_STD,
+ PRETTYINDENT_STD,
+ PRETTYINDENT_VAR);
+ }
+ }
+
+ /* Add the new item */
+ appendStringInfoString(buf, itembuf.data);
+
+ /* clean up */
+ pfree(itembuf.data);
+ }
+ }
+}
+
+static void
+get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces);
+
+ if (IsA(jtnode, RangeTblRef))
+ {
+ int varno = ((RangeTblRef *) jtnode)->rtindex;
+ RangeTblEntry *rte = rt_fetch(varno, query->rtable);
+ deparse_columns *colinfo = deparse_columns_fetch(varno, dpns);
+ RangeTblFunction *rtfunc1 = NULL;
+ CitusRTEKind rteKind = GetRangeTblKind(rte);
+
+ if (rte->lateral)
+ appendStringInfoString(buf, "LATERAL ");
+
+ /* Print the FROM item proper */
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+ /* Normal relation RTE */
+ appendStringInfo(buf, "%s%s",
+ only_marker(rte),
+ generate_relation_or_shard_name(rte->relid,
+ context->distrelid,
+ context->shardid,
+ context->namespaces));
+ break;
+ case RTE_SUBQUERY:
+ /* Subquery RTE */
+ appendStringInfoChar(buf, '(');
+ get_query_def(rte->subquery, buf, context->namespaces, NULL,
+ true,
+ context->prettyFlags, context->wrapColumn,
+ context->indentLevel);
+ appendStringInfoChar(buf, ')');
+ break;
+ case RTE_FUNCTION:
+				/* if it's a Citus shard, print the fragment name instead */
+ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD)
+ {
+ char *fragmentSchemaName = NULL;
+ char *fragmentTableName = NULL;
+
+ ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL);
+
+ /* use schema and table name from the remote alias */
+ appendStringInfo(buf, "%s%s",
+ only_marker(rte),
+ generate_fragment_name(fragmentSchemaName,
+ fragmentTableName));
+ break;
+ }
+
+ /* Function RTE */
+ rtfunc1 = (RangeTblFunction *) linitial(rte->functions);
+
+ /*
+ * Omit ROWS FROM() syntax for just one function, unless it
+ * has both a coldeflist and WITH ORDINALITY. If it has both,
+ * we must use ROWS FROM() syntax to avoid ambiguity about
+ * whether the coldeflist includes the ordinality column.
+ */
+ if (list_length(rte->functions) == 1 &&
+ (rtfunc1->funccolnames == NIL || !rte->funcordinality))
+ {
+ get_rule_expr_funccall(rtfunc1->funcexpr, context, true);
+ /* we'll print the coldeflist below, if it has one */
+ }
+ else
+ {
+ bool all_unnest;
+ ListCell *lc;
+
+ /*
+ * If all the function calls in the list are to unnest,
+ * and none need a coldeflist, then collapse the list back
+ * down to UNNEST(args). (If we had more than one
+ * built-in unnest function, this would get more
+ * difficult.)
+ *
+ * XXX This is pretty ugly, since it makes not-terribly-
+ * future-proof assumptions about what the parser would do
+ * with the output; but the alternative is to emit our
+ * nonstandard ROWS FROM() notation for what might have
+ * been a perfectly spec-compliant multi-argument
+ * UNNEST().
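+					 * For example, what was written as UNNEST(a, b) is printed
+					 * back as UNNEST(a, b) rather than as
+					 * ROWS FROM(unnest(a), unnest(b)).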
+ */
+ all_unnest = true;
+ foreach(lc, rte->functions)
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
+
+ if (!IsA(rtfunc->funcexpr, FuncExpr) ||
+ ((FuncExpr *) rtfunc->funcexpr)->funcid != F_UNNEST_ANYARRAY ||
+ rtfunc->funccolnames != NIL)
+ {
+ all_unnest = false;
+ break;
+ }
+ }
+
+ if (all_unnest)
+ {
+ List *allargs = NIL;
+
+ foreach(lc, rte->functions)
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
+ List *args = ((FuncExpr *) rtfunc->funcexpr)->args;
+
+ allargs = list_concat(allargs, args);
+ }
+
+ appendStringInfoString(buf, "UNNEST(");
+ get_rule_expr((Node *) allargs, context, true);
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ {
+ int funcno = 0;
+
+ appendStringInfoString(buf, "ROWS FROM(");
+ foreach(lc, rte->functions)
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
+
+ if (funcno > 0)
+ appendStringInfoString(buf, ", ");
+ get_rule_expr_funccall(rtfunc->funcexpr, context, true);
+ if (rtfunc->funccolnames != NIL)
+ {
+ /* Reconstruct the column definition list */
+ appendStringInfoString(buf, " AS ");
+ get_from_clause_coldeflist(rtfunc,
+ NULL,
+ context);
+ }
+ funcno++;
+ }
+ appendStringInfoChar(buf, ')');
+ }
+ /* prevent printing duplicate coldeflist below */
+ rtfunc1 = NULL;
+ }
+ if (rte->funcordinality)
+ appendStringInfoString(buf, " WITH ORDINALITY");
+ break;
+ case RTE_TABLEFUNC:
+ get_tablefunc(rte->tablefunc, context, true);
+ break;
+ case RTE_VALUES:
+ /* Values list RTE */
+ appendStringInfoChar(buf, '(');
+ get_values_def(rte->values_lists, context);
+ appendStringInfoChar(buf, ')');
+ break;
+ case RTE_CTE:
+ appendStringInfoString(buf, quote_identifier(rte->ctename));
+ break;
+ default:
+ elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind);
+ break;
+ }
+
+ /* Print the relation alias, if needed */
+ get_rte_alias(rte, varno, false, context);
+
+ /* Print the column definitions or aliases, if needed */
+ if (rtfunc1 && rtfunc1->funccolnames != NIL)
+ {
+ /* Reconstruct the columndef list, which is also the aliases */
+ get_from_clause_coldeflist(rtfunc1, colinfo, context);
+ }
+ else if (GetRangeTblKind(rte) != CITUS_RTE_SHARD ||
+ (rte->alias != NULL && rte->alias->colnames != NIL))
+ {
+ /* Else print column aliases as needed */
+ get_column_alias_list(colinfo, context);
+ }
+		/* check whether columns are given aliases in distributed tables */
+ else if (colinfo->parentUsing != NIL)
+ {
+ Assert(colinfo->printaliases);
+ get_column_alias_list(colinfo, context);
+ }
+
+ /* Tablesample clause must go after any alias */
+ if ((rteKind == CITUS_RTE_RELATION || rteKind == CITUS_RTE_SHARD) &&
+ rte->tablesample)
+ {
+ get_tablesample_def(rte->tablesample, context);
+ }
+ }
+ else if (IsA(jtnode, JoinExpr))
+ {
+ JoinExpr *j = (JoinExpr *) jtnode;
+ deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns);
+ bool need_paren_on_right;
+
+ need_paren_on_right = PRETTY_PAREN(context) &&
+ !IsA(j->rarg, RangeTblRef) &&
+ !(IsA(j->rarg, JoinExpr) && ((JoinExpr *) j->rarg)->alias != NULL);
+
+ if (!PRETTY_PAREN(context) || j->alias != NULL)
+ appendStringInfoChar(buf, '(');
+
+ get_from_clause_item(j->larg, query, context);
+
+ switch (j->jointype)
+ {
+ case JOIN_INNER:
+ if (j->quals)
+ appendContextKeyword(context, " JOIN ",
+ -PRETTYINDENT_STD,
+ PRETTYINDENT_STD,
+ PRETTYINDENT_JOIN);
+ else
+ appendContextKeyword(context, " CROSS JOIN ",
+ -PRETTYINDENT_STD,
+ PRETTYINDENT_STD,
+ PRETTYINDENT_JOIN);
+ break;
+ case JOIN_LEFT:
+ appendContextKeyword(context, " LEFT JOIN ",
+ -PRETTYINDENT_STD,
+ PRETTYINDENT_STD,
+ PRETTYINDENT_JOIN);
+ break;
+ case JOIN_FULL:
+ appendContextKeyword(context, " FULL JOIN ",
+ -PRETTYINDENT_STD,
+ PRETTYINDENT_STD,
+ PRETTYINDENT_JOIN);
+ break;
+ case JOIN_RIGHT:
+ appendContextKeyword(context, " RIGHT JOIN ",
+ -PRETTYINDENT_STD,
+ PRETTYINDENT_STD,
+ PRETTYINDENT_JOIN);
+ break;
+ default:
+ elog(ERROR, "unrecognized join type: %d",
+ (int) j->jointype);
+ }
+
+ if (need_paren_on_right)
+ appendStringInfoChar(buf, '(');
+ get_from_clause_item(j->rarg, query, context);
+ if (need_paren_on_right)
+ appendStringInfoChar(buf, ')');
+
+ if (j->usingClause)
+ {
+ ListCell *lc;
+ bool first = true;
+
+ appendStringInfoString(buf, " USING (");
+ /* Use the assigned names, not what's in usingClause */
+ foreach(lc, colinfo->usingNames)
+ {
+ char *colname = (char *) lfirst(lc);
+
+ if (first)
+ first = false;
+ else
+ appendStringInfoString(buf, ", ");
+ appendStringInfoString(buf, quote_identifier(colname));
+ }
+ appendStringInfoChar(buf, ')');
+
+ if (j->join_using_alias)
+ appendStringInfo(buf, " AS %s",
+ quote_identifier(j->join_using_alias->aliasname));
+ }
+ else if (j->quals)
+ {
+ appendStringInfoString(buf, " ON ");
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, '(');
+ get_rule_expr(j->quals, context, false);
+ if (!PRETTY_PAREN(context))
+ appendStringInfoChar(buf, ')');
+ }
+ else if (j->jointype != JOIN_INNER)
+ {
+ /* If we didn't say CROSS JOIN above, we must provide an ON */
+ appendStringInfoString(buf, " ON TRUE");
+ }
+
+ if (!PRETTY_PAREN(context) || j->alias != NULL)
+ appendStringInfoChar(buf, ')');
+
+ /* Yes, it's correct to put alias after the right paren ... */
+ if (j->alias != NULL)
+ {
+ /*
+ * Note that it's correct to emit an alias clause if and only if
+ * there was one originally. Otherwise we'd be converting a named
+ * join to unnamed or vice versa, which creates semantic
+ * subtleties we don't want. However, we might print a different
+ * alias name than was there originally.
+ */
+ appendStringInfo(buf, " %s",
+ quote_identifier(get_rtable_name(j->rtindex,
+ context)));
+ get_column_alias_list(colinfo, context);
+ }
+ }
+ else
+ elog(ERROR, "unrecognized node type: %d",
+ (int) nodeTag(jtnode));
+}
+
+/*
+ * get_rte_alias - print the relation's alias, if needed
+ *
+ * If printed, the alias is preceded by a space, or by " AS " if use_as is true.
+ */
+static void
+get_rte_alias(RangeTblEntry *rte, int varno, bool use_as,
+ deparse_context *context)
+{
+ deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces);
+ char *refname = get_rtable_name(varno, context);
+ deparse_columns *colinfo = deparse_columns_fetch(varno, dpns);
+ bool printalias = false;
+
+ if (rte->alias != NULL)
+ {
+ /* Always print alias if user provided one */
+ printalias = true;
+ }
+ else if (colinfo->printaliases)
+ {
+ /* Always print alias if we need to print column aliases */
+ printalias = true;
+ }
+ else if (rte->rtekind == RTE_RELATION)
+ {
+ /*
+ * No need to print alias if it's same as relation name (this would
+ * normally be the case, but not if set_rtable_names had to resolve a
+ * conflict).
+ */
+ if (strcmp(refname, get_relation_name(rte->relid)) != 0)
+ printalias = true;
+ }
+ else if (rte->rtekind == RTE_FUNCTION)
+ {
+ /*
+ * For a function RTE, always print alias. This covers possible
+ * renaming of the function and/or instability of the FigureColname
+ * rules for things that aren't simple functions. Note we'd need to
+ * force it anyway for the columndef list case.
+ */
+ printalias = true;
+ }
+ else if (rte->rtekind == RTE_SUBQUERY ||
+ rte->rtekind == RTE_VALUES)
+ {
+ /*
+ * For a subquery, always print alias. This makes the output
+ * SQL-spec-compliant, even though we allow such aliases to be omitted
+ * on input.
+ */
+ printalias = true;
+ }
+ else if (rte->rtekind == RTE_CTE)
+ {
+ /*
+ * No need to print alias if it's same as CTE name (this would
+ * normally be the case, but not if set_rtable_names had to resolve a
+ * conflict).
+ */
+ if (strcmp(refname, rte->ctename) != 0)
+ printalias = true;
+ }
+
+ if (printalias)
+ appendStringInfo(context->buf, "%s%s",
+ use_as ? " AS " : " ",
+ quote_identifier(refname));
+}
+
+/*
+ * get_column_alias_list - print column alias list for an RTE
+ *
+ * Caller must already have printed the relation's alias name.
+ */
+static void
+get_column_alias_list(deparse_columns *colinfo, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ int i;
+ bool first = true;
+
+ /* Don't print aliases if not needed */
+ if (!colinfo->printaliases)
+ return;
+
+ for (i = 0; i < colinfo->num_new_cols; i++)
+ {
+ char *colname = colinfo->new_colnames[i];
+
+ if (first)
+ {
+ appendStringInfoChar(buf, '(');
+ first = false;
+ }
+ else
+ appendStringInfoString(buf, ", ");
+ appendStringInfoString(buf, quote_identifier(colname));
+ }
+ if (!first)
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * get_from_clause_coldeflist - reproduce FROM clause coldeflist
+ *
+ * When printing a top-level coldeflist (which is syntactically also the
+ * relation's column alias list), use column names from colinfo. But when
+ * printing a coldeflist embedded inside ROWS FROM(), we prefer to use the
+ * original coldeflist's names, which are available in rtfunc->funccolnames.
+ * Pass NULL for colinfo to select the latter behavior.
+ *
+ * The coldeflist is appended immediately (no space) to buf. Caller is
+ * responsible for ensuring that an alias or AS is present before it.
+ */
+static void
+get_from_clause_coldeflist(RangeTblFunction *rtfunc,
+ deparse_columns *colinfo,
+ deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ ListCell *l1;
+ ListCell *l2;
+ ListCell *l3;
+ ListCell *l4;
+ int i;
+
+ appendStringInfoChar(buf, '(');
+
+ i = 0;
+ forfour(l1, rtfunc->funccoltypes,
+ l2, rtfunc->funccoltypmods,
+ l3, rtfunc->funccolcollations,
+ l4, rtfunc->funccolnames)
+ {
+ Oid atttypid = lfirst_oid(l1);
+ int32 atttypmod = lfirst_int(l2);
+ Oid attcollation = lfirst_oid(l3);
+ char *attname;
+
+ if (colinfo)
+ attname = colinfo->colnames[i];
+ else
+ attname = strVal(lfirst(l4));
+
+ Assert(attname); /* shouldn't be any dropped columns here */
+
+ if (i > 0)
+ appendStringInfoString(buf, ", ");
+ appendStringInfo(buf, "%s %s",
+ quote_identifier(attname),
+ format_type_with_typemod(atttypid, atttypmod));
+ if (OidIsValid(attcollation) &&
+ attcollation != get_typcollation(atttypid))
+ appendStringInfo(buf, " COLLATE %s",
+ generate_collation_name(attcollation));
+
+ i++;
+ }
+
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * get_tablesample_def - print a TableSampleClause
+ */
+static void
+get_tablesample_def(TableSampleClause *tablesample, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ Oid argtypes[1];
+ int nargs;
+ ListCell *l;
+
+ /*
+ * We should qualify the handler's function name if it wouldn't be
+ * resolved by lookup in the current search path.
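+	 *
+	 * The output has the form " TABLESAMPLE method (args) [REPEATABLE (expr)]".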
+ */
+ argtypes[0] = INTERNALOID;
+ appendStringInfo(buf, " TABLESAMPLE %s (",
+ generate_function_name(tablesample->tsmhandler, 1,
+ NIL, argtypes,
+ false, NULL, EXPR_KIND_NONE));
+
+ nargs = 0;
+ foreach(l, tablesample->args)
+ {
+ if (nargs++ > 0)
+ appendStringInfoString(buf, ", ");
+ get_rule_expr((Node *) lfirst(l), context, false);
+ }
+ appendStringInfoChar(buf, ')');
+
+ if (tablesample->repeatable != NULL)
+ {
+ appendStringInfoString(buf, " REPEATABLE (");
+ get_rule_expr((Node *) tablesample->repeatable, context, false);
+ appendStringInfoChar(buf, ')');
+ }
+}
+
+/*
+ * get_opclass_name - fetch name of an index operator class
+ *
+ * The opclass name is appended (after a space) to buf.
+ *
+ * Output is suppressed if the opclass is the default for the given
+ * actual_datatype. (If you don't want this behavior, just pass
+ * InvalidOid for actual_datatype.)
+ */
+static void
+get_opclass_name(Oid opclass, Oid actual_datatype,
+ StringInfo buf)
+{
+ HeapTuple ht_opc;
+ Form_pg_opclass opcrec;
+ char *opcname;
+ char *nspname;
+
+ ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));
+ if (!HeapTupleIsValid(ht_opc))
+ elog(ERROR, "cache lookup failed for opclass %u", opclass);
+ opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc);
+
+ if (!OidIsValid(actual_datatype) ||
+ GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
+ {
+ /* Okay, we need the opclass name. Do we need to qualify it? */
+ opcname = NameStr(opcrec->opcname);
+ if (OpclassIsVisible(opclass))
+ appendStringInfo(buf, " %s", quote_identifier(opcname));
+ else
+ {
+ nspname = get_namespace_name_or_temp(opcrec->opcnamespace);
+ appendStringInfo(buf, " %s.%s",
+ quote_identifier(nspname),
+ quote_identifier(opcname));
+ }
+ }
+ ReleaseSysCache(ht_opc);
+}
+
+/*
+ * processIndirection - take care of array and subfield assignment
+ *
+ * We strip any top-level FieldStore or assignment SubscriptingRef nodes that
+ * appear in the input, printing them as decoration for the base column
+ * name (which we assume the caller just printed). We might also need to
+ * strip CoerceToDomain nodes, but only ones that appear above assignment
+ * nodes.
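+ *
+ * For example, for an assignment target written as arr[2] this prints "[2]"
+ * (the caller has already printed "arr") and returns the expression that is
+ * being assigned to that element.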
+ *
+ * Returns the subexpression that's to be assigned.
+ */
+static Node *
+processIndirection(Node *node, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ CoerceToDomain *cdomain = NULL;
+
+ for (;;)
+ {
+ if (node == NULL)
+ break;
+ if (IsA(node, FieldStore))
+ {
+ FieldStore *fstore = (FieldStore *) node;
+ Oid typrelid;
+ char *fieldname;
+
+ /* lookup tuple type */
+ typrelid = get_typ_typrelid(fstore->resulttype);
+ if (!OidIsValid(typrelid))
+ elog(ERROR, "argument type %s of FieldStore is not a tuple type",
+ format_type_be(fstore->resulttype));
+
+ /*
+ * Print the field name. There should only be one target field in
+ * stored rules. There could be more than that in executable
+ * target lists, but this function cannot be used for that case.
+ */
+ Assert(list_length(fstore->fieldnums) == 1);
+ fieldname = get_attname(typrelid,
+ linitial_int(fstore->fieldnums), false);
+ appendStringInfo(buf, ".%s", quote_identifier(fieldname));
+
+ /*
+ * We ignore arg since it should be an uninteresting reference to
+ * the target column or subcolumn.
+ */
+ node = (Node *) linitial(fstore->newvals);
+ }
+ else if (IsA(node, SubscriptingRef))
+ {
+ SubscriptingRef *sbsref = (SubscriptingRef *) node;
+
+ if (sbsref->refassgnexpr == NULL)
+ break;
+ printSubscripts(sbsref, context);
+
+ /*
+ * We ignore refexpr since it should be an uninteresting reference
+ * to the target column or subcolumn.
+ */
+ node = (Node *) sbsref->refassgnexpr;
+ }
+ else if (IsA(node, CoerceToDomain))
+ {
+ cdomain = (CoerceToDomain *) node;
+ /* If it's an explicit domain coercion, we're done */
+ if (cdomain->coercionformat != COERCE_IMPLICIT_CAST)
+ break;
+ /* Tentatively descend past the CoerceToDomain */
+ node = (Node *) cdomain->arg;
+ }
+ else
+ break;
+ }
+
+ /*
+ * If we descended past a CoerceToDomain whose argument turned out not to
+ * be a FieldStore or array assignment, back up to the CoerceToDomain.
+ * (This is not enough to be fully correct if there are nested implicit
+ * CoerceToDomains, but such cases shouldn't ever occur.)
+ */
+ if (cdomain && node == (Node *) cdomain->arg)
+ node = (Node *) cdomain;
+
+ return node;
+}
+
+static void
+printSubscripts(SubscriptingRef *sbsref, deparse_context *context)
+{
+ StringInfo buf = context->buf;
+ ListCell *lowlist_item;
+ ListCell *uplist_item;
+
+ lowlist_item = list_head(sbsref->reflowerindexpr); /* could be NULL */
+ foreach(uplist_item, sbsref->refupperindexpr)
+ {
+ appendStringInfoChar(buf, '[');
+ if (lowlist_item)
+ {
+ /* If subexpression is NULL, get_rule_expr prints nothing */
+ get_rule_expr((Node *) lfirst(lowlist_item), context, false);
+ appendStringInfoChar(buf, ':');
+ lowlist_item = lnext(sbsref->reflowerindexpr, lowlist_item);
+ }
+ /* If subexpression is NULL, get_rule_expr prints nothing */
+ get_rule_expr((Node *) lfirst(uplist_item), context, false);
+ appendStringInfoChar(buf, ']');
+ }
+}
+
+/*
+ * get_relation_name
+ * Get the unqualified name of a relation specified by OID
+ *
+ * This differs from the underlying get_rel_name() function in that it will
+ * throw error instead of silently returning NULL if the OID is bad.
+ */
+static char *
+get_relation_name(Oid relid)
+{
+ char *relname = get_rel_name(relid);
+
+ if (!relname)
+ elog(ERROR, "cache lookup failed for relation %u", relid);
+ return relname;
+}
+
+/*
+ * generate_relation_or_shard_name
+ * Compute the name to display for a relation or shard
+ *
+ * If the provided relid is equal to the provided distrelid, this function
+ * returns a shard-extended relation name; otherwise, it falls through to a
+ * simple generate_relation_name call.
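+ *
+ * For example, relation "orders" with shardid 102008 would be printed as
+ * public.orders_102008 (assuming the table lives in the public schema).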
+ */
+static char *
+generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid,
+ List *namespaces)
+{
+ char *relname = NULL;
+
+ if (relid == distrelid)
+ {
+ relname = get_relation_name(relid);
+
+ if (shardid > 0)
+ {
+ Oid schemaOid = get_rel_namespace(relid);
+ char *schemaName = get_namespace_name_or_temp(schemaOid);
+
+ AppendShardIdToName(&relname, shardid);
+
+ relname = quote_qualified_identifier(schemaName, relname);
+ }
+ }
+ else
+ {
+ relname = generate_relation_name(relid, namespaces);
+ }
+
+ return relname;
+}
+
+/*
+ * generate_relation_name
+ * Compute the name to display for a relation specified by OID
+ *
+ * The result includes all necessary quoting and schema-prefixing.
+ *
+ * If namespaces isn't NIL, it must be a list of deparse_namespace nodes.
+ * We will forcibly qualify the relation name if it equals any CTE name
+ * visible in the namespace list.
+ */
+char *
+generate_relation_name(Oid relid, List *namespaces)
+{
+ HeapTuple tp;
+ Form_pg_class reltup;
+ bool need_qual;
+ ListCell *nslist;
+ char *relname;
+ char *nspname;
+ char *result;
+
+ tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+ if (!HeapTupleIsValid(tp))
+ elog(ERROR, "cache lookup failed for relation %u", relid);
+ reltup = (Form_pg_class) GETSTRUCT(tp);
+ relname = NameStr(reltup->relname);
+
+ /* Check for conflicting CTE name */
+ need_qual = false;
+ foreach(nslist, namespaces)
+ {
+ deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist);
+ ListCell *ctlist;
+
+ foreach(ctlist, dpns->ctes)
+ {
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist);
+
+ if (strcmp(cte->ctename, relname) == 0)
+ {
+ need_qual = true;
+ break;
+ }
+ }
+ if (need_qual)
+ break;
+ }
+
+ /* Otherwise, qualify the name if not visible in search path */
+ if (!need_qual)
+ need_qual = !RelationIsVisible(relid);
+
+ if (need_qual)
+ nspname = get_namespace_name_or_temp(reltup->relnamespace);
+ else
+ nspname = NULL;
+
+ result = quote_qualified_identifier(nspname, relname);
+
+ ReleaseSysCache(tp);
+
+ return result;
+}
+
+/*
+ * generate_rte_shard_name returns the qualified name of the shard given a
+ * CITUS_RTE_SHARD range table entry.
+ */
+static char *
+generate_rte_shard_name(RangeTblEntry *rangeTableEntry)
+{
+ char *shardSchemaName = NULL;
+ char *shardTableName = NULL;
+
+ Assert(GetRangeTblKind(rangeTableEntry) == CITUS_RTE_SHARD);
+
+ ExtractRangeTblExtraData(rangeTableEntry, NULL, &shardSchemaName, &shardTableName,
+ NULL);
+
+ return generate_fragment_name(shardSchemaName, shardTableName);
+}
+
+/*
+ * generate_fragment_name
+ * Compute the name to display for a shard or merged table
+ *
+ * The result includes all necessary quoting and schema-prefixing. The schema
+ * name can be NULL for regular shards. Merged tables are always declared
+ * within a job-specific schema, and therefore can't have a null schema name.
+ */
+static char *
+generate_fragment_name(char *schemaName, char *tableName)
+{
+ StringInfo fragmentNameString = makeStringInfo();
+
+ if (schemaName != NULL)
+ {
+ appendStringInfo(fragmentNameString, "%s.%s", quote_identifier(schemaName),
+ quote_identifier(tableName));
+ }
+ else
+ {
+ appendStringInfoString(fragmentNameString, quote_identifier(tableName));
+ }
+
+ return fragmentNameString->data;
+}
+
+/*
+ * generate_function_name
+ * Compute the name to display for a function specified by OID,
+ * given that it is being called with the specified actual arg names and
+ * types. (Those matter because of ambiguous-function resolution rules.)
+ *
+ * If we're dealing with a potentially variadic function (in practice, this
+ * means a FuncExpr or Aggref, not some other way of calling a function), then
+ * has_variadic must specify whether variadic arguments have been merged,
+ * and *use_variadic_p will be set to indicate whether to print VARIADIC in
+ * the output. For non-FuncExpr cases, has_variadic should be false and
+ * use_variadic_p can be NULL.
+ *
+ * The result includes all necessary quoting and schema-prefixing.
+ */
+static char *
+generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes,
+ bool has_variadic, bool *use_variadic_p,
+ ParseExprKind special_exprkind)
+{
+ char *result;
+ HeapTuple proctup;
+ Form_pg_proc procform;
+ char *proname;
+ bool use_variadic;
+ char *nspname;
+ FuncDetailCode p_result;
+ Oid p_funcid;
+ Oid p_rettype;
+ bool p_retset;
+ int p_nvargs;
+ Oid p_vatype;
+ Oid *p_true_typeids;
+ bool force_qualify = false;
+
+ proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+ if (!HeapTupleIsValid(proctup))
+ elog(ERROR, "cache lookup failed for function %u", funcid);
+ procform = (Form_pg_proc) GETSTRUCT(proctup);
+ proname = NameStr(procform->proname);
+
+ /*
+ * Due to parser hacks to avoid needing to reserve CUBE, we need to force
+ * qualification in some special cases.
+ */
+ if (special_exprkind == EXPR_KIND_GROUP_BY)
+ {
+ if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0)
+ force_qualify = true;
+ }
+
+ /*
+ * Determine whether VARIADIC should be printed. We must do this first
+ * since it affects the lookup rules in func_get_detail().
+ *
+ * Currently, we always print VARIADIC if the function has a merged
+ * variadic-array argument. Note that this is always the case for
+ * functions taking a VARIADIC argument type other than VARIADIC ANY.
+ *
+ * In principle, if VARIADIC wasn't originally specified and the array
+ * actual argument is deconstructable, we could print the array elements
+ * separately and not print VARIADIC, thus more nearly reproducing the
+ * original input. For the moment that seems like too much complication
+ * for the benefit, and anyway we do not know whether VARIADIC was
+ * originally specified if it's a non-ANY type.
+ */
+ if (use_variadic_p)
+ {
+ /* Parser should not have set funcvariadic unless fn is variadic */
+ Assert(!has_variadic || OidIsValid(procform->provariadic));
+ use_variadic = has_variadic;
+ *use_variadic_p = use_variadic;
+ }
+ else
+ {
+ Assert(!has_variadic);
+ use_variadic = false;
+ }
+
+ /*
+ * The idea here is to schema-qualify only if the parser would fail to
+ * resolve the correct function given the unqualified func name with the
+ * specified argtypes and VARIADIC flag. But if we already decided to
+ * force qualification, then we can skip the lookup and pretend we didn't
+ * find it.
+ */
+ if (!force_qualify)
+ p_result = func_get_detail(list_make1(makeString(proname)),
+ NIL, argnames, nargs, argtypes,
+ !use_variadic, true, false,
+ &p_funcid, &p_rettype,
+ &p_retset, &p_nvargs, &p_vatype,
+ &p_true_typeids, NULL);
+ else
+ {
+ p_result = FUNCDETAIL_NOTFOUND;
+ p_funcid = InvalidOid;
+ }
+
+ if ((p_result == FUNCDETAIL_NORMAL ||
+ p_result == FUNCDETAIL_AGGREGATE ||
+ p_result == FUNCDETAIL_WINDOWFUNC) &&
+ p_funcid == funcid)
+ nspname = NULL;
+ else
+ nspname = get_namespace_name_or_temp(procform->pronamespace);
+
+ result = quote_qualified_identifier(nspname, proname);
+
+ ReleaseSysCache(proctup);
+
+ return result;
+}
+
+/*
+ * generate_operator_name
+ * Compute the name to display for an operator specified by OID,
+ * given that it is being called with the specified actual arg types.
+ * (Arg types matter because of ambiguous-operator resolution rules.
+ * Pass InvalidOid for unused arg of a unary operator.)
+ *
+ * The result includes all necessary quoting and schema-prefixing,
+ * plus the OPERATOR() decoration needed to use a qualified operator name
+ * in an expression.
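+ *
+ * For example, for the built-in equality operator the result looks like
+ * OPERATOR(pg_catalog.=) (the operator chosen here is illustrative).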
+ */
+char *
+generate_operator_name(Oid operid, Oid arg1, Oid arg2)
+{
+ StringInfoData buf;
+ HeapTuple opertup;
+ Form_pg_operator operform;
+ char *oprname;
+ char *nspname;
+
+ initStringInfo(&buf);
+
+ opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid));
+ if (!HeapTupleIsValid(opertup))
+ elog(ERROR, "cache lookup failed for operator %u", operid);
+ operform = (Form_pg_operator) GETSTRUCT(opertup);
+ oprname = NameStr(operform->oprname);
+
+ /*
+ * Unlike generate_operator_name() in postgres/src/backend/utils/adt/ruleutils.c,
+ * we don't check whether the operator is in the current namespace. We skip
+ * that check because it is costly when the operator is not in the current
+ * namespace.
+ */
+ nspname = get_namespace_name_or_temp(operform->oprnamespace);
+ Assert(nspname != NULL);
+ appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname));
+ appendStringInfoString(&buf, oprname);
+ appendStringInfoChar(&buf, ')');
+
+ ReleaseSysCache(opertup);
+
+ return buf.data;
+}
+
+/*
+ * get_range_partbound_string
+ * A C string representation of one range partition bound
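+ *
+ * For example, the result may look like "(MINVALUE)" or "('2020-01-01', 10)"
+ * (the bound values here are illustrative).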
+ */
+char *
+get_range_partbound_string(List *bound_datums)
+{
+ deparse_context context;
+ StringInfo buf = makeStringInfo();
+ ListCell *cell;
+ char *sep;
+
+ memset(&context, 0, sizeof(deparse_context));
+ context.buf = buf;
+
+ appendStringInfoChar(buf, '(');
+ sep = "";
+ foreach(cell, bound_datums)
+ {
+ PartitionRangeDatum *datum =
+ lfirst_node(PartitionRangeDatum, cell);
+
+ appendStringInfoString(buf, sep);
+ if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE)
+ appendStringInfoString(buf, "MINVALUE");
+ else if (datum->kind == PARTITION_RANGE_DATUM_MAXVALUE)
+ appendStringInfoString(buf, "MAXVALUE");
+ else
+ {
+ Const *val = castNode(Const, datum->value);
+
+ get_const_expr(val, &context, -1);
+ }
+ sep = ", ";
+ }
+ appendStringInfoChar(buf, ')');
+
+ return buf->data;
+}
+
+/*
+ * Collect a list of OIDs of all sequences owned by the specified relation,
+ * and column if specified. If deptype is not zero, then only find sequences
+ * with the specified dependency type.
+ */
+List *
+getOwnedSequences_internal(Oid relid, AttrNumber attnum, char deptype)
+{
+ List *result = NIL;
+ Relation depRel;
+ ScanKeyData key[3];
+ SysScanDesc scan;
+ HeapTuple tup;
+
+ depRel = table_open(DependRelationId, AccessShareLock);
+
+ ScanKeyInit(&key[0],
+ Anum_pg_depend_refclassid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(RelationRelationId));
+ ScanKeyInit(&key[1],
+ Anum_pg_depend_refobjid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(relid));
+ if (attnum)
+ ScanKeyInit(&key[2],
+ Anum_pg_depend_refobjsubid,
+ BTEqualStrategyNumber, F_INT4EQ,
+ Int32GetDatum(attnum));
+
+ scan = systable_beginscan(depRel, DependReferenceIndexId, true,
+ NULL, attnum ? 3 : 2, key);
+
+ while (HeapTupleIsValid(tup = systable_getnext(scan)))
+ {
+ Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
+
+ /*
+ * We assume any auto or internal dependency of a sequence on a column
+ * must be what we are looking for. (We need the relkind test because
+ * indexes can also have auto dependencies on columns.)
+ */
+ if (deprec->classid == RelationRelationId &&
+ deprec->objsubid == 0 &&
+ deprec->refobjsubid != 0 &&
+ (deprec->deptype == DEPENDENCY_AUTO || deprec->deptype == DEPENDENCY_INTERNAL) &&
+ get_rel_relkind(deprec->objid) == RELKIND_SEQUENCE)
+ {
+ if (!deptype || deprec->deptype == deptype)
+ result = lappend_oid(result, deprec->objid);
+ }
+ }
+
+ systable_endscan(scan);
+
+ table_close(depRel, AccessShareLock);
+
+ return result;
+}
+
+/*
+ * get_insert_column_names_list prepares the insert-column-names list. Any indirection
+ * decoration needed on the column names can be inferred from the top targetlist.
+ */
+static List *
+get_insert_column_names_list(List *targetList, StringInfo buf,
+ deparse_context *context, RangeTblEntry *rte)
+{
+ char *sep;
+ ListCell *l;
+ List *strippedexprs;
+
+ strippedexprs = NIL;
+ sep = "";
+ appendStringInfoChar(buf, '(');
+ foreach(l, targetList)
+ {
+ TargetEntry *tle = (TargetEntry *) lfirst(l);
+
+ if (tle->resjunk)
+ continue; /* ignore junk entries */
+
+ appendStringInfoString(buf, sep);
+ sep = ", ";
+
+ /*
+ * Put out name of target column; look in the catalogs, not at
+ * tle->resname, since resname will fail to track RENAME.
+ */
+ appendStringInfoString(buf,
+ quote_identifier(get_attname(rte->relid,
+ tle->resno,
+ false)));
+
+ /*
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
+ * Add the stripped expressions to strippedexprs. (If it's a
+ * single-VALUES statement, the stripped expressions are the VALUES to
+ * print below. Otherwise they're just Vars and not really
+ * interesting.)
+ */
+ strippedexprs = lappend(strippedexprs,
+ processIndirection((Node *) tle->expr,
+ context));
+ }
+ appendStringInfoString(buf, ") ");
+
+ return strippedexprs;
+}
+#endif /* (PG_VERSION_NUM >= PG_VERSION_16) && (PG_VERSION_NUM < PG_VERSION_17) */
diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c
index 039475735..61a52e7c4 100644
--- a/src/backend/distributed/executor/adaptive_executor.c
+++ b/src/backend/distributed/executor/adaptive_executor.c
@@ -519,7 +519,9 @@ typedef enum TaskExecutionState
/*
* PlacementExecutionOrder indicates whether a command should be executed
* on any replica, on all replicas sequentially (in order), or on all
- * replicas in parallel.
+ * replicas in parallel. In other words, EXECUTION_ORDER_ANY is used for
+ * SELECTs, while EXECUTION_ORDER_SEQUENTIAL and EXECUTION_ORDER_PARALLEL are
+ * used for DML/DDL.
*/
typedef enum PlacementExecutionOrder
{
@@ -2043,7 +2045,7 @@ ProcessSessionsWithFailedWaitEventSetOperations(DistributedExecution *execution)
/*
* HasIncompleteConnectionEstablishment returns true if any of the connections
- * that has been initiated by the executor is in initilization stage.
+ * that has been initiated by the executor is in initialization stage.
*/
static bool
HasIncompleteConnectionEstablishment(DistributedExecution *execution)
@@ -2656,7 +2658,7 @@ OpenNewConnections(WorkerPool *workerPool, int newConnectionCount,
{
/*
* The worker pool has just started to establish connections. We need to
- * defer this initilization after StartNodeUserDatabaseConnection()
+ * defer this initialization after StartNodeUserDatabaseConnection()
* because for non-optional connections, we have some logic to wait
* until a connection is allowed to be established.
*/
@@ -4687,6 +4689,10 @@ TaskExecutionStateMachine(ShardCommandExecution *shardCommandExecution)
{
currentTaskExecutionState = TASK_EXECUTION_FAILED;
}
+ else if (executionOrder != EXECUTION_ORDER_ANY && failedPlacementCount > 0)
+ {
+ currentTaskExecutionState = TASK_EXECUTION_FAILED;
+ }
else if (executionOrder == EXECUTION_ORDER_ANY && donePlacementCount > 0)
{
currentTaskExecutionState = TASK_EXECUTION_FINISHED;
diff --git a/src/backend/distributed/executor/intermediate_results.c b/src/backend/distributed/executor/intermediate_results.c
index 8179f0082..d17e65217 100644
--- a/src/backend/distributed/executor/intermediate_results.c
+++ b/src/backend/distributed/executor/intermediate_results.c
@@ -279,7 +279,7 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,
/*
* PrepareIntermediateResultBroadcast gets a RemoteFileDestReceiver and does
- * the necessary initilizations including initiating the remote connections
+ * the necessary initializations including initiating the remote connections
* and creating the local file, which is necessary (it might be both).
*/
static void
diff --git a/src/backend/distributed/executor/merge_executor.c b/src/backend/distributed/executor/merge_executor.c
index f501497c0..bcacbcd1e 100644
--- a/src/backend/distributed/executor/merge_executor.c
+++ b/src/backend/distributed/executor/merge_executor.c
@@ -100,7 +100,7 @@ ExecuteSourceAtWorkerAndRepartition(CitusScanState *scanState)
Query *mergeQuery =
copyObject(distributedPlan->modifyQueryViaCoordinatorOrRepartition);
RangeTblEntry *targetRte = ExtractResultRelationRTE(mergeQuery);
- RangeTblEntry *sourceRte = ExtractMergeSourceRangeTableEntry(mergeQuery);
+ RangeTblEntry *sourceRte = ExtractMergeSourceRangeTableEntry(mergeQuery, false);
Oid targetRelationId = targetRte->relid;
bool hasReturning = distributedPlan->expectResults;
Query *sourceQuery = sourceRte->subquery;
@@ -211,7 +211,7 @@ ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState)
Query *mergeQuery =
copyObject(distributedPlan->modifyQueryViaCoordinatorOrRepartition);
RangeTblEntry *targetRte = ExtractResultRelationRTE(mergeQuery);
- RangeTblEntry *sourceRte = ExtractMergeSourceRangeTableEntry(mergeQuery);
+ RangeTblEntry *sourceRte = ExtractMergeSourceRangeTableEntry(mergeQuery, false);
Query *sourceQuery = sourceRte->subquery;
Oid targetRelationId = targetRte->relid;
PlannedStmt *sourcePlan =
diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c
index c307dc737..f970cecd1 100644
--- a/src/backend/distributed/metadata/dependency.c
+++ b/src/backend/distributed/metadata/dependency.c
@@ -1022,13 +1022,12 @@ GetUndistributableDependency(const ObjectAddress *objectAddress)
if (!SupportedDependencyByCitus(dependency))
{
/*
- * Skip roles and text search templates.
- *
- * Roles should be handled manually with Citus community whereas text search
- * templates should be handled manually in both community and enterprise
+ * Since we do not yet support distributed TS TEMPLATE and AM objects, we skip
+ * dependency checks for text search templates. The user is expected to
+ * manually create the TS TEMPLATE and AM objects.
*/
- if (getObjectClass(dependency) != OCLASS_ROLE &&
- getObjectClass(dependency) != OCLASS_TSTEMPLATE)
+ if (getObjectClass(dependency) != OCLASS_TSTEMPLATE &&
+ getObjectClass(dependency) != OCLASS_AM)
{
return dependency;
}
@@ -1200,7 +1199,7 @@ FirstExtensionWithSchema(Oid schemaId)
ScanKeyData entry[1];
ScanKeyInit(&entry[0], Anum_pg_extension_extnamespace, BTEqualStrategyNumber,
- F_INT4EQ, schemaId);
+ F_OIDEQ, ObjectIdGetDatum(schemaId));
SysScanDesc scan = systable_beginscan(relation, InvalidOid, false, NULL, 1, entry);
HeapTuple extensionTuple = systable_getnext(scan);
diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c
index 5997480a0..c6a8b0a22 100644
--- a/src/backend/distributed/metadata/distobject.c
+++ b/src/backend/distributed/metadata/distobject.c
@@ -510,7 +510,7 @@ UpdateDistributedObjectColocationId(uint32 oldColocationId,
/* scan pg_dist_object for colocationId equal to old colocationId */
ScanKeyInit(&scanKey[0], Anum_pg_dist_object_colocationid,
BTEqualStrategyNumber,
- F_INT4EQ, UInt32GetDatum(oldColocationId));
+ F_INT4EQ, Int32GetDatum(oldColocationId));
SysScanDesc scanDescriptor = systable_beginscan(pgDistObjectRel,
InvalidOid,
diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c
index ebc048376..55d0f11c5 100644
--- a/src/backend/distributed/metadata/metadata_cache.c
+++ b/src/backend/distributed/metadata/metadata_cache.c
@@ -83,7 +83,9 @@
#include "utils/memutils.h"
#include "utils/palloc.h"
#include "utils/rel.h"
+#if PG_VERSION_NUM < PG_VERSION_16
#include "utils/relfilenodemap.h"
+#endif
#include "utils/relmapper.h"
#include "utils/resowner.h"
#include "utils/syscache.h"
@@ -131,6 +133,19 @@ typedef struct ShardIdCacheEntry
int shardIndex;
} ShardIdCacheEntry;
+/*
+ * ExtensionCreatedState tracks whether the citus extension has been created
+ * using the CREATE EXTENSION command.
+ * UNKNOWN : MetadataCache is invalid, so the state is not known yet.
+ * CREATED : The citus extension has been created.
+ * NOTCREATED : The citus extension has not been created.
+ */
+typedef enum ExtensionCreatedState
+{
+ UNKNOWN = 0,
+ CREATED = 1,
+ NOTCREATED = 2,
+} ExtensionCreatedState;
/*
* State which should be cleared upon DROP EXTENSION. When the configuration
@@ -138,7 +153,7 @@ typedef struct ShardIdCacheEntry
*/
typedef struct MetadataCacheData
{
- bool extensionLoaded;
+ ExtensionCreatedState extensionCreatedState;
Oid distShardRelationId;
Oid distPlacementRelationId;
Oid distBackgroundJobRelationId;
@@ -286,7 +301,6 @@ static void CreateDistTableCache(void);
static void CreateShardIdCache(void);
static void CreateDistObjectCache(void);
static void InvalidateForeignRelationGraphCacheCallback(Datum argument, Oid relationId);
-static void InvalidateDistRelationCacheCallback(Datum argument, Oid relationId);
static void InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId);
static void InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid relationId);
static void InvalidateConnParamsCacheCallback(Datum argument, Oid relationId);
@@ -352,7 +366,8 @@ EnsureModificationsCanRun(void)
{
if (RecoveryInProgress() && !WritableStandbyCoordinator)
{
- ereport(ERROR, (errmsg("writing to worker nodes is not currently allowed"),
+ ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
+ errmsg("writing to worker nodes is not currently allowed"),
errdetail("the database is read-only")));
}
@@ -413,7 +428,8 @@ EnsureModificationsCanRunOnRelation(Oid relationId)
if (modifiedTableReplicated)
{
- ereport(ERROR, (errmsg("writing to worker nodes is not currently "
+ ereport(ERROR, (errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
+ errmsg("writing to worker nodes is not currently "
"allowed for replicated tables such as reference "
"tables or hash distributed tables with replication "
"factor greater than 1."),
@@ -445,6 +461,52 @@ IsCitusTableType(Oid relationId, CitusTableType tableType)
}
+/*
+ * GetCitusTableType is a helper function that returns the CitusTableType
+ * for the given table cache entry.
+ * Note that a single table can qualify as more than one CitusTableType; for
+ * example, a hash distributed table is both HASH_DISTRIBUTED and
+ * DISTRIBUTED_TABLE. This function returns the base type for a given table.
+ *
+ * If the table is not a Citus table, ANY_CITUS_TABLE_TYPE is returned.
+ */
+CitusTableType
+GetCitusTableType(CitusTableCacheEntry *tableEntry)
+{
+ /* we do not expect local tables here */
+ Assert(tableEntry != NULL);
+
+ if (IsCitusTableTypeCacheEntry(tableEntry, HASH_DISTRIBUTED))
+ {
+ return HASH_DISTRIBUTED;
+ }
+ else if (IsCitusTableTypeCacheEntry(tableEntry, SINGLE_SHARD_DISTRIBUTED))
+ {
+ return SINGLE_SHARD_DISTRIBUTED;
+ }
+ else if (IsCitusTableTypeCacheEntry(tableEntry, REFERENCE_TABLE))
+ {
+ return REFERENCE_TABLE;
+ }
+ else if (IsCitusTableTypeCacheEntry(tableEntry, CITUS_LOCAL_TABLE))
+ {
+ return CITUS_LOCAL_TABLE;
+ }
+ else if (IsCitusTableTypeCacheEntry(tableEntry, APPEND_DISTRIBUTED))
+ {
+ return APPEND_DISTRIBUTED;
+ }
+ else if (IsCitusTableTypeCacheEntry(tableEntry, RANGE_DISTRIBUTED))
+ {
+ return RANGE_DISTRIBUTED;
+ }
+ else
+ {
+ return ANY_CITUS_TABLE_TYPE;
+ }
+}
+
+
/*
* IsCitusTableTypeCacheEntry returns true if the given table cache entry
* belongs to a citus table that matches the given table type.
@@ -2137,16 +2199,30 @@ HasOverlappingShardInterval(ShardInterval **shardIntervalArray,
bool
CitusHasBeenLoaded(void)
{
- if (!MetadataCache.extensionLoaded || creating_extension)
+ /*
+ * We do not use Citus hooks during CREATE/ALTER EXTENSION citus
+ * since the objects used by the C code might not be there yet.
+ */
+ if (creating_extension)
{
- /*
- * Refresh if we have not determined whether the extension has been
- * loaded yet, or in case of ALTER EXTENSION since we want to treat
- * Citus as "not loaded" during ALTER EXTENSION citus.
- */
- bool extensionLoaded = CitusHasBeenLoadedInternal();
+ Oid citusExtensionOid = get_extension_oid("citus", true);
- if (extensionLoaded && !MetadataCache.extensionLoaded)
+ if (CurrentExtensionObject == citusExtensionOid)
+ {
+ return false;
+ }
+ }
+
+ /*
+ * If extensionCreatedState is UNKNOWN, query pg_extension for Citus
+ * and cache the result. Otherwise, return the value that
+ * extensionCreatedState indicates.
+ */
+ if (MetadataCache.extensionCreatedState == UNKNOWN)
+ {
+ bool extensionCreated = CitusHasBeenLoadedInternal();
+
+ if (extensionCreated)
{
/*
* Loaded Citus for the first time in this session, or first time after
@@ -2158,31 +2234,22 @@ CitusHasBeenLoaded(void)
*/
StartupCitusBackend();
- /*
- * InvalidateDistRelationCacheCallback resets state such as extensionLoaded
- * when it notices changes to pg_dist_partition (which usually indicate
- * `DROP EXTENSION citus;` has been run)
- *
- * Ensure InvalidateDistRelationCacheCallback will notice those changes
- * by caching pg_dist_partition's oid.
- *
- * We skip these checks during upgrade since pg_dist_partition is not
- * present during early stages of upgrade operation.
- */
- DistPartitionRelationId();
-
/*
* This needs to be initialized so we can receive foreign relation graph
* invalidation messages in InvalidateForeignRelationGraphCacheCallback().
* See the comments of InvalidateForeignKeyGraph for more context.
*/
DistColocationRelationId();
- }
- MetadataCache.extensionLoaded = extensionLoaded;
+ MetadataCache.extensionCreatedState = CREATED;
+ }
+ else
+ {
+ MetadataCache.extensionCreatedState = NOTCREATED;
+ }
}
- return MetadataCache.extensionLoaded;
+ return (MetadataCache.extensionCreatedState == CREATED) ? true : false;
}
@@ -2207,15 +2274,6 @@ CitusHasBeenLoadedInternal(void)
return false;
}
- if (creating_extension && CurrentExtensionObject == citusExtensionOid)
- {
- /*
- * We do not use Citus hooks during CREATE/ALTER EXTENSION citus
- * since the objects used by the C code might be not be there yet.
- */
- return false;
- }
-
/* citus extension exists and has been created */
return true;
}
@@ -4151,10 +4209,6 @@ InitializeDistCache(void)
CreateShardIdCache();
InitializeDistObjectCache();
-
- /* Watch for invalidation events. */
- CacheRegisterRelcacheCallback(InvalidateDistRelationCacheCallback,
- (Datum) 0);
}
@@ -4367,9 +4421,9 @@ RegisterCitusTableCacheEntryReleaseCallbacks(void)
/*
- * GetLocalGroupId returns the group identifier of the local node. The function assumes
- * that pg_dist_local_node_group has exactly one row and has at least one column.
- * Otherwise, the function errors out.
+ * GetLocalGroupId returns the group identifier of the local node. The function
+ * assumes that pg_dist_local_group has exactly one row and has at least one
+ * column. Otherwise, the function errors out.
*/
int32
GetLocalGroupId(void)
@@ -4704,7 +4758,7 @@ InvalidateForeignKeyGraph(void)
* InvalidateDistRelationCacheCallback flushes cache entries when a relation
* is updated (or flushes the entire cache).
*/
-static void
+void
InvalidateDistRelationCacheCallback(Datum argument, Oid relationId)
{
/* invalidate either entire cache or a specific entry */
@@ -4712,12 +4766,18 @@ InvalidateDistRelationCacheCallback(Datum argument, Oid relationId)
{
InvalidateDistTableCache();
InvalidateDistObjectCache();
+ InvalidateMetadataSystemCache();
}
else
{
void *hashKey = (void *) &relationId;
bool foundInCache = false;
+ if (DistTableCacheHash == NULL)
+ {
+ return;
+ }
+
CitusTableCacheEntrySlot *cacheSlot =
hash_search(DistTableCacheHash, hashKey, HASH_FIND, &foundInCache);
if (foundInCache)
@@ -4726,21 +4786,19 @@ InvalidateDistRelationCacheCallback(Datum argument, Oid relationId)
}
/*
- * If pg_dist_partition is being invalidated drop all state
- * This happens pretty rarely, but most importantly happens during
- * DROP EXTENSION citus; This isn't the only time when this happens
- * though, it can happen for multiple other reasons, such as an
- * autovacuum running ANALYZE on pg_dist_partition. Such an ANALYZE
- * wouldn't really need a full Metadata cache invalidation, but we
- * don't know how to differentiate between DROP EXTENSION and ANALYZE.
- * So for now we simply drop it in both cases and take the slight
- * temporary performance hit.
+ * If the pg_dist_partition relcache is invalidated for some reason,
+ * invalidate the MetadataCache. It is likely overkill to invalidate
+ * the entire cache here, but until a better fix we keep it this way
+ * for the postgres regression tests that include the
+ * REINDEX SCHEMA CONCURRENTLY pg_catalog
+ * command.
*/
if (relationId == MetadataCache.distPartitionRelationId)
{
InvalidateMetadataSystemCache();
}
+
if (relationId == MetadataCache.distObjectRelationId)
{
InvalidateDistObjectCache();
@@ -4780,6 +4838,11 @@ InvalidateDistTableCache(void)
CitusTableCacheEntrySlot *cacheSlot = NULL;
HASH_SEQ_STATUS status;
+ if (DistTableCacheHash == NULL)
+ {
+ return;
+ }
+
hash_seq_init(&status, DistTableCacheHash);
while ((cacheSlot = (CitusTableCacheEntrySlot *) hash_seq_search(&status)) != NULL)
@@ -4798,6 +4861,11 @@ InvalidateDistObjectCache(void)
DistObjectCacheEntry *cacheEntry = NULL;
HASH_SEQ_STATUS status;
+ if (DistObjectCacheHash == NULL)
+ {
+ return;
+ }
+
hash_seq_init(&status, DistObjectCacheHash);
while ((cacheEntry = (DistObjectCacheEntry *) hash_seq_search(&status)) != NULL)
@@ -4880,8 +4948,8 @@ CreateDistObjectCache(void)
/*
- * InvalidateMetadataSystemCache resets all the cached OIDs and the extensionLoaded flag,
- * and invalidates the worker node, ConnParams, and local group ID caches.
+ * InvalidateMetadataSystemCache resets all the cached OIDs and the extensionCreatedState
+ * flag and invalidates the worker node, ConnParams, and local group ID caches.
*/
void
InvalidateMetadataSystemCache(void)
@@ -4939,8 +5007,8 @@ CitusTableTypeIdList(CitusTableType citusTableType)
Datum replicationModelDatum = datumArray[Anum_pg_dist_partition_repmodel - 1];
Datum colocationIdDatum = datumArray[Anum_pg_dist_partition_colocationid - 1];
- Oid partitionMethod = DatumGetChar(partMethodDatum);
- Oid replicationModel = DatumGetChar(replicationModelDatum);
+ char partitionMethod = DatumGetChar(partMethodDatum);
+ char replicationModel = DatumGetChar(replicationModelDatum);
uint32 colocationId = DatumGetUInt32(colocationIdDatum);
if (IsCitusTableTypeInternal(partitionMethod, replicationModel, colocationId,
@@ -5600,7 +5668,7 @@ role_exists(PG_FUNCTION_ARGS)
* Otherwise, this function returns NULL.
*/
char *
-GetPoolinfoViaCatalog(int64 nodeId)
+GetPoolinfoViaCatalog(int32 nodeId)
{
ScanKeyData scanKey[1];
const int scanKeyCount = 1;
diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c
index 7dfc30f73..40bdae0ea 100644
--- a/src/backend/distributed/metadata/metadata_sync.c
+++ b/src/backend/distributed/metadata/metadata_sync.c
@@ -150,6 +150,7 @@ static char * RemoteSchemaIdExpressionById(Oid schemaId);
static char * RemoteSchemaIdExpressionByName(char *schemaName);
static char * RemoteTypeIdExpression(Oid typeId);
static char * RemoteCollationIdExpression(Oid colocationId);
+static char * RemoteTableIdExpression(Oid relationId);
PG_FUNCTION_INFO_V1(start_metadata_sync_to_all_nodes);
@@ -167,6 +168,7 @@ PG_FUNCTION_INFO_V1(citus_internal_add_partition_metadata);
PG_FUNCTION_INFO_V1(citus_internal_delete_partition_metadata);
PG_FUNCTION_INFO_V1(citus_internal_add_shard_metadata);
PG_FUNCTION_INFO_V1(citus_internal_add_placement_metadata);
+PG_FUNCTION_INFO_V1(citus_internal_delete_placement_metadata);
PG_FUNCTION_INFO_V1(citus_internal_add_placement_metadata_legacy);
PG_FUNCTION_INFO_V1(citus_internal_update_placement_metadata);
PG_FUNCTION_INFO_V1(citus_internal_delete_shard_metadata);
@@ -176,6 +178,7 @@ PG_FUNCTION_INFO_V1(citus_internal_add_colocation_metadata);
PG_FUNCTION_INFO_V1(citus_internal_delete_colocation_metadata);
PG_FUNCTION_INFO_V1(citus_internal_add_tenant_schema);
PG_FUNCTION_INFO_V1(citus_internal_delete_tenant_schema);
+PG_FUNCTION_INFO_V1(citus_internal_update_none_dist_table_metadata);
static bool got_SIGTERM = false;
@@ -1757,8 +1760,8 @@ GetFunctionDependenciesForObjects(ObjectAddress *objectAddress)
ObjectIdGetDatum(objectAddress->objectId));
ScanKeyInit(&key[2],
Anum_pg_depend_objsubid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(objectAddress->objectSubId));
+ BTEqualStrategyNumber, F_INT4EQ,
+ Int32GetDatum(objectAddress->objectSubId));
SysScanDesc scan = systable_beginscan(depRel, DependDependerIndexId, true,
NULL, 3, key);
@@ -3449,6 +3452,28 @@ citus_internal_add_placement_metadata(PG_FUNCTION_ARGS)
}
+/*
+ * citus_internal_delete_placement_metadata is an internal UDF to
+ * delete a row from pg_dist_placement.
+ */
+Datum
+citus_internal_delete_placement_metadata(PG_FUNCTION_ARGS)
+{
+ PG_ENSURE_ARGNOTNULL(0, "placement_id");
+ int64 placementId = PG_GETARG_INT64(0);
+
+ if (!ShouldSkipMetadataChecks())
+ {
+ /* this UDF is not allowed to be executed as a separate command */
+ EnsureCoordinatorInitiatedOperation();
+ }
+
+ DeleteShardPlacementRow(placementId);
+
+ PG_RETURN_VOID();
+}
+
+
/*
* citus_internal_add_placement_metadata_legacy is the old function that will be dropped.
*/
@@ -3836,6 +3861,40 @@ citus_internal_delete_tenant_schema(PG_FUNCTION_ARGS)
}
+/*
+ * citus_internal_update_none_dist_table_metadata is an internal UDF to
+ * update a row in pg_dist_partition that belongs to given none-distributed
+ * table.
+ */
+Datum
+citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS)
+{
+ CheckCitusVersion(ERROR);
+
+ PG_ENSURE_ARGNOTNULL(0, "relation_id");
+ Oid relationId = PG_GETARG_OID(0);
+
+ PG_ENSURE_ARGNOTNULL(1, "replication_model");
+ char replicationModel = PG_GETARG_CHAR(1);
+
+ PG_ENSURE_ARGNOTNULL(2, "colocation_id");
+ uint32 colocationId = PG_GETARG_INT32(2);
+
+ PG_ENSURE_ARGNOTNULL(3, "auto_converted");
+ bool autoConverted = PG_GETARG_BOOL(3);
+
+ if (!ShouldSkipMetadataChecks())
+ {
+ EnsureCoordinatorInitiatedOperation();
+ }
+
+ UpdateNoneDistTableMetadata(relationId, replicationModel,
+ colocationId, autoConverted);
+
+ PG_RETURN_VOID();
+}
+
+
/*
* SyncNewColocationGroup synchronizes a new pg_dist_colocation entry to a worker.
*/
@@ -4017,6 +4076,55 @@ TenantSchemaDeleteCommand(char *schemaName)
}
+/*
+ * UpdateNoneDistTableMetadataCommand returns a command to call
+ * citus_internal_update_none_dist_table_metadata().
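+ *
+ * The returned command looks like the following (argument values are
+ * illustrative):
+ *   SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('s.t'::regclass, 's', 5, false)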
+ */
+char *
+UpdateNoneDistTableMetadataCommand(Oid relationId, char replicationModel,
+ uint32 colocationId, bool autoConverted)
+{
+ StringInfo command = makeStringInfo();
+ appendStringInfo(command,
+ "SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(%s, '%c', %u, %s)",
+ RemoteTableIdExpression(relationId), replicationModel, colocationId,
+ autoConverted ? "true" : "false");
+
+ return command->data;
+}
+
+
+/*
+ * AddPlacementMetadataCommand returns a command to call
+ * citus_internal_add_placement_metadata().
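+ *
+ * The returned command looks like the following (argument values are
+ * illustrative):
+ *   SELECT citus_internal_add_placement_metadata(102008, 0, 2, 151)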
+ */
+char *
+AddPlacementMetadataCommand(uint64 shardId, uint64 placementId,
+ uint64 shardLength, int32 groupId)
+{
+ StringInfo command = makeStringInfo();
+ appendStringInfo(command,
+ "SELECT citus_internal_add_placement_metadata(%ld, %ld, %d, %ld)",
+ shardId, shardLength, groupId, placementId);
+ return command->data;
+}
+
+
+/*
+ * DeletePlacementMetadataCommand returns a command to call
+ * citus_internal_delete_placement_metadata().
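+ *
+ * The returned command looks like the following (the placement id is
+ * illustrative):
+ *   SELECT pg_catalog.citus_internal_delete_placement_metadata(151)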
+ */
+char *
+DeletePlacementMetadataCommand(uint64 placementId)
+{
+ StringInfo command = makeStringInfo();
+ appendStringInfo(command,
+ "SELECT pg_catalog.citus_internal_delete_placement_metadata(%ld)",
+ placementId);
+ return command->data;
+}
+
+
/*
* RemoteSchemaIdExpressionById returns an expression in text form that
* can be used to obtain the OID of the schema with given schema id on a
@@ -4051,6 +4159,22 @@ RemoteSchemaIdExpressionByName(char *schemaName)
}
+/*
+ * RemoteTableIdExpression returns an expression in text form that
+ * can be used to obtain the OID of given table on a different node
+ * when included in a query string.
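+ *
+ * For example, for a table t in schema s the returned expression is
+ * 's.t'::regclass.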
+ */
+static char *
+RemoteTableIdExpression(Oid relationId)
+{
+ StringInfo regclassExpr = makeStringInfo();
+ appendStringInfo(regclassExpr, "%s::regclass",
+ quote_literal_cstr(generate_qualified_relation_name(relationId)));
+
+ return regclassExpr->data;
+}
+
+
/*
* SetMetadataSyncNodesFromNodeList sets list of nodes that needs to be metadata
* synced among given node list into metadataSyncContext.
diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c
index 6fc224738..ae0f6589a 100644
--- a/src/backend/distributed/metadata/metadata_utility.c
+++ b/src/backend/distributed/metadata/metadata_utility.c
@@ -29,6 +29,9 @@
#include "catalog/pg_constraint.h"
#include "catalog/pg_extension.h"
#include "catalog/pg_namespace.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "catalog/pg_proc_d.h"
+#endif
#include "catalog/pg_type.h"
#include "commands/extension.h"
#include "commands/sequence.h"
@@ -1395,6 +1398,17 @@ IsActiveShardPlacement(ShardPlacement *shardPlacement)
}
+/*
+ * IsRemoteShardPlacement returns true if the shard placement is on a remote
+ * node.
+ */
+bool
+IsRemoteShardPlacement(ShardPlacement *shardPlacement)
+{
+ return shardPlacement->groupId != GetLocalGroupId();
+}
+
+
/*
* IsPlacementOnWorkerNode checks if the shard placement is for to the given
* workenode.
@@ -1780,6 +1794,24 @@ InsertShardRow(Oid relationId, uint64 shardId, char storageType,
}
+/*
+ * InsertShardPlacementRowGlobally inserts shard placement that has given
+ * parameters into pg_dist_placement globally.
+ */
+ShardPlacement *
+InsertShardPlacementRowGlobally(uint64 shardId, uint64 placementId,
+ uint64 shardLength, int32 groupId)
+{
+ InsertShardPlacementRow(shardId, placementId, shardLength, groupId);
+
+ char *insertPlacementCommand =
+ AddPlacementMetadataCommand(shardId, placementId, shardLength, groupId);
+ SendCommandToWorkersWithMetadata(insertPlacementCommand);
+
+ return LoadShardPlacement(shardId, placementId);
+}
+
+
/*
* InsertShardPlacementRow opens the shard placement system catalog, and inserts
* a new row with the given values into that system catalog. If placementId is
@@ -1996,6 +2028,21 @@ DeleteShardRow(uint64 shardId)
}
+/*
+ * DeleteShardPlacementRowGlobally deletes shard placement that has given
+ * parameters from pg_dist_placement globally.
+ */
+void
+DeleteShardPlacementRowGlobally(uint64 placementId)
+{
+ DeleteShardPlacementRow(placementId);
+
+ char *deletePlacementCommand =
+ DeletePlacementMetadataCommand(placementId);
+ SendCommandToWorkersWithMetadata(deletePlacementCommand);
+}
+
+
/*
* DeleteShardPlacementRow opens the shard placement system catalog, finds the placement
* with the given placementId, and deletes it.
@@ -2240,6 +2287,93 @@ UpdateDistributionColumn(Oid relationId, char distributionMethod, Var *distribut
}
+/*
+ * UpdateNoneDistTableMetadataGlobally globally updates pg_dist_partition for
+ * given none-distributed table.
+ */
+void
+UpdateNoneDistTableMetadataGlobally(Oid relationId, char replicationModel,
+ uint32 colocationId, bool autoConverted)
+{
+ UpdateNoneDistTableMetadata(relationId, replicationModel,
+ colocationId, autoConverted);
+
+ if (ShouldSyncTableMetadata(relationId))
+ {
+ char *metadataCommand =
+ UpdateNoneDistTableMetadataCommand(relationId,
+ replicationModel,
+ colocationId,
+ autoConverted);
+ SendCommandToWorkersWithMetadata(metadataCommand);
+ }
+}
+
+
+/*
+ * UpdateNoneDistTableMetadata locally updates pg_dist_partition for given
+ * none-distributed table.
+ */
+void
+UpdateNoneDistTableMetadata(Oid relationId, char replicationModel, uint32 colocationId,
+ bool autoConverted)
+{
+ if (HasDistributionKey(relationId))
+ {
+ ereport(ERROR, (errmsg("cannot update metadata for a distributed "
+ "table that has a distribution column")));
+ }
+
+ ScanKeyData scanKey[1];
+ int scanKeyCount = 1;
+ bool indexOK = true;
+ Datum values[Natts_pg_dist_partition];
+ bool isnull[Natts_pg_dist_partition];
+ bool replace[Natts_pg_dist_partition];
+
+ Relation pgDistPartition = table_open(DistPartitionRelationId(), RowExclusiveLock);
+ TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
+ ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid,
+ BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId));
+
+ SysScanDesc scanDescriptor = systable_beginscan(pgDistPartition,
+ DistPartitionLogicalRelidIndexId(),
+ indexOK,
+ NULL, scanKeyCount, scanKey);
+
+ HeapTuple heapTuple = systable_getnext(scanDescriptor);
+ if (!HeapTupleIsValid(heapTuple))
+ {
+ ereport(ERROR, (errmsg("could not find valid entry for Citus table with oid: %u",
+ relationId)));
+ }
+
+ memset(replace, 0, sizeof(replace));
+
+ values[Anum_pg_dist_partition_colocationid - 1] = UInt32GetDatum(colocationId);
+ isnull[Anum_pg_dist_partition_colocationid - 1] = false;
+ replace[Anum_pg_dist_partition_colocationid - 1] = true;
+
+ values[Anum_pg_dist_partition_repmodel - 1] = CharGetDatum(replicationModel);
+ isnull[Anum_pg_dist_partition_repmodel - 1] = false;
+ replace[Anum_pg_dist_partition_repmodel - 1] = true;
+
+ values[Anum_pg_dist_partition_autoconverted - 1] = BoolGetDatum(autoConverted);
+ isnull[Anum_pg_dist_partition_autoconverted - 1] = false;
+ replace[Anum_pg_dist_partition_autoconverted - 1] = true;
+
+ heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace);
+
+ CatalogTupleUpdate(pgDistPartition, &heapTuple->t_self, heapTuple);
+
+ CitusInvalidateRelcacheByRelid(relationId);
+ CommandCounterIncrement();
+
+ systable_endscan(scanDescriptor);
+ table_close(pgDistPartition, NoLock);
+}
+
+
/*
* Check that the current user has `mode` permissions on relationId, error out
* if not. Superusers always have such permissions.
@@ -2263,7 +2397,7 @@ EnsureTablePermissions(Oid relationId, AclMode mode)
void
EnsureTableOwner(Oid relationId)
{
- if (!pg_class_ownercheck(relationId, GetUserId()))
+ if (!object_ownercheck(RelationRelationId, relationId, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE,
get_rel_name(relationId));
@@ -2278,7 +2412,7 @@ EnsureTableOwner(Oid relationId)
void
EnsureSchemaOwner(Oid schemaId)
{
- if (!pg_namespace_ownercheck(schemaId, GetUserId()))
+ if (!object_ownercheck(NamespaceRelationId, schemaId, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SCHEMA,
get_namespace_name(schemaId));
@@ -2294,7 +2428,7 @@ EnsureSchemaOwner(Oid schemaId)
void
EnsureFunctionOwner(Oid functionId)
{
- if (!pg_proc_ownercheck(functionId, GetUserId()))
+ if (!object_ownercheck(ProcedureRelationId, functionId, GetUserId()))
{
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION,
get_func_name(functionId));
@@ -3286,11 +3420,11 @@ BackgroundTaskHasUmnetDependencies(int64 jobId, int64 taskId)
/* pg_catalog.pg_dist_background_task_depend.job_id = jobId */
ScanKeyInit(&scanKey[0], Anum_pg_dist_background_task_depend_job_id,
- BTEqualStrategyNumber, F_INT8EQ, jobId);
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(jobId));
/* pg_catalog.pg_dist_background_task_depend.task_id = $taskId */
ScanKeyInit(&scanKey[1], Anum_pg_dist_background_task_depend_task_id,
- BTEqualStrategyNumber, F_INT8EQ, taskId);
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(taskId));
SysScanDesc scanDescriptor =
systable_beginscan(pgDistBackgroundTasksDepend,
diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c
index 60a5ab92b..a73f2e9d2 100644
--- a/src/backend/distributed/metadata/node_metadata.c
+++ b/src/backend/distributed/metadata/node_metadata.c
@@ -9,7 +9,6 @@
#include "funcapi.h"
#include "utils/plancache.h"
-
#include "access/genam.h"
#include "access/heapam.h"
#include "access/htup.h"
@@ -102,8 +101,8 @@ static HeapTuple GetNodeByNodeId(int32 nodeId);
static int32 GetNextGroupId(void);
static int GetNextNodeId(void);
static void InsertPlaceholderCoordinatorRecord(void);
-static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport, NodeMetadata
- *nodeMetadata);
+static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport,
+ NodeMetadata *nodeMetadata);
static void DeleteNodeRow(char *nodename, int32 nodeport);
static void BlockDistributedQueriesOnMetadataNodes(void);
static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple);
@@ -134,6 +133,13 @@ static void MarkNodesNotSyncedInLoopBackConnection(MetadataSyncContext *context,
static void EnsureParentSessionHasExclusiveLockOnPgDistNode(pid_t parentSessionPid);
static void SetNodeMetadata(MetadataSyncContext *context, bool localOnly);
static void EnsureTransactionalMetadataSyncMode(void);
+static void LockShardsInWorkerPlacementList(WorkerNode *workerNode, LOCKMODE
+ lockMode);
+static BackgroundWorkerHandle * CheckBackgroundWorkerToObtainLocks(int32 lock_cooldown);
+static BackgroundWorkerHandle * LockPlacementsWithBackgroundWorkersInPrimaryNode(
+ WorkerNode *workerNode, bool force, int32 lock_cooldown);
+
+/* Function definitions go here */
/* declarations for dynamic loading */
PG_FUNCTION_INFO_V1(citus_set_coordinator_host);
@@ -152,6 +158,7 @@ PG_FUNCTION_INFO_V1(master_disable_node);
PG_FUNCTION_INFO_V1(citus_activate_node);
PG_FUNCTION_INFO_V1(master_activate_node);
PG_FUNCTION_INFO_V1(citus_update_node);
+PG_FUNCTION_INFO_V1(citus_pause_node_within_txn);
PG_FUNCTION_INFO_V1(master_update_node);
PG_FUNCTION_INFO_V1(get_shard_id_for_distribution_column);
PG_FUNCTION_INFO_V1(citus_nodename_for_nodeid);
@@ -160,7 +167,6 @@ PG_FUNCTION_INFO_V1(citus_coordinator_nodeid);
PG_FUNCTION_INFO_V1(citus_is_coordinator);
PG_FUNCTION_INFO_V1(citus_internal_mark_node_not_synced);
-
/*
* DefaultNodeMetadata creates a NodeMetadata struct with the fields set to
* sane defaults, e.g. nodeRack = WORKER_DEFAULT_RACK.
@@ -544,7 +550,8 @@ citus_disable_node(PG_FUNCTION_ARGS)
"metadata is not allowed"),
errhint("You can force disabling node, SELECT "
"citus_disable_node('%s', %d, "
- "synchronous:=true);", workerNode->workerName,
+ "synchronous:=true);",
+ workerNode->workerName,
nodePort),
errdetail("Citus uses the first worker node in the "
"metadata for certain internal operations when "
@@ -693,8 +700,7 @@ citus_set_node_property(PG_FUNCTION_ARGS)
else
{
ereport(ERROR, (errmsg(
- "only the 'shouldhaveshards' property can be set using this function"
- )));
+ "only the 'shouldhaveshards' property can be set using this function")));
}
TransactionModifiedNodeMetadata = true;
@@ -1160,6 +1166,100 @@ ActivateNodeList(MetadataSyncContext *context)
}
+/*
+ * LockShardsInWorkerPlacementList acquires shard metadata locks on all shards
+ * residing on the given worker node.
+ *
+ * TODO: This function is not compatible with the query-from-any-node feature.
+ * To ensure proper behavior, locks need to be acquired on placements across
+ * all nodes rather than only on the coordinator (or the specific node from
+ * which this function is called).
+ */
+void
+LockShardsInWorkerPlacementList(WorkerNode *workerNode, LOCKMODE lockMode)
+{
+ List *placementList = AllShardPlacementsOnNodeGroup(workerNode->groupId);
+ LockShardsInPlacementListMetadata(placementList, lockMode);
+}
+
+
+/*
+ * CheckBackgroundWorkerToObtainLocks starts a background worker that kills
+ * backends holding locks that conflict with this backend. It returns NULL if
+ * the background worker could not be started.
+ */
+BackgroundWorkerHandle *
+CheckBackgroundWorkerToObtainLocks(int32 lock_cooldown)
+{
+ BackgroundWorkerHandle *handle = StartLockAcquireHelperBackgroundWorker(MyProcPid,
+ lock_cooldown);
+ if (!handle)
+ {
+ /*
+ * We failed to start a background worker, which probably means that we exceeded
+ * max_worker_processes, and this is unlikely to be resolved by retrying. We do not want
+ * to repeatedly throw an error because if citus_update_node is called to complete a
+ * failover then finishing is the only way to bring the cluster back up. Therefore we
+ * give up on killing other backends and simply wait for the lock. We do set
+ * lock_timeout to lock_cooldown, because we don't want to wait forever to get a lock.
+ */
+ SetLockTimeoutLocally(lock_cooldown);
+ ereport(WARNING, (errmsg(
+ "could not start background worker to kill backends with conflicting"
+ " locks to force the update. Degrading to acquiring locks "
+ "with a lock time out."),
+ errhint(
+ "Increasing max_worker_processes might help.")));
+ }
+ return handle;
+}
+
+
+/*
+ * LockPlacementsWithBackgroundWorkersInPrimaryNode locks the shard placements
+ * on the given primary node. If force is true, we start a background worker
+ * to kill backends holding locks that conflict with this backend.
+ *
+ * If the node is a primary node we block reads and writes.
+ *
+ * This lock has two purposes:
+ *
+ * - Ensure buggy code in Citus doesn't cause failures when the
+ * nodename/nodeport of a node changes mid-query
+ *
+ * - Provide fencing during failover, after this function returns all
+ * connections will use the new node location.
+ *
+ * Drawback:
+ *
+ * - This function blocks until all previous queries have finished. This
+ * means that long-running queries will prevent failover.
+ *
+ * In case of node failure said long-running queries will fail in the end
+ * anyway as they will be unable to commit successfully on the failed
+ * machine. To cause quick failure of these queries use force => true
+ * during the invocation of citus_update_node to terminate conflicting
+ * backends proactively.
+ *
+ * It might be worth blocking reads to a secondary for the same reasons,
+ * though we currently only query secondaries on follower clusters
+ * where these locks will have no effect.
+ */
+BackgroundWorkerHandle *
+LockPlacementsWithBackgroundWorkersInPrimaryNode(WorkerNode *workerNode, bool force, int32
+ lock_cooldown)
+{
+ BackgroundWorkerHandle *handle = NULL;
+
+ if (NodeIsPrimary(workerNode))
+ {
+ if (force)
+ {
+ handle = CheckBackgroundWorkerToObtainLocks(lock_cooldown);
+ }
+ LockShardsInWorkerPlacementList(workerNode, AccessExclusiveLock);
+ }
+ return handle;
+}
+
+
/*
* citus_update_node moves the requested node to a different nodename and nodeport. It
* locks to ensure no queries are running concurrently; and is intended for customers who
@@ -1188,8 +1288,6 @@ citus_update_node(PG_FUNCTION_ARGS)
int32 lock_cooldown = PG_GETARG_INT32(4);
char *newNodeNameString = text_to_cstring(newNodeName);
- List *placementList = NIL;
- BackgroundWorkerHandle *handle = NULL;
WorkerNode *workerNodeWithSameAddress = FindWorkerNodeAnyCluster(newNodeNameString,
newNodePort);
@@ -1226,64 +1324,9 @@ citus_update_node(PG_FUNCTION_ARGS)
EnsureTransactionalMetadataSyncMode();
}
- /*
- * If the node is a primary node we block reads and writes.
- *
- * This lock has two purposes:
- *
- * - Ensure buggy code in Citus doesn't cause failures when the
- * nodename/nodeport of a node changes mid-query
- *
- * - Provide fencing during failover, after this function returns all
- * connections will use the new node location.
- *
- * Drawback:
- *
- * - This function blocks until all previous queries have finished. This
- * means that long-running queries will prevent failover.
- *
- * In case of node failure said long-running queries will fail in the end
- * anyway as they will be unable to commit successfully on the failed
- * machine. To cause quick failure of these queries use force => true
- * during the invocation of citus_update_node to terminate conflicting
- * backends proactively.
- *
- * It might be worth blocking reads to a secondary for the same reasons,
- * though we currently only query secondaries on follower clusters
- * where these locks will have no effect.
- */
- if (NodeIsPrimary(workerNode))
- {
- /*
- * before acquiring the locks check if we want a background worker to help us to
- * aggressively obtain the locks.
- */
- if (force)
- {
- handle = StartLockAcquireHelperBackgroundWorker(MyProcPid, lock_cooldown);
- if (!handle)
- {
- /*
- * We failed to start a background worker, which probably means that we exceeded
- * max_worker_processes, and this is unlikely to be resolved by retrying. We do not want
- * to repeatedly throw an error because if citus_update_node is called to complete a
- * failover then finishing is the only way to bring the cluster back up. Therefore we
- * give up on killing other backends and simply wait for the lock. We do set
- * lock_timeout to lock_cooldown, because we don't want to wait forever to get a lock.
- */
- SetLockTimeoutLocally(lock_cooldown);
- ereport(WARNING, (errmsg(
- "could not start background worker to kill backends with conflicting"
- " locks to force the update. Degrading to acquiring locks "
- "with a lock time out."),
- errhint(
- "Increasing max_worker_processes might help.")));
- }
- }
-
- placementList = AllShardPlacementsOnNodeGroup(workerNode->groupId);
- LockShardsInPlacementListMetadata(placementList, AccessExclusiveLock);
- }
+ BackgroundWorkerHandle *handle = LockPlacementsWithBackgroundWorkersInPrimaryNode(
+ workerNode, force,
+ lock_cooldown);
/*
* if we have planned statements such as prepared statements, we should clear the cache so that
@@ -1330,6 +1373,34 @@ citus_update_node(PG_FUNCTION_ARGS)
}
+/*
+ * citus_pause_node_within_txn obtains locks on all shard placements on the
+ * given node. The acquired locks are released automatically when the
+ * transaction commits, so this function must be invoked within a transaction.
+ * It is useful when writes to a specific node need to be temporarily disabled
+ * within a transaction.
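+ *
+ * A typical (illustrative) usage pattern, passing all three arguments
+ * explicitly:
+ *   BEGIN;
+ *   SELECT citus_pause_node_within_txn(<nodeid>, false, 10000);
+ *   -- writes to shard placements on that node are now blocked
+ *   COMMIT; -- releases the locks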
+ */
+Datum
+citus_pause_node_within_txn(PG_FUNCTION_ARGS)
+{
+ CheckCitusVersion(ERROR);
+
+ int32 nodeId = PG_GETARG_INT32(0);
+ bool force = PG_GETARG_BOOL(1);
+ int32 lock_cooldown = PG_GETARG_INT32(2);
+
+ WorkerNode *workerNode = FindNodeAnyClusterByNodeId(nodeId);
+ if (workerNode == NULL)
+ {
+ ereport(ERROR, (errcode(ERRCODE_NO_DATA_FOUND),
+ errmsg("node %u not found", nodeId)));
+ }
+
+ LockPlacementsWithBackgroundWorkersInPrimaryNode(workerNode, force, lock_cooldown);
+
+ PG_RETURN_VOID();
+}
+
+
/*
* master_update_node is a wrapper function for old UDF name.
*/
@@ -1947,7 +2018,8 @@ ErrorIfNodeContainsNonRemovablePlacements(WorkerNode *workerNode)
ereport(ERROR, (errmsg("cannot remove or disable the node "
"%s:%d because because it contains "
"the only shard placement for "
- "shard " UINT64_FORMAT, workerNode->workerName,
+ "shard " UINT64_FORMAT,
+ workerNode->workerName,
workerNode->workerPort, placement->shardId),
errdetail("One of the table(s) that prevents the operation "
"complete successfully is %s",
@@ -2499,7 +2571,8 @@ ErrorIfCoordinatorMetadataSetFalse(WorkerNode *workerNode, Datum value, char *fi
if (!valueBool && workerNode->groupId == COORDINATOR_GROUP_ID)
{
ereport(ERROR, (errmsg("cannot change \"%s\" field of the "
- "coordinator node", field)));
+ "coordinator node",
+ field)));
}
}
diff --git a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c
index bcd74fbbc..54f764fc1 100644
--- a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c
+++ b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c
@@ -93,7 +93,7 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("name or argument lists may not contain nulls")));
}
- typename = typeStringToTypeName(TextDatumGetCString(elems[0]));
+ typename = typeStringToTypeName_compat(TextDatumGetCString(elems[0]), NULL);
}
else if (type == OBJECT_LARGEOBJECT)
{
@@ -160,7 +160,8 @@ PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr)
errmsg("name or argument lists may not contain nulls")));
}
args = lappend(args,
- typeStringToTypeName(TextDatumGetCString(elems[i])));
+ typeStringToTypeName_compat(TextDatumGetCString(elems[i]),
+ NULL));
}
}
else
diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c
index 358927a09..d0fcc9612 100644
--- a/src/backend/distributed/operations/create_shards.c
+++ b/src/backend/distributed/operations/create_shards.c
@@ -82,8 +82,8 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
int32 replicationFactor, bool useExclusiveConnections)
{
CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(distributedTableId);
- bool colocatedShard = false;
List *insertedShardPlacements = NIL;
+ List *insertedShardIds = NIL;
/* make sure table is hash partitioned */
CheckHashPartitionedTable(distributedTableId);
@@ -175,7 +175,9 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
/* initialize the hash token space for this shard */
int32 shardMinHashToken = PG_INT32_MIN + (shardIndex * hashTokenIncrement);
int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1);
- uint64 shardId = GetNextShardId();
+ uint64 *shardIdPtr = (uint64 *) palloc0(sizeof(uint64));
+ *shardIdPtr = GetNextShardId();
+ insertedShardIds = lappend(insertedShardIds, shardIdPtr);
/* if we are at the last shard, make sure the max token value is INT_MAX */
if (shardIndex == (shardCount - 1))
@@ -187,21 +189,31 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
text *minHashTokenText = IntegerToText(shardMinHashToken);
text *maxHashTokenText = IntegerToText(shardMaxHashToken);
- InsertShardRow(distributedTableId, shardId, shardStorageType,
+ InsertShardRow(distributedTableId, *shardIdPtr, shardStorageType,
minHashTokenText, maxHashTokenText);
- List *currentInsertedShardPlacements = InsertShardPlacementRows(
- distributedTableId,
- shardId,
- workerNodeList,
- roundRobinNodeIndex,
- replicationFactor);
+ InsertShardPlacementRows(distributedTableId,
+ *shardIdPtr,
+ workerNodeList,
+ roundRobinNodeIndex,
+ replicationFactor);
+ }
+
+ /*
+ * load shard placements for the shard at once after all placement insertions
+ * finished. This prevents MetadataCache from rebuilding unnecessarily after
+ * each placement insertion.
+ */
+ uint64 *shardIdPtr;
+ foreach_ptr(shardIdPtr, insertedShardIds)
+ {
+ List *placementsForShard = ShardPlacementList(*shardIdPtr);
insertedShardPlacements = list_concat(insertedShardPlacements,
- currentInsertedShardPlacements);
+ placementsForShard);
}
CreateShardsOnWorkers(distributedTableId, insertedShardPlacements,
- useExclusiveConnections, colocatedShard);
+ useExclusiveConnections);
}
@@ -213,7 +225,6 @@ void
CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
useExclusiveConnections)
{
- bool colocatedShard = true;
List *insertedShardPlacements = NIL;
List *insertedShardIds = NIL;
@@ -294,7 +305,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
/*
* load shard placements for the shard at once after all placement insertions
- * finished. That prevents MetadataCache from rebuilding unnecessarily after
+ * finished. This prevents MetadataCache from rebuilding unnecessarily after
* each placement insertion.
*/
uint64 *shardIdPtr;
@@ -306,7 +317,7 @@ CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool
}
CreateShardsOnWorkers(targetRelationId, insertedShardPlacements,
- useExclusiveConnections, colocatedShard);
+ useExclusiveConnections);
}
@@ -322,7 +333,6 @@ CreateReferenceTableShard(Oid distributedTableId)
text *shardMinValue = NULL;
text *shardMaxValue = NULL;
bool useExclusiveConnection = false;
- bool colocatedShard = false;
/*
* In contrast to append/range partitioned tables it makes more sense to
@@ -363,12 +373,21 @@ CreateReferenceTableShard(Oid distributedTableId)
InsertShardRow(distributedTableId, shardId, shardStorageType, shardMinValue,
shardMaxValue);
- List *insertedShardPlacements = InsertShardPlacementRows(distributedTableId, shardId,
- nodeList, workerStartIndex,
- replicationFactor);
+ InsertShardPlacementRows(distributedTableId,
+ shardId,
+ nodeList,
+ workerStartIndex,
+ replicationFactor);
+
+ /*
+ * load shard placements for the shard at once after all placement insertions
+ * finished. This prevents MetadataCache from rebuilding unnecessarily after
+ * each placement insertion.
+ */
+ List *insertedShardPlacements = ShardPlacementList(shardId);
CreateShardsOnWorkers(distributedTableId, insertedShardPlacements,
- useExclusiveConnection, colocatedShard);
+ useExclusiveConnection);
}
@@ -400,13 +419,8 @@ CreateSingleShardTableShardWithRoundRobinPolicy(Oid relationId, uint32 colocatio
List *workerNodeList = DistributedTablePlacementNodeList(RowShareLock);
workerNodeList = SortList(workerNodeList, CompareWorkerNodes);
- int32 workerNodeCount = list_length(workerNodeList);
- if (workerNodeCount == 0)
- {
- ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("couldn't find any worker nodes"),
- errhint("Add more worker nodes")));
- }
+ int roundRobinNodeIdx =
+ EmptySingleShardTableColocationDecideNodeId(colocationId);
char shardStorageType = ShardStorageType(relationId);
text *minHashTokenText = NULL;
@@ -415,26 +429,51 @@ CreateSingleShardTableShardWithRoundRobinPolicy(Oid relationId, uint32 colocatio
InsertShardRow(relationId, shardId, shardStorageType,
minHashTokenText, maxHashTokenText);
- /* determine the node index based on colocation id */
- int roundRobinNodeIdx = colocationId % workerNodeCount;
-
int replicationFactor = 1;
- List *insertedShardPlacements = InsertShardPlacementRows(
- relationId,
- shardId,
- workerNodeList,
- roundRobinNodeIdx,
- replicationFactor);
+ InsertShardPlacementRows(relationId,
+ shardId,
+ workerNodeList,
+ roundRobinNodeIdx,
+ replicationFactor);
+
+ /*
+ * load shard placements for the shard at once after all placement insertions
+ * finished. This prevents MetadataCache from rebuilding unnecessarily after
+ * each placement insertion.
+ */
+ List *insertedShardPlacements = ShardPlacementList(shardId);
/*
* We don't need to force using exclusive connections because we're anyway
* creating a single shard.
*/
bool useExclusiveConnection = false;
-
- bool colocatedShard = false;
CreateShardsOnWorkers(relationId, insertedShardPlacements,
- useExclusiveConnection, colocatedShard);
+ useExclusiveConnection);
+}
+
+
+/*
+ * EmptySingleShardTableColocationDecideNodeId returns the index of the node
+ * that the first shard to be created in the given "single-shard table
+ * colocation group" should be placed on.
+ *
+ * The index is determined by taking the colocation id modulo the length of
+ * the list returned by DistributedTablePlacementNodeList().
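+ *
+ * For example, with 4 eligible worker nodes, colocation id 10 maps to node
+ * index 2.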
+ */
+int
+EmptySingleShardTableColocationDecideNodeId(uint32 colocationId)
+{
+ List *workerNodeList = DistributedTablePlacementNodeList(RowShareLock);
+ int32 workerNodeCount = list_length(workerNodeList);
+ if (workerNodeCount == 0)
+ {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("couldn't find any worker nodes"),
+ errhint("Add more worker nodes")));
+ }
+
+ return colocationId % workerNodeCount;
}
diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c
index dca9906a6..a3f7092d1 100644
--- a/src/backend/distributed/operations/node_protocol.c
+++ b/src/backend/distributed/operations/node_protocol.c
@@ -277,7 +277,7 @@ master_get_new_placementid(PG_FUNCTION_ARGS)
/*
* GetNextPlacementId allocates and returns a unique placementId for
* the placement to be created. This allocation occurs both in shared memory
- * and in write ahead logs; writing to logs avoids the risk of having shardId
+ * and in write ahead logs; writing to logs avoids the risk of having placementId
* collisions.
*
* NB: This can be called by any user; for now we have decided that that's
@@ -612,7 +612,7 @@ GetPreLoadTableCreationCommands(Oid relationId,
{
List *tableDDLEventList = NIL;
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
/* fetch table schema and column option definitions */
char *tableSchemaDef = pg_get_tableschemadef_string(relationId,
@@ -665,7 +665,7 @@ GetPreLoadTableCreationCommands(Oid relationId,
tableDDLEventList = list_concat(tableDDLEventList, policyCommands);
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
return tableDDLEventList;
}
@@ -754,7 +754,7 @@ GatherIndexAndConstraintDefinitionList(Form_pg_index indexForm, List **indexDDLE
int indexFlags)
{
/* generate fully-qualified names */
- PushOverrideEmptySearchPath(CurrentMemoryContext);
+ int saveNestLevel = PushEmptySearchPath();
Oid indexId = indexForm->indexrelid;
bool indexImpliedByConstraint = IndexImpliedByAConstraint(indexForm);
@@ -805,7 +805,7 @@ GatherIndexAndConstraintDefinitionList(Form_pg_index indexForm, List **indexDDLE
}
/* revert back to original search_path */
- PopOverrideSearchPath();
+ PopEmptySearchPath(saveNestLevel);
}
diff --git a/src/backend/distributed/operations/replicate_none_dist_table_shard.c b/src/backend/distributed/operations/replicate_none_dist_table_shard.c
new file mode 100644
index 000000000..c28490367
--- /dev/null
+++ b/src/backend/distributed/operations/replicate_none_dist_table_shard.c
@@ -0,0 +1,301 @@
+/*-------------------------------------------------------------------------
+ *
+ * replicate_none_dist_table_shard.c
+ *    Routines to replicate the shard of a none-distributed table to
+ *    a remote node.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+#include "miscadmin.h"
+#include "nodes/pg_list.h"
+
+#include "distributed/adaptive_executor.h"
+#include "distributed/commands.h"
+#include "distributed/commands/utility_hook.h"
+#include "distributed/coordinator_protocol.h"
+#include "distributed/deparse_shard_query.h"
+#include "distributed/listutils.h"
+#include "distributed/replicate_none_dist_table_shard.h"
+#include "distributed/shard_utils.h"
+#include "distributed/worker_manager.h"
+#include "distributed/worker_protocol.h"
+
+
+static void CreateForeignKeysFromReferenceTablesOnShards(Oid noneDistTableId);
+static Oid ForeignConstraintGetReferencingTableId(const char *queryString);
+static void EnsureNoneDistTableWithCoordinatorPlacement(Oid noneDistTableId);
+static void SetLocalEnableManualChangesToShard(bool state);
+
+
+/*
+ * NoneDistTableReplicateCoordinatorPlacement replicates the local (presumably
+ * coordinator) shard placement of the given none-distributed table to the given
+ * target nodes and inserts records for the new placements into pg_dist_placement.
+ */
+void
+NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId,
+ List *targetNodeList)
+{
+ EnsureCoordinator();
+ EnsureNoneDistTableWithCoordinatorPlacement(noneDistTableId);
+
+ /*
+	 * We don't expect callers to try to replicate the shard to remote nodes
+	 * if some of the remote nodes already have a placement for the shard.
+ */
+ int64 shardId = GetFirstShardId(noneDistTableId);
+ List *remoteShardPlacementList =
+ FilterShardPlacementList(ActiveShardPlacementList(shardId),
+ IsRemoteShardPlacement);
+ if (list_length(remoteShardPlacementList) > 0)
+ {
+ ereport(ERROR, (errmsg("table already has a remote shard placement")));
+ }
+
+ uint64 shardLength = ShardLength(shardId);
+
+ /* insert new placements to pg_dist_placement */
+ List *insertedPlacementList = NIL;
+ WorkerNode *targetNode = NULL;
+ foreach_ptr(targetNode, targetNodeList)
+ {
+ ShardPlacement *shardPlacement =
+ InsertShardPlacementRowGlobally(shardId, GetNextPlacementId(),
+ shardLength, targetNode->groupId);
+
+ /* and save the placement for shard creation on workers */
+ insertedPlacementList = lappend(insertedPlacementList, shardPlacement);
+ }
+
+ /* create new placements */
+ bool useExclusiveConnection = false;
+ CreateShardsOnWorkers(noneDistTableId, insertedPlacementList,
+ useExclusiveConnection);
+
+ /* fetch coordinator placement before deleting it */
+ Oid localPlacementTableId = GetTableLocalShardOid(noneDistTableId, shardId);
+ ShardPlacement *coordinatorPlacement =
+ linitial(ActiveShardPlacementListOnGroup(shardId, COORDINATOR_GROUP_ID));
+
+ /*
+ * CreateForeignKeysFromReferenceTablesOnShards and CopyFromLocalTableIntoDistTable
+ * need to ignore the local placement, hence we temporarily delete it before
+ * calling them.
+ */
+ DeleteShardPlacementRowGlobally(coordinatorPlacement->placementId);
+
+ /* and copy data from local placement to new placements */
+ CopyFromLocalTableIntoDistTable(
+ localPlacementTableId, noneDistTableId
+ );
+
+ /*
+	 * CreateShardsOnWorkers only creates the foreign keys where the given relation
+	 * is the referencing one, so we also need to create the foreign keys where the
+	 * given relation is the referenced one. We're only interested in the cases
+	 * where the referencing relation is a reference table because the other
+	 * possible table types --i.e., Citus local tables atm-- cannot have placements
+	 * on remote nodes.
+	 *
+	 * Note that we need to create the foreign keys where the given relation is the
+	 * referenced one after copying the data so that constraint checks can pass.
+ */
+ CreateForeignKeysFromReferenceTablesOnShards(noneDistTableId);
+
+ /* using the same placement id, re-insert the deleted placement */
+ InsertShardPlacementRowGlobally(shardId, coordinatorPlacement->placementId,
+ shardLength, COORDINATOR_GROUP_ID);
+}
+
+
+/*
+ * NoneDistTableDeleteCoordinatorPlacement deletes the pg_dist_placement record
+ * for the local (presumably coordinator) shard placement of the given
+ * none-distributed table.
+ */
+void
+NoneDistTableDeleteCoordinatorPlacement(Oid noneDistTableId)
+{
+ EnsureCoordinator();
+ EnsureNoneDistTableWithCoordinatorPlacement(noneDistTableId);
+
+ int64 shardId = GetFirstShardId(noneDistTableId);
+
+ /* we've already verified that table has a coordinator placement */
+ ShardPlacement *coordinatorPlacement =
+ linitial(ActiveShardPlacementListOnGroup(shardId, COORDINATOR_GROUP_ID));
+
+ /* remove the old placement from metadata of local node, i.e., coordinator */
+ DeleteShardPlacementRowGlobally(coordinatorPlacement->placementId);
+}
+
+
+/*
+ * NoneDistTableDropCoordinatorPlacementTable drops the local (presumably
+ * coordinator) shard placement table of the given none-distributed table.
+ */
+void
+NoneDistTableDropCoordinatorPlacementTable(Oid noneDistTableId)
+{
+ EnsureCoordinator();
+
+ if (HasDistributionKey(noneDistTableId))
+ {
+ ereport(ERROR, (errmsg("table is not a none-distributed table")));
+ }
+
+ /*
+	 * We undistribute Citus local tables that are not chained with any reference
+	 * tables via foreign keys at the end of the utility hook.
+	 * Here we temporarily set the related GUC to off to disable that logic for
+	 * internally executed DDLs that might otherwise invoke this mechanism
+	 * unnecessarily.
+	 *
+	 * We also temporarily enable the citus.enable_manual_changes_to_shards GUC to
+	 * allow the given command to modify the shard. Note that we change it only for
+	 * the local session because changes made to shards are allowed for Citus
+	 * internal backends anyway.
+ */
+ int saveNestLevel = NewGUCNestLevel();
+
+ SetLocalEnableLocalReferenceForeignKeys(false);
+ SetLocalEnableManualChangesToShard(true);
+
+ StringInfo dropShardCommand = makeStringInfo();
+ int64 shardId = GetFirstShardId(noneDistTableId);
+ ShardInterval *shardInterval = LoadShardInterval(shardId);
+ appendStringInfo(dropShardCommand, DROP_REGULAR_TABLE_COMMAND,
+ ConstructQualifiedShardName(shardInterval));
+
+ Task *task = CitusMakeNode(Task);
+ task->jobId = INVALID_JOB_ID;
+ task->taskId = INVALID_TASK_ID;
+ task->taskType = DDL_TASK;
+ task->replicationModel = REPLICATION_MODEL_INVALID;
+ SetTaskQueryString(task, dropShardCommand->data);
+
+ ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement);
+ SetPlacementNodeMetadata(targetPlacement, CoordinatorNodeIfAddedAsWorkerOrError());
+
+ task->taskPlacementList = list_make1(targetPlacement);
+
+ bool localExecutionSupported = true;
+ ExecuteUtilityTaskList(list_make1(task), localExecutionSupported);
+
+ AtEOXact_GUC(true, saveNestLevel);
+}
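NoneDistTableDropCoordinatorPlacementTable relies on the usual nest-level trick for transient GUC changes. The following is a hedged sketch of that pattern, assuming only the standard PostgreSQL GUC API; the wrapper function itself is hypothetical and not part of the patch.

#include "postgres.h"
#include "miscadmin.h"
#include "utils/guc.h"

static void
RunWithManualShardChangesAllowed(void (*callback) (void))
{
	int saveNestLevel = NewGUCNestLevel();

	/* allow DDL against shard tables for the rest of this nest level only */
	set_config_option("citus.enable_manual_changes_to_shards", "on",
					  (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
					  GUC_ACTION_LOCAL, true, 0, false);

	callback();

	/* roll back every GUC changed since NewGUCNestLevel() */
	AtEOXact_GUC(true, saveNestLevel);
}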
+
+
+/*
+ * CreateForeignKeysFromReferenceTablesOnShards creates foreign keys on shards
+ * where the given none-distributed table is the referenced table and the
+ * referencing one is a reference table.
+ * one is a reference table.
+ */
+static void
+CreateForeignKeysFromReferenceTablesOnShards(Oid noneDistTableId)
+{
+ EnsureCoordinator();
+
+ if (HasDistributionKey(noneDistTableId))
+ {
+ ereport(ERROR, (errmsg("table is not a none-distributed table")));
+ }
+
+ List *ddlCommandList =
+ GetForeignConstraintFromOtherReferenceTablesCommands(noneDistTableId);
+ if (list_length(ddlCommandList) == 0)
+ {
+ return;
+ }
+
+ List *taskList = NIL;
+
+ char *command = NULL;
+ foreach_ptr(command, ddlCommandList)
+ {
+ List *commandTaskList = InterShardDDLTaskList(
+ ForeignConstraintGetReferencingTableId(command),
+ noneDistTableId, command
+ );
+ taskList = list_concat(taskList, commandTaskList);
+ }
+
+ if (list_length(taskList) == 0)
+ {
+ return;
+ }
+
+ bool localExecutionSupported = true;
+ ExecuteUtilityTaskList(taskList, localExecutionSupported);
+}
+
+
+/*
+ * ForeignConstraintGetReferencingTableId parses the given foreign constraint
+ * command and extracts the referencing table id from it.
+ */
+static Oid
+ForeignConstraintGetReferencingTableId(const char *queryString)
+{
+ Node *queryNode = ParseTreeNode(queryString);
+ if (!IsA(queryNode, AlterTableStmt))
+ {
+ ereport(ERROR, (errmsg("command is not an ALTER TABLE statement")));
+ }
+
+ AlterTableStmt *foreignConstraintStmt = (AlterTableStmt *) queryNode;
+ if (list_length(foreignConstraintStmt->cmds) != 1)
+ {
+ ereport(ERROR, (errmsg("command does not contain a single command")));
+ }
+
+ AlterTableCmd *command = (AlterTableCmd *) linitial(foreignConstraintStmt->cmds);
+ if (command->subtype == AT_AddConstraint)
+ {
+ Constraint *constraint = (Constraint *) command->def;
+ if (constraint && constraint->contype == CONSTR_FOREIGN)
+ {
+ bool missingOk = false;
+ return RangeVarGetRelid(foreignConstraintStmt->relation, NoLock,
+ missingOk);
+ }
+ }
+
+ ereport(ERROR, (errmsg("command does not contain a foreign constraint")));
+}
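For reference, the command strings handed to ForeignConstraintGetReferencingTableId look like the sketch below (table and constraint names are made up); the function returns the OID of the statement's relation, i.e. the referencing side (here reference_tbl).

/* illustration only; names are hypothetical, not taken from the patch */
static const char *exampleForeignKeyCommand =
	"ALTER TABLE public.reference_tbl "
	"ADD CONSTRAINT fkey_to_none_dist FOREIGN KEY (a) "
	"REFERENCES public.none_dist_tbl (a);";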
+
+
+/*
+ * EnsureNoneDistTableWithCoordinatorPlacement throws an error if the given
+ * table is not a none-distributed table that has a coordinator placement.
+ */
+static void
+EnsureNoneDistTableWithCoordinatorPlacement(Oid noneDistTableId)
+{
+ if (HasDistributionKey(noneDistTableId))
+ {
+ ereport(ERROR, (errmsg("table is not a none-distributed table")));
+ }
+
+ int64 shardId = GetFirstShardId(noneDistTableId);
+ if (!ActiveShardPlacementListOnGroup(shardId, COORDINATOR_GROUP_ID))
+ {
+ ereport(ERROR, (errmsg("table does not have a coordinator placement")));
+ }
+}
+
+
+/*
+ * SetLocalEnableManualChangesToShard locally sets the
+ * citus.enable_manual_changes_to_shards GUC to the given state.
+ */
+static void
+SetLocalEnableManualChangesToShard(bool state)
+{
+ set_config_option("citus.enable_manual_changes_to_shards",
+ state ? "on" : "off",
+ (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
+ GUC_ACTION_LOCAL, true, 0, false);
+}
diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c
index d98b30c17..42877bf10 100644
--- a/src/backend/distributed/operations/shard_cleaner.c
+++ b/src/backend/distributed/operations/shard_cleaner.c
@@ -450,7 +450,7 @@ CompareCleanupRecordsByObjectType(const void *leftElement, const void *rightElem
/*
- * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup_record entry
+ * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
* as part of the current transaction. This is primarily useful for deferred drop scenarios,
* since these records would roll back in case of operation failure.
*/
@@ -497,8 +497,8 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
/*
- * InsertCleanupRecordInSubtransaction inserts a new pg_dist_cleanup_record entry
- * in a separate transaction to ensure the record persists after rollback. We should
+ * InsertCleanupRecordInSubtransaction inserts a new pg_dist_cleanup entry in a
+ * separate transaction to ensure the record persists after rollback. We should
* delete these records if the operation completes successfully.
*
* For failure scenarios, use a subtransaction (direct insert via localhost).
@@ -541,7 +541,7 @@ InsertCleanupRecordInSubtransaction(CleanupObject objectType,
/*
- * DeleteCleanupRecordByRecordId deletes a cleanup record by record id.
+ * DeleteCleanupRecordByRecordIdOutsideTransaction deletes a cleanup record by record id.
*/
static void
DeleteCleanupRecordByRecordIdOutsideTransaction(uint64 recordId)
@@ -1005,7 +1005,7 @@ ListCleanupRecordsForCurrentOperation(void)
ScanKeyData scanKey[1];
ScanKeyInit(&scanKey[0], Anum_pg_dist_cleanup_operation_id, BTEqualStrategyNumber,
- F_INT8EQ, UInt64GetDatum(CurrentOperationId));
+ F_INT8EQ, Int64GetDatum(CurrentOperationId));
int scanKeyCount = 1;
Oid scanIndexId = InvalidOid;
@@ -1106,7 +1106,7 @@ TupleToCleanupRecord(HeapTuple heapTuple, TupleDesc tupleDescriptor)
/*
* CleanupRecordExists returns whether a cleanup record with the given
- * record ID exists in pg_dist_cleanup_record.
+ * record ID exists in pg_dist_cleanup.
*/
static bool
CleanupRecordExists(uint64 recordId)
@@ -1119,7 +1119,7 @@ CleanupRecordExists(uint64 recordId)
bool indexOK = true;
ScanKeyInit(&scanKey[0], Anum_pg_dist_cleanup_record_id,
- BTEqualStrategyNumber, F_INT8EQ, UInt64GetDatum(recordId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(recordId));
SysScanDesc scanDescriptor = systable_beginscan(pgDistCleanup,
DistCleanupPrimaryKeyIndexId(),
@@ -1139,7 +1139,7 @@ CleanupRecordExists(uint64 recordId)
/*
- * DeleteCleanupRecordByRecordId deletes a single pg_dist_cleanup_record entry.
+ * DeleteCleanupRecordByRecordId deletes a single pg_dist_cleanup entry.
*/
static void
DeleteCleanupRecordByRecordId(uint64 recordId)
@@ -1152,7 +1152,7 @@ DeleteCleanupRecordByRecordId(uint64 recordId)
bool indexOK = true;
ScanKeyInit(&scanKey[0], Anum_pg_dist_cleanup_record_id,
- BTEqualStrategyNumber, F_INT8EQ, UInt64GetDatum(recordId));
+ BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(recordId));
SysScanDesc scanDescriptor = systable_beginscan(pgDistCleanup,
DistCleanupPrimaryKeyIndexId(),
diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c
index 20004f5fb..e3ee4aa4d 100644
--- a/src/backend/distributed/operations/shard_rebalancer.c
+++ b/src/backend/distributed/operations/shard_rebalancer.c
@@ -526,6 +526,13 @@ GetRebalanceSteps(RebalanceOptions *options)
}
}
+ if (shardAllowedNodeCount < ShardReplicationFactor)
+ {
+ ereport(ERROR, (errmsg("Shard replication factor (%d) cannot be greater than "
+ "number of nodes with should_have_shards=true (%d).",
+ ShardReplicationFactor, shardAllowedNodeCount)));
+ }
+
List *activeShardPlacementListList = NIL;
List *unbalancedShards = NIL;
@@ -875,7 +882,7 @@ ExecutePlacementUpdates(List *placementUpdateList, Oid shardReplicationModeOid,
* ones) and the relation id of the target table. The dynamic shared memory
* portion consists of a RebalanceMonitorHeader and multiple
* PlacementUpdateEventProgress, one for each planned shard placement move. The
- * dsm_handle of the created segment is savedin the progress of the current backend so
+ * dsm_handle of the created segment is saved in the progress of the current backend so
* that it can be read by external agents such as get_rebalance_progress function by
* calling pg_stat_get_progress_info UDF. Since currently only VACUUM commands are
* officially allowed as the command type, we describe ourselves as a VACUUM command and
@@ -2373,8 +2380,8 @@ GetSetCommandListForNewConnections(void)
{
List *commandList = NIL;
- struct config_generic **guc_vars = get_guc_variables();
- int gucCount = GetNumConfigOptions();
+ int gucCount = 0;
+ struct config_generic **guc_vars = get_guc_variables_compat(&gucCount);
for (int gucIndex = 0; gucIndex < gucCount; gucIndex++)
{
@@ -2789,7 +2796,15 @@ FindAllowedTargetFillState(RebalanceState *state, uint64 shardId)
targetFillState->node,
state->functions->context))
{
- return targetFillState;
+ bool targetHasShard = PlacementsHashFind(state->placementsHash,
+ shardId,
+ targetFillState->node);
+
+ /* skip if the shard is already placed on the target node */
+ if (!targetHasShard)
+ {
+ return targetFillState;
+ }
}
}
return NULL;
diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c
index e2578c04c..0772b03b4 100644
--- a/src/backend/distributed/operations/shard_split.c
+++ b/src/backend/distributed/operations/shard_split.c
@@ -216,7 +216,7 @@ ErrorIfCannotSplitShard(SplitOperation splitOperation, ShardInterval *sourceShar
/*
- * Exteded checks before we decide to split the shard.
+ * Extended checks before we decide to split the shard.
* When all consumers (Example : ISOLATE_TENANT_TO_NEW_SHARD) directly call 'SplitShard' API,
* this method will be merged with 'ErrorIfCannotSplitShard' above.
*/
@@ -425,7 +425,7 @@ GetWorkerNodesFromWorkerIds(List *nodeIdsForPlacementList)
* 'shardInterval' : Source shard interval to be split.
* 'shardSplitPointsList' : Split Points list for the source 'shardInterval'.
* 'nodeIdsForPlacementList' : Placement list corresponding to split children.
- * 'distributionColumnList' : Maps relation IDs to distribution columns.
+ * 'distributionColumnOverrides': Maps relation IDs to distribution columns.
* If not specified, the distribution column is read
* from the metadata.
* 'colocatedShardIntervalList' : Shard interval list for colocation group. (only used for
diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c
index abaa00251..23925a315 100644
--- a/src/backend/distributed/operations/shard_transfer.c
+++ b/src/backend/distributed/operations/shard_transfer.c
@@ -1841,7 +1841,11 @@ CopyShardForeignConstraintCommandListGrouped(ShardInterval *shardInterval,
char *referencedSchemaName = get_namespace_name(referencedSchemaId);
char *escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName);
- if (IsCitusTableType(referencedRelationId, REFERENCE_TABLE))
+ if (relationId == referencedRelationId)
+ {
+ referencedShardId = shardInterval->shardId;
+ }
+ else if (IsCitusTableType(referencedRelationId, REFERENCE_TABLE))
{
referencedShardId = GetFirstShardId(referencedRelationId);
}
diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c
index ddab3453b..421593c66 100644
--- a/src/backend/distributed/operations/stage_protocol.c
+++ b/src/backend/distributed/operations/stage_protocol.c
@@ -312,8 +312,6 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId,
int attemptCount = replicationFactor;
int workerNodeCount = list_length(workerNodeList);
int placementsCreated = 0;
- List *foreignConstraintCommandList =
- GetReferencingForeignConstaintCommands(relationId);
IncludeSequenceDefaults includeSequenceDefaults = NO_SEQUENCE_DEFAULTS;
IncludeIdentities includeIdentityDefaults = NO_IDENTITY;
@@ -346,7 +344,6 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId,
uint32 nodeGroupId = workerNode->groupId;
char *nodeName = workerNode->workerName;
uint32 nodePort = workerNode->workerPort;
- int shardIndex = -1; /* not used in this code path */
const uint64 shardSize = 0;
MultiConnection *connection =
GetNodeUserDatabaseConnection(connectionFlag, nodeName, nodePort,
@@ -360,9 +357,8 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId,
continue;
}
- List *commandList = WorkerCreateShardCommandList(relationId, shardIndex, shardId,
- ddlCommandList,
- foreignConstraintCommandList);
+ List *commandList = WorkerCreateShardCommandList(relationId, shardId,
+ ddlCommandList);
ExecuteCriticalRemoteCommandList(connection, commandList);
@@ -387,47 +383,37 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId,
/*
* InsertShardPlacementRows inserts shard placements to the metadata table on
- * the coordinator node. Then, returns the list of added shard placements.
+ * the coordinator node.
*/
-List *
+void
InsertShardPlacementRows(Oid relationId, int64 shardId, List *workerNodeList,
int workerStartIndex, int replicationFactor)
{
int workerNodeCount = list_length(workerNodeList);
- int placementsInserted = 0;
- List *insertedShardPlacements = NIL;
- for (int attemptNumber = 0; attemptNumber < replicationFactor; attemptNumber++)
+ for (int placementIndex = 0; placementIndex < replicationFactor; placementIndex++)
{
- int workerNodeIndex = (workerStartIndex + attemptNumber) % workerNodeCount;
+ int workerNodeIndex = (workerStartIndex + placementIndex) % workerNodeCount;
WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList, workerNodeIndex);
uint32 nodeGroupId = workerNode->groupId;
const uint64 shardSize = 0;
- uint64 shardPlacementId = InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID,
- shardSize, nodeGroupId);
- ShardPlacement *shardPlacement = LoadShardPlacement(shardId, shardPlacementId);
- insertedShardPlacements = lappend(insertedShardPlacements, shardPlacement);
-
- placementsInserted++;
- if (placementsInserted >= replicationFactor)
- {
- break;
- }
+ InsertShardPlacementRow(shardId,
+ INVALID_PLACEMENT_ID,
+ shardSize,
+ nodeGroupId);
}
-
- return insertedShardPlacements;
}
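The simplified loop above still spreads placements round-robin across the worker list; here is a standalone sketch of the index arithmetic (not Citus code, values are illustrative): with 3 workers, workerStartIndex = 1 and replicationFactor = 2, the placements land on worker indexes 1 and 2.

#include <stdio.h>

int
main(void)
{
	int workerNodeCount = 3;
	int workerStartIndex = 1;
	int replicationFactor = 2;

	/* mirrors (workerStartIndex + placementIndex) % workerNodeCount above */
	for (int placementIndex = 0; placementIndex < replicationFactor; placementIndex++)
	{
		int workerNodeIndex = (workerStartIndex + placementIndex) % workerNodeCount;
		printf("placement %d -> worker index %d\n", placementIndex, workerNodeIndex);
	}
	return 0;
}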
/*
* CreateShardsOnWorkers creates shards on worker nodes given the shard placements
- * as a parameter The function creates the shards via the executor. This means
+ * as a parameter. The function creates the shards via the executor. This means
* that it can adopt the number of connections required to create the shards.
*/
void
CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
- bool useExclusiveConnection, bool colocatedShard)
+ bool useExclusiveConnection)
{
IncludeSequenceDefaults includeSequenceDefaults = NO_SEQUENCE_DEFAULTS;
IncludeIdentities includeIdentityDefaults = NO_IDENTITY;
@@ -437,8 +423,6 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
includeSequenceDefaults,
includeIdentityDefaults,
creatingShellTableOnRemoteNode);
- List *foreignConstraintCommandList =
- GetReferencingForeignConstaintCommands(distributedRelationId);
int taskId = 1;
List *taskList = NIL;
@@ -449,18 +433,10 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
{
uint64 shardId = shardPlacement->shardId;
ShardInterval *shardInterval = LoadShardInterval(shardId);
- int shardIndex = -1;
List *relationShardList = RelationShardListForShardCreate(shardInterval);
- if (colocatedShard)
- {
- shardIndex = ShardIndex(shardInterval);
- }
-
List *commandList = WorkerCreateShardCommandList(distributedRelationId,
- shardIndex,
- shardId, ddlCommandList,
- foreignConstraintCommandList);
+ shardId, ddlCommandList);
Task *task = CitusMakeNode(Task);
task->jobId = INVALID_JOB_ID;
@@ -604,14 +580,12 @@ RelationShardListForShardCreate(ShardInterval *shardInterval)
* shardId to create the shard on the worker node.
*/
List *
-WorkerCreateShardCommandList(Oid relationId, int shardIndex, uint64 shardId,
- List *ddlCommandList,
- List *foreignConstraintCommandList)
+WorkerCreateShardCommandList(Oid relationId, uint64 shardId,
+ List *ddlCommandList)
{
List *commandList = NIL;
Oid schemaId = get_rel_namespace(relationId);
char *schemaName = get_namespace_name(schemaId);
- char *escapedSchemaName = quote_literal_cstr(schemaName);
TableDDLCommand *ddlCommand = NULL;
foreach_ptr(ddlCommand, ddlCommandList)
@@ -622,57 +596,12 @@ WorkerCreateShardCommandList(Oid relationId, int shardIndex, uint64 shardId,
commandList = lappend(commandList, applyDDLCommand);
}
- const char *command = NULL;
- foreach_ptr(command, foreignConstraintCommandList)
- {
- char *escapedCommand = quote_literal_cstr(command);
+ ShardInterval *shardInterval = LoadShardInterval(shardId);
- uint64 referencedShardId = INVALID_SHARD_ID;
-
- StringInfo applyForeignConstraintCommand = makeStringInfo();
-
- /* we need to parse the foreign constraint command to get referencing table id */
- Oid referencedRelationId = ForeignConstraintGetReferencedTableId(command);
- if (referencedRelationId == InvalidOid)
- {
- ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot create foreign key constraint"),
- errdetail("Referenced relation cannot be found.")));
- }
-
- Oid referencedSchemaId = get_rel_namespace(referencedRelationId);
- char *referencedSchemaName = get_namespace_name(referencedSchemaId);
- char *escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName);
-
- /*
- * In case of self referencing shards, relation itself might not be distributed
- * already. Therefore we cannot use ColocatedShardIdInRelation which assumes
- * given relation is distributed. Besides, since we know foreign key references
- * itself, referencedShardId is actual shardId anyway. Also, if the referenced
- * relation is a reference table, we cannot use ColocatedShardIdInRelation since
- * reference tables only have one shard. Instead, we fetch the one and only shard
- * from shardlist and use it.
- */
- if (relationId == referencedRelationId)
- {
- referencedShardId = shardId;
- }
- else if (IsCitusTableType(referencedRelationId, REFERENCE_TABLE))
- {
- referencedShardId = GetFirstShardId(referencedRelationId);
- }
- else
- {
- referencedShardId = ColocatedShardIdInRelation(referencedRelationId,
- shardIndex);
- }
-
- appendStringInfo(applyForeignConstraintCommand,
- WORKER_APPLY_INTER_SHARD_DDL_COMMAND, shardId, escapedSchemaName,
- referencedShardId, escapedReferencedSchemaName, escapedCommand);
-
- commandList = lappend(commandList, applyForeignConstraintCommand->data);
- }
+ commandList = list_concat(
+ commandList,
+ CopyShardForeignConstraintCommandList(shardInterval)
+ );
/*
* If the shard is created for a partition, send the command to create the
@@ -680,7 +609,6 @@ WorkerCreateShardCommandList(Oid relationId, int shardIndex, uint64 shardId,
*/
if (PartitionTable(relationId))
{
- ShardInterval *shardInterval = LoadShardInterval(shardId);
char *attachPartitionCommand = GenerateAttachShardPartitionCommand(shardInterval);
commandList = lappend(commandList, attachPartitionCommand);
diff --git a/src/backend/distributed/operations/worker_split_copy_udf.c b/src/backend/distributed/operations/worker_split_copy_udf.c
index c154ac040..18fdbfc4a 100644
--- a/src/backend/distributed/operations/worker_split_copy_udf.c
+++ b/src/backend/distributed/operations/worker_split_copy_udf.c
@@ -52,7 +52,7 @@ static char * TraceWorkerSplitCopyUdf(char *sourceShardToCopySchemaName,
* worker_split_copy(source_shard_id bigint, splitCopyInfo pg_catalog.split_copy_info[])
* UDF to split copy shard to list of destination shards.
* 'source_shard_id' : Source ShardId to split copy.
- * 'splitCopyInfos' : Array of Split Copy Info (destination_shard's id, min/max ranges and node_id)
+ * 'splitCopyInfos' : Array of Split Copy Info (destination_shard's id, min/max ranges and node_id)
*/
Datum
worker_split_copy(PG_FUNCTION_ARGS)
@@ -139,7 +139,7 @@ TraceWorkerSplitCopyUdf(char *sourceShardToCopySchemaName,
appendStringInfo(splitCopyTrace, "performing copy from shard %s to [",
sourceShardToCopyQualifiedName);
- /* split copy always has atleast two destinations */
+ /* split copy always has at least two destinations */
int index = 1;
int splitWayCount = list_length(splitCopyInfoList);
SplitCopyInfo *splitCopyInfo = NULL;
diff --git a/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c b/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c
index 85c2328c7..4d116dfa1 100644
--- a/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c
+++ b/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c
@@ -243,7 +243,7 @@ CreateShardSplitInfo(uint64 sourceShardIdToSplit,
/*
- * AddShardSplitInfoEntryForNodeInMap function add's ShardSplitInfo entry
+ * AddShardSplitInfoEntryForNodeInMap function adds ShardSplitInfo entry
* to the hash map. The key is nodeId on which the new shard is to be placed.
*/
static void
diff --git a/src/backend/distributed/planner/planner_readme.md b/src/backend/distributed/planner/README.md
similarity index 100%
rename from src/backend/distributed/planner/planner_readme.md
rename to src/backend/distributed/planner/README.md
diff --git a/src/backend/distributed/planner/deparse_shard_query.c b/src/backend/distributed/planner/deparse_shard_query.c
index 5743ab1c1..ac37b1399 100644
--- a/src/backend/distributed/planner/deparse_shard_query.c
+++ b/src/backend/distributed/planner/deparse_shard_query.c
@@ -358,6 +358,11 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte)
subquery->jointree = joinTree;
rte->rtekind = RTE_SUBQUERY;
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /* no permission checking for this RTE */
+ rte->perminfoindex = 0;
+#endif
rte->subquery = subquery;
rte->alias = copyObject(rte->eref);
}
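The same PG16 rule shows up in several places in this patch: once an RTE no longer stands for a plain relation reference, its perminfoindex must be cleared so the executor does not look up permission info that no longer applies. A hedged sketch of that convention follows; PG_VERSION_16 is Citus' own version macro (assumed to come from its pg_version_constants header) and the helper name is hypothetical.

#include "postgres.h"
#include "nodes/parsenodes.h"

static void
DemoteRelationRteToSubquery(RangeTblEntry *rte, Query *subquery)
{
	rte->rtekind = RTE_SUBQUERY;
	rte->subquery = subquery;
#if PG_VERSION_NUM >= PG_VERSION_16
	/* no permission checking for this RTE anymore */
	rte->perminfoindex = 0;
#endif
}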
diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c
index 3b6a8f9f7..65278d1ea 100644
--- a/src/backend/distributed/planner/distributed_planner.c
+++ b/src/backend/distributed/planner/distributed_planner.c
@@ -56,6 +56,9 @@
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/pg_list.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "parser/parse_relation.h"
+#endif
#include "parser/parsetree.h"
#include "parser/parse_type.h"
#include "optimizer/optimizer.h"
@@ -108,6 +111,7 @@ static int AssignRTEIdentities(List *rangeTableList, int rteIdCounter);
static void AssignRTEIdentity(RangeTblEntry *rangeTableEntry, int rteIdentifier);
static void AdjustPartitioningForDistributedPlanning(List *rangeTableList,
bool setPartitionedTablesInherited);
+static bool RTEWentThroughAdjustPartitioning(RangeTblEntry *rangeTableEntry);
static PlannedStmt * FinalizeNonRouterPlan(PlannedStmt *localPlan,
DistributedPlan *distributedPlan,
CustomScan *customScan);
@@ -144,6 +148,8 @@ static void WarnIfListHasForeignDistributedTable(List *rangeTableList);
static RouterPlanType GetRouterPlanType(Query *query,
Query *originalQuery,
bool hasUnresolvedParams);
+static void ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan,
+ PlannedStmt *concatPlan);
/* Distributed planner hook */
@@ -494,6 +500,20 @@ AdjustPartitioningForDistributedPlanning(List *rangeTableList,
}
+/*
+ * RTEWentThroughAdjustPartitioning returns true if the given range table entry
+ * has been modified by the AdjustPartitioningForDistributedPlanning
+ * function, false otherwise.
+ */
+static bool
+RTEWentThroughAdjustPartitioning(RangeTblEntry *rangeTableEntry)
+{
+ return (rangeTableEntry->rtekind == RTE_RELATION &&
+ PartitionedTable(rangeTableEntry->relid) &&
+ rangeTableEntry->inh == false);
+}
+
+
/*
* AssignRTEIdentity assigns the given rteIdentifier to the given range table
* entry.
@@ -1066,6 +1086,11 @@ CreateDistributedPlan(uint64 planId, bool allowRecursivePlanning, Query *origina
/*
* Plan subqueries and CTEs that cannot be pushed down by recursively
* calling the planner and return the resulting plans to subPlanList.
+ * Note that GenerateSubplansForSubqueriesAndCTEs will reset perminfoindexes
+ * for some RTEs in originalQuery->rtable list, while not changing
+ * originalQuery->rteperminfos. That's fine because we will go through
+ * standard_planner again, which will adjust things accordingly in
+ * set_plan_references>add_rtes_to_flat_rtable>add_rte_to_flat_rtable.
*/
List *subPlanList = GenerateSubplansForSubqueriesAndCTEs(planId, originalQuery,
plannerRestrictionContext);
@@ -1465,12 +1490,42 @@ FinalizeNonRouterPlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan,
finalPlan->utilityStmt = localPlan->utilityStmt;
/* add original range table list for access permission checks */
- finalPlan->rtable = list_concat(finalPlan->rtable, localPlan->rtable);
+ ConcatenateRTablesAndPerminfos(finalPlan, localPlan);
return finalPlan;
}
+static void
+ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan, PlannedStmt *concatPlan)
+{
+ mainPlan->rtable = list_concat(mainPlan->rtable, concatPlan->rtable);
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /*
+	 * concatPlan's range table list is concatenated to mainPlan's range table
+	 * list, therefore all the perminfoindexes should be updated to their value
+	 * PLUS the highest perminfoindex in mainPlan's permInfos, which is exactly
+	 * the length of that list.
+ */
+ int mainPlan_highest_perminfoindex = list_length(mainPlan->permInfos);
+
+ ListCell *lc;
+ foreach(lc, concatPlan->rtable)
+ {
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+ if (rte->perminfoindex != 0)
+ {
+ rte->perminfoindex = rte->perminfoindex + mainPlan_highest_perminfoindex;
+ }
+ }
+
+ /* finally, concatenate perminfos as well */
+ mainPlan->permInfos = list_concat(mainPlan->permInfos, concatPlan->permInfos);
+#endif
+}
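A small worked example of the offset above (standalone, illustrative numbers): if mainPlan->permInfos already holds 2 entries and a concatPlan RTE carries perminfoindex 1, after concatenation that RTE must point at entry 3 of the combined list.

#include <stdio.h>

int
main(void)
{
	int mainPlanPermInfoCount = 2;		/* list_length(mainPlan->permInfos) */
	int concatPlanPerminfoIndex = 1;	/* rte->perminfoindex before the merge */

	/* concatPlan's permInfos are appended after mainPlan's, hence the shift */
	printf("adjusted perminfoindex: %d\n",
		   concatPlanPerminfoIndex + mainPlanPermInfoCount);
	return 0;
}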
+
+
/*
* FinalizeRouterPlan gets a CustomScan node which already wrapped distributed
* part of a router plan and sets it as the direct child of the router plan
@@ -1502,7 +1557,7 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan)
routerPlan->rtable = list_make1(remoteScanRangeTableEntry);
/* add original range table list for access permission checks */
- routerPlan->rtable = list_concat(routerPlan->rtable, localPlan->rtable);
+ ConcatenateRTablesAndPerminfos(routerPlan, localPlan);
routerPlan->canSetTag = true;
routerPlan->relationOids = NIL;
@@ -1976,6 +2031,62 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
}
+/*
+ * multi_get_relation_info_hook modifies the relation's indexlist
+ * if necessary, to avoid a crash in PG16 caused by our
+ * Citus function AdjustPartitioningForDistributedPlanning().
+ *
+ * AdjustPartitioningForDistributedPlanning() is a hack that we use
+ * to prevent Postgres' standard_planner() from expanding all the partitions
+ * during distributed planning when a distributed partitioned table
+ * is queried. It is required for both correctness and performance
+ * reasons. Although we could eliminate the use of the function for
+ * correctness (e.g., make sure that the rest of the planner can handle
+ * partitions), its performance implication is hard to avoid. Certain
+ * planning logic of Citus (such as router or query pushdown) relies
+ * heavily on the relationRestrictionList. If
+ * AdjustPartitioningForDistributedPlanning() is removed, all the
+ * partitions show up in the relationRestrictionList, causing high
+ * planning times for such queries.
+ */
+void
+multi_get_relation_info_hook(PlannerInfo *root, Oid relationObjectId, bool inhparent,
+ RelOptInfo *rel)
+{
+ if (!CitusHasBeenLoaded())
+ {
+ return;
+ }
+
+ Index varno = rel->relid;
+ RangeTblEntry *rangeTableEntry = planner_rt_fetch(varno, root);
+
+ if (RTEWentThroughAdjustPartitioning(rangeTableEntry))
+ {
+ ListCell *lc = NULL;
+ foreach(lc, rel->indexlist)
+ {
+ IndexOptInfo *indexOptInfo = (IndexOptInfo *) lfirst(lc);
+ if (get_rel_relkind(indexOptInfo->indexoid) == RELKIND_PARTITIONED_INDEX)
+ {
+ /*
+ * Normally, we should not need this. However, the combination of
+ * Postgres commit 3c569049b7b502bb4952483d19ce622ff0af5fd6 and
+ * Citus function AdjustPartitioningForDistributedPlanning()
+				 * forces us to do this. The commit expects partitioned indexes
+				 * to belong to relations with the "inh" flag set properly, whereas
+				 * the Citus function overrides that flag. To avoid a crash,
+				 * we go over the list of index infos and remove all partitioned
+				 * indexes. Partitioned indexes were ignored pre PG16 anyway, so we
+				 * are essentially not breaking any logic.
+ */
+ rel->indexlist = foreach_delete_current(rel->indexlist, lc);
+ }
+ }
+ }
+}
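The hook deletes list cells while iterating, which is only safe through foreach_delete_current(). Below is a minimal sketch of that idiom, assuming the standard PostgreSQL list API; the function itself is hypothetical.

#include "postgres.h"
#include "nodes/pg_list.h"

static List *
RemoveMatchingOids(List *oidList, Oid unwantedOid)
{
	ListCell *lc = NULL;
	foreach(lc, oidList)
	{
		if (lfirst_oid(lc) == unwantedOid)
		{
			/* removes the current cell and keeps the iteration valid */
			oidList = foreach_delete_current(oidList, lc);
		}
	}
	return oidList;
}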
+
+
/*
* TranslatedVars deep copies the translated vars for the given relation index
* if there is any append rel list.
diff --git a/src/backend/distributed/planner/fast_path_router_planner.c b/src/backend/distributed/planner/fast_path_router_planner.c
index 7b97d3ff5..933ee7425 100644
--- a/src/backend/distributed/planner/fast_path_router_planner.c
+++ b/src/backend/distributed/planner/fast_path_router_planner.c
@@ -103,15 +103,24 @@ PlannedStmt *
GeneratePlaceHolderPlannedStmt(Query *parse)
{
PlannedStmt *result = makeNode(PlannedStmt);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ SeqScan *scanNode = makeNode(SeqScan);
+ Plan *plan = &(scanNode->scan.plan);
+#else
Scan *scanNode = makeNode(Scan);
Plan *plan = &scanNode->plan;
+#endif
Node *distKey PG_USED_FOR_ASSERTS_ONLY = NULL;
Assert(FastPathRouterQuery(parse, &distKey));
/* there is only a single relation rte */
+#if PG_VERSION_NUM >= PG_VERSION_16
+ scanNode->scan.scanrelid = 1;
+#else
scanNode->scanrelid = 1;
+#endif
plan->targetlist =
copyObject(FetchStatementTargetList((Node *) parse));
@@ -127,6 +136,9 @@ GeneratePlaceHolderPlannedStmt(Query *parse)
result->stmt_len = parse->stmt_len;
result->rtable = copyObject(parse->rtable);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ result->permInfos = copyObject(parse->rteperminfos);
+#endif
result->planTree = (Plan *) plan;
result->hasReturning = (parse->returningList != NIL);
diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c
index c59e920b5..1b7f468f8 100644
--- a/src/backend/distributed/planner/insert_select_planner.c
+++ b/src/backend/distributed/planner/insert_select_planner.c
@@ -61,6 +61,9 @@ static DistributedPlan * CreateInsertSelectPlanInternal(uint64 planId,
static DistributedPlan * CreateDistributedInsertSelectPlan(Query *originalQuery,
PlannerRestrictionContext *
plannerRestrictionContext);
+static bool InsertSelectHasRouterSelect(Query *originalQuery,
+ PlannerRestrictionContext *
+ plannerRestrictionContext);
static Task * RouterModifyTaskForShardInterval(Query *originalQuery,
CitusTableCacheEntry *targetTableCacheEntry,
ShardInterval *shardInterval,
@@ -75,6 +78,7 @@ static DeferredErrorMessage * DistributedInsertSelectSupported(Query *queryTree,
RangeTblEntry *insertRte,
RangeTblEntry *subqueryRte,
bool allReferenceTables,
+ bool routerSelect,
PlannerRestrictionContext *
plannerRestrictionContext);
static DeferredErrorMessage * InsertPartitionColumnMatchesSelect(Query *query,
@@ -282,6 +286,9 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
RelationRestrictionContext *relationRestrictionContext =
plannerRestrictionContext->relationRestrictionContext;
bool allReferenceTables = relationRestrictionContext->allReferenceTables;
+ bool routerSelect =
+ InsertSelectHasRouterSelect(copyObject(originalQuery),
+ plannerRestrictionContext);
distributedPlan->modLevel = RowModifyLevelForQuery(originalQuery);
@@ -293,13 +300,27 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
insertRte,
subqueryRte,
allReferenceTables,
+ routerSelect,
plannerRestrictionContext);
if (distributedPlan->planningError)
{
return distributedPlan;
}
+
+ /*
+	 * If the query goes to a single node ("router" in Citus' parlance),
+	 * we don't need to go through the AllDistributionKeysInQueryAreEqual checks.
+	 *
+	 * For PG16+, this is required as some of the outer JOINs are converted to
+	 * "ON(true)" and filters are pushed down to the table scans. As
+	 * AllDistributionKeysInQueryAreEqual relies on JOIN filters, it will fail to
+	 * detect the router case. However, we can still detect it by checking whether
+	 * the query is a router query, as router planning checks the filters on
+	 * the tables.
+ */
bool allDistributionKeysInQueryAreEqual =
+ routerSelect ||
AllDistributionKeysInQueryAreEqual(originalQuery, plannerRestrictionContext);
/*
@@ -361,6 +382,23 @@ CreateDistributedInsertSelectPlan(Query *originalQuery,
}
+/*
+ * InsertSelectHasRouterSelect is a helper function that returns true if the SELECT
+ * part of the INSERT .. SELECT query is a router query.
+ */
+static bool
+InsertSelectHasRouterSelect(Query *originalQuery,
+ PlannerRestrictionContext *plannerRestrictionContext)
+{
+ RangeTblEntry *subqueryRte = ExtractSelectRangeTableEntry(originalQuery);
+ DistributedPlan *distributedPlan = CreateRouterPlan(subqueryRte->subquery,
+ subqueryRte->subquery,
+ plannerRestrictionContext);
+
+ return distributedPlan->planningError == NULL;
+}
+
+
/*
* CreateInsertSelectIntoLocalTablePlan creates the plan for INSERT .. SELECT queries
* where the selected table is distributed and the inserted table is not.
@@ -566,6 +604,22 @@ CreateCombineQueryForRouterPlan(DistributedPlan *distPlan)
combineQuery->querySource = QSRC_ORIGINAL;
combineQuery->canSetTag = true;
combineQuery->rtable = list_make1(rangeTableEntry);
+
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /*
+	 * This part of the code is more of a sanity check for readability;
+	 * it doesn't really do anything.
+	 * We know that only relation RTEs and subquery RTEs that were once relation
+	 * RTEs (views) have their perminfoindex set (see the ExecCheckPermissions
+	 * function). DerivedRangeTableEntry sets the rtekind to RTE_FUNCTION,
+	 * hence we should have no perminfos here.
+ */
+ Assert(rangeTableEntry->rtekind == RTE_FUNCTION &&
+ rangeTableEntry->perminfoindex == 0);
+ combineQuery->rteperminfos = NIL;
+#endif
+
combineQuery->targetList = targetList;
combineQuery->jointree = joinTree;
return combineQuery;
@@ -615,6 +669,7 @@ CreateTargetListForCombineQuery(List *targetList)
static DeferredErrorMessage *
DistributedInsertSelectSupported(Query *queryTree, RangeTblEntry *insertRte,
RangeTblEntry *subqueryRte, bool allReferenceTables,
+ bool routerSelect,
PlannerRestrictionContext *plannerRestrictionContext)
{
Oid selectPartitionColumnTableId = InvalidOid;
@@ -689,19 +744,28 @@ DistributedInsertSelectSupported(Query *queryTree, RangeTblEntry *insertRte,
NULL, NULL);
}
- /* first apply toplevel pushdown checks to SELECT query */
- DeferredErrorMessage *error = DeferErrorIfUnsupportedSubqueryPushdown(subquery,
- plannerRestrictionContext);
- if (error)
- {
- return error;
- }
+ DeferredErrorMessage *error = NULL;
- /* then apply subquery pushdown checks to SELECT query */
- error = DeferErrorIfCannotPushdownSubquery(subquery, false);
- if (error)
+ /*
+ * We can skip SQL support related checks for router queries as
+ * they are safe to route with any SQL.
+ */
+ if (!routerSelect)
{
- return error;
+ /* first apply toplevel pushdown checks to SELECT query */
+ error =
+ DeferErrorIfUnsupportedSubqueryPushdown(subquery, plannerRestrictionContext);
+ if (error)
+ {
+ return error;
+ }
+
+ /* then apply subquery pushdown checks to SELECT query */
+ error = DeferErrorIfCannotPushdownSubquery(subquery, false);
+ if (error)
+ {
+ return error;
+ }
}
if (IsCitusTableType(targetRelationId, CITUS_LOCAL_TABLE))
@@ -853,15 +917,7 @@ RouterModifyTaskForShardInterval(Query *originalQuery,
continue;
}
-
- /*
- * passing NULL for plannerInfo will be problematic if we have placeholder
- * vars. However, it won't be the case here because we are building
- * the expression from shard intervals which don't have placeholder vars.
- * Note that this is only the case with PG14 as the parameter doesn't exist
- * prior to that.
- */
- shardRestrictionList = make_simple_restrictinfo(NULL,
+ shardRestrictionList = make_simple_restrictinfo(restriction->plannerInfo,
(Expr *) shardOpExpressions);
extendedBaseRestrictInfo = lappend(extendedBaseRestrictInfo,
shardRestrictionList);
@@ -1493,6 +1549,20 @@ WrapSubquery(Query *subquery)
selectAlias, false, true));
outerQuery->rtable = list_make1(newRangeTableEntry);
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /*
+	 * This part of the code is more of a sanity check for readability;
+	 * it doesn't really do anything.
+	 * addRangeTableEntryForSubquery doesn't add permission info
+	 * because the range table entry is set to RTE_SUBQUERY.
+	 * Hence we should also have no perminfos here.
+ */
+ Assert(newRangeTableEntry->rtekind == RTE_SUBQUERY &&
+ newRangeTableEntry->perminfoindex == 0);
+ outerQuery->rteperminfos = NIL;
+#endif
+
/* set the FROM expression to the subquery */
RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
newRangeTableRef->rtindex = 1;
diff --git a/src/backend/distributed/planner/local_distributed_join_planner.c b/src/backend/distributed/planner/local_distributed_join_planner.c
index 2c6a63de1..d93921966 100644
--- a/src/backend/distributed/planner/local_distributed_join_planner.c
+++ b/src/backend/distributed/planner/local_distributed_join_planner.c
@@ -107,6 +107,7 @@
#include "optimizer/optimizer.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
+#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
@@ -136,6 +137,9 @@ typedef struct RangeTableEntryDetails
RangeTblEntry *rangeTableEntry;
List *requiredAttributeNumbers;
bool hasConstantFilterOnUniqueColumn;
+#if PG_VERSION_NUM >= PG_VERSION_16
+ RTEPermissionInfo *perminfo;
+#endif
} RangeTableEntryDetails;
/*
@@ -176,7 +180,8 @@ static bool HasConstantFilterOnUniqueColumn(RangeTblEntry *rangeTableEntry,
static ConversionCandidates * CreateConversionCandidates(PlannerRestrictionContext *
plannerRestrictionContext,
List *rangeTableList,
- int resultRTEIdentity);
+ int resultRTEIdentity,
+ List *rteperminfos);
static void AppendUniqueIndexColumnsToList(Form_pg_index indexForm, List **uniqueIndexes,
int flags);
static ConversionChoice GetConversionChoice(ConversionCandidates *
@@ -205,10 +210,17 @@ RecursivelyPlanLocalTableJoins(Query *query,
GetPlannerRestrictionContext(context);
List *rangeTableList = query->rtable;
+#if PG_VERSION_NUM >= PG_VERSION_16
+ List *rteperminfos = query->rteperminfos;
+#endif
int resultRTEIdentity = ResultRTEIdentity(query);
ConversionCandidates *conversionCandidates =
CreateConversionCandidates(plannerRestrictionContext,
- rangeTableList, resultRTEIdentity);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ rangeTableList, resultRTEIdentity, rteperminfos);
+#else
+ rangeTableList, resultRTEIdentity, NIL);
+#endif
ConversionChoice conversionChoise =
GetConversionChoice(conversionCandidates, plannerRestrictionContext);
@@ -323,7 +335,12 @@ ConvertRTEsToSubquery(List *rangeTableEntryDetailsList, RecursivePlanningContext
RangeTblEntry *rangeTableEntry = rangeTableEntryDetails->rangeTableEntry;
List *requiredAttributeNumbers = rangeTableEntryDetails->requiredAttributeNumbers;
ReplaceRTERelationWithRteSubquery(rangeTableEntry,
- requiredAttributeNumbers, context);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ requiredAttributeNumbers, context,
+ rangeTableEntryDetails->perminfo);
+#else
+ requiredAttributeNumbers, context, NULL);
+#endif
}
}
@@ -530,7 +547,9 @@ RequiredAttrNumbersForRelationInternal(Query *queryToProcess, int rteIndex)
*/
static ConversionCandidates *
CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext,
- List *rangeTableList, int resultRTEIdentity)
+ List *rangeTableList,
+ int resultRTEIdentity,
+ List *rteperminfos)
{
ConversionCandidates *conversionCandidates =
palloc0(sizeof(ConversionCandidates));
@@ -564,6 +583,14 @@ CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext,
RequiredAttrNumbersForRelation(rangeTableEntry, plannerRestrictionContext);
rangeTableEntryDetails->hasConstantFilterOnUniqueColumn =
HasConstantFilterOnUniqueColumn(rangeTableEntry, relationRestriction);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ rangeTableEntryDetails->perminfo = NULL;
+ if (rangeTableEntry->perminfoindex)
+ {
+ rangeTableEntryDetails->perminfo = getRTEPermissionInfo(rteperminfos,
+ rangeTableEntry);
+ }
+#endif
bool referenceOrDistributedTable =
IsCitusTableType(rangeTableEntry->relid, REFERENCE_TABLE) ||
diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c
index 6a80a7c33..3cadea23a 100644
--- a/src/backend/distributed/planner/merge_planner.c
+++ b/src/backend/distributed/planner/merge_planner.c
@@ -15,6 +15,7 @@
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
+#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "tcop/tcopprot.h"
#include "utils/lsyscache.h"
@@ -57,6 +58,9 @@ static DeferredErrorMessage * DeferErrorIfRoutableMergeNotSupported(Query *query
*
plannerRestrictionContext,
Oid targetRelationId);
+static bool MergeSourceHasRouterSelect(Query *query,
+ PlannerRestrictionContext *
+ plannerRestrictionContext);
static DeferredErrorMessage * MergeQualAndTargetListFunctionsSupported(Oid
resultRelationId,
Query *query,
@@ -234,7 +238,7 @@ CreateNonPushableMergePlan(Oid targetRelationId, uint64 planId, Query *originalQ
ParamListInfo boundParams)
{
Query *mergeQuery = copyObject(originalQuery);
- RangeTblEntry *sourceRte = ExtractMergeSourceRangeTableEntry(mergeQuery);
+ RangeTblEntry *sourceRte = ExtractMergeSourceRangeTableEntry(mergeQuery, false);
DistributedPlan *distributedPlan = CitusMakeNode(DistributedPlan);
ereport(DEBUG1, (errmsg("Creating MERGE repartition plan")));
@@ -774,6 +778,11 @@ ConvertCteRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte)
Query *cteQuery = (Query *) copyObject(sourceCte->ctequery);
sourceRte->rtekind = RTE_SUBQUERY;
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /* sanity check - sourceRte was RTE_CTE previously so it should have no perminfo */
+ Assert(sourceRte->perminfoindex == 0);
+#endif
/*
* As we are delinking the CTE from main query, we have to walk through the
@@ -824,6 +833,20 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
RangeTblEntry *newRangeTableEntry = copyObject(sourceRte);
sourceResultsQuery->rtable = list_make1(newRangeTableEntry);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ sourceResultsQuery->rteperminfos = NIL;
+ if (sourceRte->perminfoindex)
+ {
+ /* create permission info for newRangeTableEntry */
+ RTEPermissionInfo *perminfo = getRTEPermissionInfo(mergeQuery->rteperminfos,
+ sourceRte);
+
+ /* update the sourceResultsQuery's rteperminfos accordingly */
+ newRangeTableEntry->perminfoindex = 1;
+ sourceResultsQuery->rteperminfos = list_make1(perminfo);
+ }
+#endif
+
/* set the FROM expression to the subquery */
newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
sourceResultsQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
@@ -849,6 +872,9 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
/* replace the function with the constructed subquery */
sourceRte->rtekind = RTE_SUBQUERY;
+#if PG_VERSION_NUM >= PG_VERSION_16
+ sourceRte->perminfoindex = 0;
+#endif
sourceRte->subquery = sourceResultsQuery;
sourceRte->inh = false;
}
@@ -959,7 +985,8 @@ DeferErrorIfTargetHasFalseClause(Oid targetRelationId,
List *baseRestrictionList = relationRestriction->relOptInfo->baserestrictinfo;
List *restrictClauseList = get_all_actual_clauses(baseRestrictionList);
- if (ContainsFalseClause(restrictClauseList))
+ if (ContainsFalseClause(restrictClauseList) ||
+ JoinConditionIsOnFalse(relationRestriction->relOptInfo->joininfo))
{
return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
"Routing query is not possible with "
@@ -1047,22 +1074,41 @@ DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList,
"must be colocated", NULL, NULL);
}
- DeferredErrorMessage *deferredError =
- DeferErrorIfUnsupportedSubqueryPushdown(query,
- plannerRestrictionContext);
- if (deferredError)
- {
- ereport(DEBUG1, (errmsg("Sub-query is not pushable, try repartitioning")));
- return deferredError;
- }
+ DeferredErrorMessage *deferredError = NULL;
- if (HasDangerousJoinUsing(query->rtable, (Node *) query->jointree))
+
+ /*
+	 * If the query goes to a single node ("router" in Citus' parlance),
+	 * we don't need to go through certain SQL support and colocation checks.
+	 *
+	 * For PG16+, this is required as some of the outer JOINs are converted to
+	 * "ON(true)" and filters are pushed down to the table scans. As
+	 * DeferErrorIfUnsupportedSubqueryPushdown relies on JOIN filters, it will fail
+	 * to detect the router case. However, we can still detect it by checking
+	 * whether the query is a router query, as router planning checks the filters
+	 * on the tables.
+ */
+
+
+ if (!MergeSourceHasRouterSelect(query, plannerRestrictionContext))
{
- ereport(DEBUG1, (errmsg(
- "Query has ambigious joins, merge is not pushable, try repartitioning")));
- return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
- "a join with USING causes an internal naming "
- "conflict, use ON instead", NULL, NULL);
+ deferredError =
+ DeferErrorIfUnsupportedSubqueryPushdown(query,
+ plannerRestrictionContext);
+ if (deferredError)
+ {
+ ereport(DEBUG1, (errmsg("Sub-query is not pushable, try repartitioning")));
+ return deferredError;
+ }
+
+ if (HasDangerousJoinUsing(query->rtable, (Node *) query->jointree))
+ {
+ ereport(DEBUG1, (errmsg(
+ "Query has ambigious joins, merge is not pushable, try repartitioning")));
+ return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+ "a join with USING causes an internal naming "
+ "conflict, use ON instead", NULL, NULL);
+ }
}
deferredError = DeferErrorIfTargetHasFalseClause(targetRelationId,
@@ -1080,6 +1126,36 @@ DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList,
}
+/*
+ * MergeSourceHasRouterSelect is a helper function that returns true if the source
+ * part of the MERGE query is a router query.
+ */
+static bool
+MergeSourceHasRouterSelect(Query *query,
+ PlannerRestrictionContext *plannerRestrictionContext)
+{
+ Query *copiedQuery = copyObject(query);
+ RangeTblEntry *mergeSourceRte = ExtractMergeSourceRangeTableEntry(copiedQuery, true);
+
+ if (mergeSourceRte == NULL)
+ {
+ /*
+ * We might potentially support this case in the future, but for now,
+ * we don't support MERGE with JOIN in the source.
+ */
+ return false;
+ }
+
+ ConvertSourceRTEIntoSubquery(copiedQuery, mergeSourceRte, plannerRestrictionContext);
+ Query *sourceQuery = mergeSourceRte->subquery;
+
+ DistributedPlan *distributedPlan = CreateRouterPlan(sourceQuery, sourceQuery,
+ plannerRestrictionContext);
+
+ return distributedPlan->planningError == NULL;
+}
+
+
/*
* ErrorIfMergeQueryQualAndTargetListNotSupported does check for a MERGE command in the query, if it finds
* one, it will verify the below criteria
@@ -1184,26 +1260,37 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
{
if (IsCitusTableType(targetRelation->relationId, SINGLE_SHARD_DISTRIBUTED))
{
- ereport(ERROR, (errmsg("MERGE operation on non-colocated "
- "distributed table(s) without a shard "
- "key is not yet supported")));
+ ereport(ERROR, (errmsg("MERGE operation across distributed schemas "
+ "or with a row-based distributed table is "
+ "not yet supported")));
}
/* Get all the Join conditions from the ON clause */
List *mergeJoinConditionList = WhereClauseList(mergeQuery->jointree);
Var *targetColumn = targetRelation->partitionColumn;
Var *sourceRepartitionVar = NULL;
+ bool foundTypeMismatch = false;
OpExpr *validJoinClause =
- SinglePartitionJoinClause(list_make1(targetColumn), mergeJoinConditionList);
+ SinglePartitionJoinClause(list_make1(targetColumn), mergeJoinConditionList,
+ &foundTypeMismatch);
if (!validJoinClause)
{
+ if (foundTypeMismatch)
+ {
+ ereport(ERROR, (errmsg("In the MERGE ON clause, there is a datatype mismatch "
+ "between target's distribution "
+ "column and the expression originating from the source."),
+ errdetail(
+ "If the types are different, Citus uses different hash "
+ "functions for the two column types, which might "
+ "lead to incorrect repartitioning of the result data")));
+ }
+
ereport(ERROR, (errmsg("The required join operation is missing between "
"the target's distribution column and any "
"expression originating from the source. The "
- "issue may arise from either a non-equi-join or "
- "a mismatch in the datatypes of the columns being "
- "joined."),
+ "issue may arise from a non-equi-join."),
errdetail("Without a equi-join condition on the target's "
"distribution column, the source rows "
"cannot be efficiently redistributed, and "
@@ -1277,7 +1364,7 @@ SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList,
* table or source query in USING clause.
*/
RangeTblEntry *
-ExtractMergeSourceRangeTableEntry(Query *query)
+ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk)
{
/* function is void for pre-15 versions of Postgres */
#if PG_VERSION_NUM < PG_VERSION_15
@@ -1290,7 +1377,10 @@ ExtractMergeSourceRangeTableEntry(Query *query)
List *fromList = query->jointree->fromlist;
- /* We should have only one RTE(MergeStmt->sourceRelation) in the from-list */
+ /*
+	 * We should have only one RTE (MergeStmt->sourceRelation) in the from-list
+	 * unless the Postgres community changes the representation of MERGE.
+ */
if (list_length(fromList) != 1)
{
ereport(ERROR, (errmsg("Unexpected source list in MERGE sql USING clause")));
@@ -1305,12 +1395,18 @@ ExtractMergeSourceRangeTableEntry(Query *query)
*/
if (reference->rtindex == 0)
{
- ereport(ERROR, (errmsg("Source is not an explicit query"),
- errhint("Source query is a Join expression, "
- "try converting into a query as SELECT * "
- "FROM (..Join..)")));
+ if (!joinSourceOk)
+ {
+ ereport(ERROR, (errmsg("Source is not an explicit query"),
+ errhint("Source query is a Join expression, "
+ "try converting into a query as SELECT * "
+ "FROM (..Join..)")));
+ }
+
+ return NULL;
}
+
Assert(reference->rtindex >= 1);
RangeTblEntry *subqueryRte = rt_fetch(reference->rtindex, query->rtable);
diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c
index 674077b46..94d125f41 100644
--- a/src/backend/distributed/planner/multi_explain.c
+++ b/src/backend/distributed/planner/multi_explain.c
@@ -295,7 +295,7 @@ NonPushableMergeCommandExplainScan(CustomScanState *node, List *ancestors,
CitusScanState *scanState = (CitusScanState *) node;
DistributedPlan *distributedPlan = scanState->distributedPlan;
Query *mergeQuery = distributedPlan->modifyQueryViaCoordinatorOrRepartition;
- RangeTblEntry *sourceRte = ExtractMergeSourceRangeTableEntry(mergeQuery);
+ RangeTblEntry *sourceRte = ExtractMergeSourceRangeTableEntry(mergeQuery, false);
/*
* Create a copy because ExplainOneQuery can modify the query, and later
@@ -992,12 +992,18 @@ BuildRemoteExplainQuery(char *queryString, ExplainState *es)
appendStringInfo(explainQuery,
"EXPLAIN (ANALYZE %s, VERBOSE %s, "
"COSTS %s, BUFFERS %s, WAL %s, "
+#if PG_VERSION_NUM >= PG_VERSION_16
+ "GENERIC_PLAN %s, "
+#endif
"TIMING %s, SUMMARY %s, FORMAT %s) %s",
es->analyze ? "TRUE" : "FALSE",
es->verbose ? "TRUE" : "FALSE",
es->costs ? "TRUE" : "FALSE",
es->buffers ? "TRUE" : "FALSE",
es->wal ? "TRUE" : "FALSE",
+#if PG_VERSION_NUM >= PG_VERSION_16
+ es->generic ? "TRUE" : "FALSE",
+#endif
es->timing ? "TRUE" : "FALSE",
es->summary ? "TRUE" : "FALSE",
formatStr,
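The GENERIC_PLAN flag forwarded above is a PostgreSQL 16 EXPLAIN option. A short usage sketch with a hypothetical table; note that it cannot be combined with ANALYZE:

```sql
-- PG16+: explain a parameterized query without binding a value for $1
EXPLAIN (GENERIC_PLAN) SELECT * FROM orders WHERE order_id = $1;
```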
diff --git a/src/backend/distributed/planner/multi_join_order.c b/src/backend/distributed/planner/multi_join_order.c
index 79007b70d..7714a1e08 100644
--- a/src/backend/distributed/planner/multi_join_order.c
+++ b/src/backend/distributed/planner/multi_join_order.c
@@ -999,7 +999,8 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
}
OpExpr *joinClause =
- SinglePartitionJoinClause(currentPartitionColumnList, applicableJoinClauses);
+ SinglePartitionJoinClause(currentPartitionColumnList, applicableJoinClauses,
+ NULL);
if (joinClause != NULL)
{
if (currentPartitionMethod == DISTRIBUTE_BY_HASH)
@@ -1037,7 +1038,8 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
*/
List *candidatePartitionColumnList = list_make1(candidatePartitionColumn);
joinClause = SinglePartitionJoinClause(candidatePartitionColumnList,
- applicableJoinClauses);
+ applicableJoinClauses,
+ NULL);
if (joinClause != NULL)
{
if (candidatePartitionMethod == DISTRIBUTE_BY_HASH)
@@ -1078,8 +1080,14 @@ SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable,
* clause exists, the function returns NULL.
*/
OpExpr *
-SinglePartitionJoinClause(List *partitionColumnList, List *applicableJoinClauses)
+SinglePartitionJoinClause(List *partitionColumnList, List *applicableJoinClauses, bool
+ *foundTypeMismatch)
{
+ if (foundTypeMismatch)
+ {
+ *foundTypeMismatch = false;
+ }
+
if (list_length(partitionColumnList) == 0)
{
return NULL;
@@ -1121,6 +1129,10 @@ SinglePartitionJoinClause(List *partitionColumnList, List *applicableJoinClauses
{
ereport(DEBUG1, (errmsg("single partition column types do not "
"match")));
+ if (foundTypeMismatch)
+ {
+ *foundTypeMismatch = true;
+ }
}
}
}
diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c
index fa9e5bb61..0969e0c7c 100644
--- a/src/backend/distributed/planner/multi_logical_planner.c
+++ b/src/backend/distributed/planner/multi_logical_planner.c
@@ -2140,7 +2140,8 @@ ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode,
* we introduce a (re-)partition operator for the other column.
*/
OpExpr *joinClause = SinglePartitionJoinClause(partitionColumnList,
- applicableJoinClauses);
+ applicableJoinClauses,
+ NULL);
Assert(joinClause != NULL);
/* both are verified in SinglePartitionJoinClause to not be NULL, assert is to guard */
diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c
index 6ad51e0ae..0d7a0de78 100644
--- a/src/backend/distributed/planner/multi_router_planner.c
+++ b/src/backend/distributed/planner/multi_router_planner.c
@@ -155,6 +155,7 @@ static DeferredErrorMessage * ErrorIfQueryHasUnroutableModifyingCTE(Query *query
static DeferredErrorMessage * ErrorIfQueryHasCTEWithSearchClause(Query *queryTree);
static bool ContainsSearchClauseWalker(Node *node, void *context);
static bool SelectsFromDistributedTable(List *rangeTableList, Query *query);
+static bool AllShardsColocated(List *relationShardList);
static ShardPlacement * CreateDummyPlacement(bool hasLocalRelation);
static ShardPlacement * CreateLocalDummyPlacement();
static int CompareInsertValuesByShardId(const void *leftElement,
@@ -395,7 +396,7 @@ ExtractSourceResultRangeTableEntry(Query *query)
{
if (IsMergeQuery(query))
{
- return ExtractMergeSourceRangeTableEntry(query);
+ return ExtractMergeSourceRangeTableEntry(query, false);
}
else if (CheckInsertSelectQuery(query))
{
@@ -2392,6 +2393,15 @@ PlanRouterQuery(Query *originalQuery,
RelationShardListForShardIntervalList(*prunedShardIntervalListList,
&shardsPresent);
+ if (!EnableNonColocatedRouterQueryPushdown &&
+ !AllShardsColocated(*relationShardList))
+ {
+ return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
+ "router planner does not support queries that "
+ "reference non-colocated distributed tables",
+ NULL, NULL);
+ }
+
if (!shardsPresent && !replacePrunedQueryWithDummy)
{
/*
@@ -2460,6 +2470,92 @@ PlanRouterQuery(Query *originalQuery,
}
+/*
+ * AllShardsColocated returns true if all the shards in the given relationShardList
+ * belong to colocated tables and are on the same shard index.
+ */
+static bool
+AllShardsColocated(List *relationShardList)
+{
+ RelationShard *relationShard = NULL;
+ int shardIndex = -1;
+ int colocationId = -1;
+ CitusTableType tableType = ANY_CITUS_TABLE_TYPE;
+
+ foreach_ptr(relationShard, relationShardList)
+ {
+ Oid relationId = relationShard->relationId;
+ uint64 shardId = relationShard->shardId;
+ if (shardId == INVALID_SHARD_ID)
+ {
+ /* intermediate results are always colocated, so ignore */
+ continue;
+ }
+
+ CitusTableCacheEntry *tableEntry = LookupCitusTableCacheEntry(relationId);
+ if (tableEntry == NULL)
+ {
+			/* local tables are never colocated */
+ return false;
+ }
+
+ CitusTableType currentTableType = GetCitusTableType(tableEntry);
+ if (currentTableType == REFERENCE_TABLE)
+ {
+ /*
+ * Reference tables are always colocated so it is
+ * safe to skip them.
+ */
+ continue;
+ }
+ else if (IsCitusTableTypeCacheEntry(tableEntry, DISTRIBUTED_TABLE))
+ {
+ if (tableType == ANY_CITUS_TABLE_TYPE)
+ {
+ tableType = currentTableType;
+ }
+ else if (tableType != currentTableType)
+ {
+ /*
+ * We cannot qualify different types of distributed tables
+ * as colocated.
+ */
+ return false;
+ }
+
+ if (currentTableType == RANGE_DISTRIBUTED ||
+ currentTableType == APPEND_DISTRIBUTED)
+ {
+				/* we do not have further strict colocation checks */
+ continue;
+ }
+ }
+
+ int currentColocationId = TableColocationId(relationId);
+ if (colocationId == -1)
+ {
+ colocationId = currentColocationId;
+ }
+ else if (colocationId != currentColocationId)
+ {
+ return false;
+ }
+
+ int currentIndex = ShardIndex(LoadShardInterval(shardId));
+ if (shardIndex == -1)
+ {
+ shardIndex = currentIndex;
+ }
+ else if (shardIndex != currentIndex)
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
/*
* ContainsOnlyLocalTables returns true if there is only
* local tables and not any distributed or reference table.
@@ -3745,15 +3841,6 @@ DeferErrorIfUnsupportedRouterPlannableSelectQuery(Query *query)
NULL, NULL);
}
- if (!EnableNonColocatedRouterQueryPushdown &&
- !AllDistributedRelationsInListColocated(distributedRelationList))
- {
- return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
- "router planner does not support queries that "
- "reference non-colocated distributed tables",
- NULL, NULL);
- }
-
DeferredErrorMessage *CTEWithSearchClauseError =
ErrorIfQueryHasCTEWithSearchClause(query);
if (CTEWithSearchClauseError != NULL)
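With this change the colocation check moves from the SELECT-only path into PlanRouterQuery, so it now applies to every router plan built from the pruned shard list. A hedged sketch of what it rejects, assuming two hypothetical distributed tables in different colocation groups and the GUC spelling used by recent Citus releases:

```sql
SET citus.enable_non_colocated_router_query_pushdown TO off;

-- users and orders are distributed but NOT colocated; even when both filters
-- land on shard placements on the same node, the router planner is expected
-- to defer with "router planner does not support queries that reference
-- non-colocated distributed tables"
SELECT u.name, o.total
FROM users u JOIN orders o ON u.user_id = o.user_id
WHERE u.user_id = 42 AND o.user_id = 42;
```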
diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c
index a6e64b9c1..77baab197 100644
--- a/src/backend/distributed/planner/query_colocation_checker.c
+++ b/src/backend/distributed/planner/query_colocation_checker.c
@@ -83,7 +83,16 @@ CreateColocatedJoinChecker(Query *subquery, PlannerRestrictionContext *restricti
* functions (i.e., FilterPlannerRestrictionForQuery()) rely on queries
* not relations.
*/
- anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ RTEPermissionInfo *perminfo = NULL;
+ if (anchorRangeTblEntry->perminfoindex)
+ {
+ perminfo = getRTEPermissionInfo(subquery->rteperminfos, anchorRangeTblEntry);
+ }
+ anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, perminfo);
+#else
+ anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, NULL);
+#endif
}
else if (anchorRangeTblEntry->rtekind == RTE_SUBQUERY)
{
@@ -126,7 +135,7 @@ static RangeTblEntry *
AnchorRte(Query *subquery)
{
FromExpr *joinTree = subquery->jointree;
- Relids joinRelIds = get_relids_in_jointree((Node *) joinTree, false);
+ Relids joinRelIds = get_relids_in_jointree_compat((Node *) joinTree, false, false);
int currentRTEIndex = -1;
RangeTblEntry *anchorRangeTblEntry = NULL;
@@ -266,7 +275,9 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker)
* designed for generating a stub query.
*/
Query *
-WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, List *requiredAttributes)
+WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
+ List *requiredAttributes,
+ RTEPermissionInfo *perminfo)
{
Query *subquery = makeNode(Query);
RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
@@ -277,6 +288,14 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, List *requiredAttributes
RangeTblEntry *newRangeTableEntry = copyObject(rteRelation);
subquery->rtable = list_make1(newRangeTableEntry);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ if (perminfo)
+ {
+ newRangeTableEntry->perminfoindex = 1;
+ subquery->rteperminfos = list_make1(perminfo);
+ }
+#endif
+
/* set the FROM expression to the subquery */
newRangeTableRef = makeNode(RangeTblRef);
newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
diff --git a/src/backend/distributed/planner/query_pushdown_planning.c b/src/backend/distributed/planner/query_pushdown_planning.c
index cbe6a3606..3bad73459 100644
--- a/src/backend/distributed/planner/query_pushdown_planning.c
+++ b/src/backend/distributed/planner/query_pushdown_planning.c
@@ -1414,6 +1414,12 @@ RelationInfoContainsOnlyRecurringTuples(PlannerInfo *plannerInfo, Relids relids)
while ((relationId = bms_next_member(relids, relationId)) >= 0)
{
+ /* outer join RTE check in PG16 */
+ if (IsRelOptOuterJoin(plannerInfo, relationId))
+ {
+ continue;
+ }
+
RangeTblEntry *rangeTableEntry = plannerInfo->simple_rte_array[relationId];
if (FindNodeMatchingCheckFunctionInRangeTableList(list_make1(rangeTableEntry),
@@ -1915,6 +1921,9 @@ SubqueryPushdownMultiNodeTree(Query *originalQuery)
pushedDownQuery->targetList = subqueryTargetEntryList;
pushedDownQuery->jointree = copyObject(queryTree->jointree);
pushedDownQuery->rtable = copyObject(queryTree->rtable);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ pushedDownQuery->rteperminfos = copyObject(queryTree->rteperminfos);
+#endif
pushedDownQuery->setOperations = copyObject(queryTree->setOperations);
pushedDownQuery->querySource = queryTree->querySource;
pushedDownQuery->hasSubLinks = queryTree->hasSubLinks;
diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c
index f582fd9df..c2426cf5f 100644
--- a/src/backend/distributed/planner/recursive_planning.c
+++ b/src/backend/distributed/planner/recursive_planning.c
@@ -80,6 +80,7 @@
#include "optimizer/optimizer.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
+#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
@@ -886,8 +887,19 @@ RecursivelyPlanDistributedJoinNode(Node *node, Query *query,
List *requiredAttributes =
RequiredAttrNumbersForRelation(distributedRte, restrictionContext);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ RTEPermissionInfo *perminfo = NULL;
+ if (distributedRte->perminfoindex)
+ {
+ perminfo = getRTEPermissionInfo(query->rteperminfos, distributedRte);
+ }
+
ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes,
- recursivePlanningContext);
+ recursivePlanningContext, perminfo);
+#else
+ ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes,
+ recursivePlanningContext, NULL);
+#endif
}
else if (distributedRte->rtekind == RTE_SUBQUERY)
{
@@ -1751,9 +1763,11 @@ NodeContainsSubqueryReferencingOuterQuery(Node *node)
void
ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
List *requiredAttrNumbers,
- RecursivePlanningContext *context)
+ RecursivePlanningContext *context,
+ RTEPermissionInfo *perminfo)
{
- Query *subquery = WrapRteRelationIntoSubquery(rangeTableEntry, requiredAttrNumbers);
+ Query *subquery = WrapRteRelationIntoSubquery(rangeTableEntry, requiredAttrNumbers,
+ perminfo);
List *outerQueryTargetList = CreateAllTargetListForRelation(rangeTableEntry->relid,
requiredAttrNumbers);
@@ -1778,6 +1792,9 @@ ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
/* replace the function with the constructed subquery */
rangeTableEntry->rtekind = RTE_SUBQUERY;
+#if PG_VERSION_NUM >= PG_VERSION_16
+ rangeTableEntry->perminfoindex = 0;
+#endif
rangeTableEntry->subquery = subquery;
/*
@@ -1850,6 +1867,15 @@ CreateOuterSubquery(RangeTblEntry *rangeTableEntry, List *outerSubqueryTargetLis
innerSubqueryRTE->eref->colnames = innerSubqueryColNames;
outerSubquery->rtable = list_make1(innerSubqueryRTE);
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /* sanity check */
+ Assert(innerSubqueryRTE->rtekind == RTE_SUBQUERY &&
+ innerSubqueryRTE->perminfoindex == 0);
+ outerSubquery->rteperminfos = NIL;
+#endif
+
+
/* set the FROM expression to the subquery */
RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
newRangeTableRef->rtindex = 1;
@@ -2022,6 +2048,15 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
/* set the FROM expression to the subquery */
subquery->rtable = list_make1(newRangeTableEntry);
+
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /* sanity check */
+ Assert(newRangeTableEntry->rtekind == RTE_FUNCTION &&
+ newRangeTableEntry->perminfoindex == 0);
+ subquery->rteperminfos = NIL;
+#endif
+
newRangeTableRef->rtindex = 1;
subquery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
@@ -2392,6 +2427,9 @@ BuildReadIntermediateResultsQuery(List *targetEntryList, List *columnAliasList,
Query *resultQuery = makeNode(Query);
resultQuery->commandType = CMD_SELECT;
resultQuery->rtable = list_make1(rangeTableEntry);
+#if PG_VERSION_NUM >= PG_VERSION_16
+ resultQuery->rteperminfos = NIL;
+#endif
resultQuery->jointree = joinTree;
resultQuery->targetList = targetList;
diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c
index b57e37735..368ba2026 100644
--- a/src/backend/distributed/planner/relation_restriction_equivalence.c
+++ b/src/backend/distributed/planner/relation_restriction_equivalence.c
@@ -1238,6 +1238,12 @@ AddToAttributeEquivalenceClass(AttributeEquivalenceClass *attributeEquivalenceCl
return;
}
+ /* outer join checks in PG16 */
+ if (IsRelOptOuterJoin(root, varToBeAdded->varno))
+ {
+ return;
+ }
+
RangeTblEntry *rangeTableEntry = root->simple_rte_array[varToBeAdded->varno];
if (rangeTableEntry->rtekind == RTE_RELATION)
{
@@ -1379,6 +1385,30 @@ GetTargetSubquery(PlannerInfo *root, RangeTblEntry *rangeTableEntry, Var *varToB
}
+/*
+ * IsRelOptOuterJoin returns true if the RelOpt referenced
+ * by varNo is an outer join, false otherwise.
+ */
+bool
+IsRelOptOuterJoin(PlannerInfo *root, int varNo)
+{
+#if PG_VERSION_NUM >= PG_VERSION_16
+ if (root->simple_rel_array_size <= varNo)
+ {
+ return true;
+ }
+
+ RelOptInfo *rel = root->simple_rel_array[varNo];
+ if (rel == NULL)
+ {
+ /* must be an outer join */
+ return true;
+ }
+#endif
+ return false;
+}
+
+
/*
* AddUnionAllSetOperationsToAttributeEquivalenceClass recursively iterates on all the
* append rels, sets the varno's accordingly and adds the
diff --git a/src/backend/distributed/relay/relay_event_utility.c b/src/backend/distributed/relay/relay_event_utility.c
index b7629a38b..3284ead11 100644
--- a/src/backend/distributed/relay/relay_event_utility.c
+++ b/src/backend/distributed/relay/relay_event_utility.c
@@ -54,6 +54,9 @@
#include "utils/relcache.h"
/* Local functions forward declarations */
+static void RelayEventExtendConstraintAndIndexNames(AlterTableStmt *alterTableStmt,
+ Constraint *constraint,
+ uint64 shardId);
static bool UpdateWholeRowColumnReferencesWalker(Node *node, uint64 *shardId);
/* exports for SQL callable functions */
@@ -150,43 +153,17 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
if (command->subtype == AT_AddConstraint)
{
Constraint *constraint = (Constraint *) command->def;
- char **constraintName = &(constraint->conname);
- const bool missingOk = false;
- relationId = RangeVarGetRelid(alterTableStmt->relation,
- AccessShareLock,
- missingOk);
-
- if (constraint->indexname)
+ RelayEventExtendConstraintAndIndexNames(alterTableStmt, constraint,
+ shardId);
+ }
+ else if (command->subtype == AT_AddColumn)
+ {
+ ColumnDef *columnDefinition = (ColumnDef *) command->def;
+ Constraint *constraint = NULL;
+ foreach_ptr(constraint, columnDefinition->constraints)
{
- char **indexName = &(constraint->indexname);
- AppendShardIdToName(indexName, shardId);
- }
-
- /*
- * Append shardId to constraint names if
- * - table is not partitioned or
- * - constraint is not a CHECK constraint
- *
- * We do not want to append shardId to partitioned table shards because
- * the names of constraints will be inherited, and the shardId will no
- * longer be valid for the child table.
- *
- * See MergeConstraintsIntoExisting function in Postgres that requires
- * inherited check constraints in child tables to have the same name
- * with those in parent tables.
- */
- if (!PartitionedTable(relationId) ||
- constraint->contype != CONSTR_CHECK)
- {
- /*
- * constraint->conname could be empty in the case of
- * ADD {PRIMARY KEY, UNIQUE} USING INDEX.
- * In this case, already extended index name will be used by postgres.
- */
- if (constraint->conname != NULL)
- {
- AppendShardIdToName(constraintName, shardId);
- }
+ RelayEventExtendConstraintAndIndexNames(alterTableStmt,
+ constraint, shardId);
}
}
else if (command->subtype == AT_DropConstraint ||
@@ -622,6 +599,56 @@ RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId)
}
+/*
+ * RelayEventExtendConstraintAndIndexNames extends the names of constraints
+ * and indexes in the given constraint with the shardId.
+ */
+static void
+RelayEventExtendConstraintAndIndexNames(AlterTableStmt *alterTableStmt,
+ Constraint *constraint,
+ uint64 shardId)
+{
+ char **constraintName = &(constraint->conname);
+ const bool missingOk = false;
+ Oid relationId = RangeVarGetRelid(alterTableStmt->relation,
+ AccessShareLock,
+ missingOk);
+
+ if (constraint->indexname)
+ {
+ char **indexName = &(constraint->indexname);
+ AppendShardIdToName(indexName, shardId);
+ }
+
+ /*
+ * Append shardId to constraint names if
+ * - table is not partitioned or
+ * - constraint is not a CHECK constraint
+ *
+ * We do not want to append shardId to partitioned table shards because
+ * the names of constraints will be inherited, and the shardId will no
+ * longer be valid for the child table.
+ *
+ * See MergeConstraintsIntoExisting function in Postgres that requires
+ * inherited check constraints in child tables to have the same name
+ * with those in parent tables.
+ */
+ if (!PartitionedTable(relationId) ||
+ constraint->contype != CONSTR_CHECK)
+ {
+ /*
+ * constraint->conname could be empty in the case of
+ * ADD {PRIMARY KEY, UNIQUE} USING INDEX.
+ * In this case, already extended index name will be used by postgres.
+ */
+ if (constraint->conname != NULL)
+ {
+ AppendShardIdToName(constraintName, shardId);
+ }
+ }
+}
+
+
/*
* RelayEventExtendNamesForInterShardCommands extends relation names in the given parse
* tree for certain utility commands. The function more specifically extends table, index
@@ -660,13 +687,6 @@ RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId,
}
else if (command->subtype == AT_AddColumn)
{
- /*
- * TODO: This code path will never be executed since we do not
- * support foreign constraint creation via
- * ALTER TABLE %s ADD COLUMN %s [constraint]. However, the code
- * is kept in case we fix the constraint creation without a name
- * and allow foreign key creation with the mentioned command.
- */
ColumnDef *columnDefinition = (ColumnDef *) command->def;
List *columnConstraints = columnDefinition->constraints;
diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c
index 550095875..c8d0325b6 100644
--- a/src/backend/distributed/replication/multi_logical_replication.c
+++ b/src/backend/distributed/replication/multi_logical_replication.c
@@ -1530,7 +1530,23 @@ CreateSubscriptions(MultiConnection *sourceConnection,
appendStringInfo(createSubscriptionCommand,
"CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s "
"WITH (citus_use_authinfo=true, create_slot=false, "
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+ /*
+ * password_required specifies whether connections to the publisher
+ * made as a result of this subscription must use password authentication.
+ * However, this setting is ignored when the subscription is owned
+ * by a superuser.
+	 * Given that this command is executed below as superuser via
+	 * ExecuteCriticalRemoteCommand(target->superuserConnection,
+	 * createSubscriptionCommand->data),
+	 * we are safe to pass password_required as false because
+	 * it will be ignored anyway.
+ */
+ "copy_data=false, enabled=false, slot_name=%s, password_required=false",
+#else
"copy_data=false, enabled=false, slot_name=%s",
+#endif
quote_identifier(target->subscriptionName),
quote_literal_cstr(conninfo->data),
quote_identifier(target->publication->name),
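On PG16 the generated command therefore takes roughly the following shape; the connection string, publication, and slot names below are placeholders, and password_required=false is harmless because the subscription is created over the superuser connection:

```sql
CREATE SUBSCRIPTION citus_shard_move_subscription_10
    CONNECTION 'host=worker-1 port=5432 dbname=app'
    PUBLICATION citus_shard_move_publication_10
    WITH (citus_use_authinfo = true, create_slot = false,
          copy_data = false, enabled = false,
          slot_name = citus_shard_move_slot_10,
          password_required = false);
```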
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index 2493a8ea9..e5d593295 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -104,10 +104,13 @@
#include "replication/walsender.h"
#include "storage/ipc.h"
#include "optimizer/planner.h"
+#include "optimizer/plancat.h"
#include "optimizer/paths.h"
#include "tcop/tcopprot.h"
#include "utils/guc.h"
#include "utils/guc_tables.h"
+#include "utils/inval.h"
+#include "utils/lsyscache.h"
#include "utils/syscache.h"
#include "utils/varlena.h"
@@ -452,6 +455,7 @@ _PG_init(void)
/* register for planner hook */
set_rel_pathlist_hook = multi_relation_restriction_hook;
+ get_relation_info_hook = multi_get_relation_info_hook;
set_join_pathlist_hook = multi_join_restriction_hook;
ExecutorStart_hook = CitusExecutorStart;
ExecutorRun_hook = CitusExecutorRun;
@@ -552,6 +556,9 @@ _PG_init(void)
"ColumnarSupportsIndexAM",
true, &handle);
+ CacheRegisterRelcacheCallback(InvalidateDistRelationCacheCallback,
+ (Datum) 0);
+
INIT_COLUMNAR_SYMBOL(CompressionTypeStr_type, CompressionTypeStr);
INIT_COLUMNAR_SYMBOL(IsColumnarTableAmTable_type, IsColumnarTableAmTable);
INIT_COLUMNAR_SYMBOL(ReadColumnarOptions_type, ReadColumnarOptions);
@@ -1449,7 +1456,7 @@ RegisterCitusConfigVariables(void)
"and operating system name. This configuration value controls "
"whether these reports are sent."),
&EnableStatisticsCollection,
-#if defined(HAVE_LIBCURL)
+#if defined(HAVE_LIBCURL) && defined(ENABLE_CITUS_STATISTICS_COLLECTION)
true,
#else
false,
@@ -2650,8 +2657,8 @@ RegisterCitusConfigVariables(void)
static void
OverridePostgresConfigProperties(void)
{
- struct config_generic **guc_vars = get_guc_variables();
- int gucCount = GetNumConfigOptions();
+ int gucCount = 0;
+ struct config_generic **guc_vars = get_guc_variables_compat(&gucCount);
for (int gucIndex = 0; gucIndex < gucCount; gucIndex++)
{
@@ -2810,7 +2817,7 @@ ShowShardsForAppNamePrefixesCheckHook(char **newval, void **extra, GucSource sou
}
char *prefixAscii = pstrdup(appNamePrefix);
- pg_clean_ascii(prefixAscii);
+ pg_clean_ascii_compat(prefixAscii, 0);
if (strcmp(prefixAscii, appNamePrefix) != 0)
{
diff --git a/src/backend/distributed/sql/citus--11.3-1--11.3-2.sql b/src/backend/distributed/sql/citus--11.3-1--11.3-2.sql
new file mode 100644
index 000000000..e7f0864f1
--- /dev/null
+++ b/src/backend/distributed/sql/citus--11.3-1--11.3-2.sql
@@ -0,0 +1,9 @@
+DROP VIEW citus_shards;
+DROP VIEW IF EXISTS pg_catalog.citus_tables;
+DROP VIEW IF EXISTS public.citus_tables;
+DROP FUNCTION citus_shard_sizes;
+
+#include "udfs/citus_shard_sizes/11.3-2.sql"
+
+#include "udfs/citus_shards/11.3-2.sql"
+#include "udfs/citus_tables/11.3-2.sql"
diff --git a/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql b/src/backend/distributed/sql/citus--11.3-2--12.0-1.sql
similarity index 91%
rename from src/backend/distributed/sql/citus--11.3-1--12.0-1.sql
rename to src/backend/distributed/sql/citus--11.3-2--12.0-1.sql
index fac95dbd4..ea0de6534 100644
--- a/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql
+++ b/src/backend/distributed/sql/citus--11.3-2--12.0-1.sql
@@ -24,13 +24,8 @@ GRANT SELECT ON pg_catalog.pg_dist_schema TO public;
#include "udfs/citus_internal_unregister_tenant_schema_globally/12.0-1.sql"
#include "udfs/citus_drop_trigger/12.0-1.sql"
-DROP VIEW citus_shards;
-DROP VIEW IF EXISTS pg_catalog.citus_tables;
-DROP VIEW IF EXISTS public.citus_tables;
-DROP FUNCTION citus_shard_sizes;
-#include "udfs/citus_shard_sizes/12.0-1.sql"
-
#include "udfs/citus_tables/12.0-1.sql"
+DROP VIEW citus_shards;
#include "udfs/citus_shards/12.0-1.sql"
#include "udfs/citus_schemas/12.0-1.sql"
diff --git a/src/backend/distributed/sql/citus--12.0-1--12.1-1.sql b/src/backend/distributed/sql/citus--12.0-1--12.1-1.sql
new file mode 100644
index 000000000..4e2a515a3
--- /dev/null
+++ b/src/backend/distributed/sql/citus--12.0-1--12.1-1.sql
@@ -0,0 +1,12 @@
+-- citus--12.0-1--12.1-1
+
+-- bump version to 12.1-1
+
+#include "udfs/citus_pause_node_within_txn/12.1-1.sql"
+#include "udfs/citus_prepare_pg_upgrade/12.1-1.sql"
+#include "udfs/citus_finish_pg_upgrade/12.1-1.sql"
+
+#include "udfs/citus_internal_update_none_dist_table_metadata/12.1-1.sql"
+#include "udfs/citus_internal_delete_placement_metadata/12.1-1.sql"
+
+#include "udfs/citus_schema_move/12.1-1.sql"
diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql
new file mode 100644
index 000000000..bb9d22969
--- /dev/null
+++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql
@@ -0,0 +1,3 @@
+-- citus--12.1-1--12.2-1
+
+-- bump version to 12.2-1
diff --git a/src/backend/distributed/sql/downgrades/citus--11.3-2--11.3-1.sql b/src/backend/distributed/sql/downgrades/citus--11.3-2--11.3-1.sql
new file mode 100644
index 000000000..78dacd59a
--- /dev/null
+++ b/src/backend/distributed/sql/downgrades/citus--11.3-2--11.3-1.sql
@@ -0,0 +1,13 @@
+DROP VIEW IF EXISTS public.citus_tables;
+DROP VIEW IF EXISTS pg_catalog.citus_tables;
+
+DROP VIEW pg_catalog.citus_shards;
+DROP FUNCTION pg_catalog.citus_shard_sizes;
+#include "../udfs/citus_shard_sizes/10.0-1.sql"
+-- citus_shards/11.1-1.sql tries to create citus_shards in pg_catalog but it is not allowed.
+-- Here we use citus_shards/10.0-1.sql to properly create the view in citus schema and
+-- then alter it to pg_catalog, so citus_shards/11.1-1.sql can REPLACE it without any errors.
+#include "../udfs/citus_shards/10.0-1.sql"
+
+#include "../udfs/citus_tables/11.1-1.sql"
+#include "../udfs/citus_shards/11.1-1.sql"
diff --git a/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql b/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-2.sql
similarity index 85%
rename from src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql
rename to src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-2.sql
index c391837f4..b410a4069 100644
--- a/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql
+++ b/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-2.sql
@@ -51,15 +51,9 @@ DROP VIEW IF EXISTS public.citus_tables;
DROP VIEW IF EXISTS pg_catalog.citus_tables;
DROP VIEW pg_catalog.citus_shards;
-DROP FUNCTION pg_catalog.citus_shard_sizes;
-#include "../udfs/citus_shard_sizes/10.0-1.sql"
--- citus_shards/11.1-1.sql tries to create citus_shards in pg_catalog but it is not allowed.
--- Here we use citus_shards/10.0-1.sql to properly create the view in citus schema and
--- then alter it to pg_catalog, so citus_shards/11.1-1.sql can REPLACE it without any errors.
-#include "../udfs/citus_shards/10.0-1.sql"
-#include "../udfs/citus_tables/11.1-1.sql"
-#include "../udfs/citus_shards/11.1-1.sql"
+#include "../udfs/citus_tables/11.3-2.sql"
+#include "../udfs/citus_shards/11.3-2.sql"
DROP TABLE pg_catalog.pg_dist_schema;
diff --git a/src/backend/distributed/sql/downgrades/citus--12.1-1--12.0-1.sql b/src/backend/distributed/sql/downgrades/citus--12.1-1--12.0-1.sql
new file mode 100644
index 000000000..6f58b2f54
--- /dev/null
+++ b/src/backend/distributed/sql/downgrades/citus--12.1-1--12.0-1.sql
@@ -0,0 +1,24 @@
+-- citus--12.1-1--12.0-1
+DROP FUNCTION pg_catalog.citus_pause_node_within_txn(int,bool,int);
+-- we have modified the relevant upgrade script to include the any_value changes
+-- we don't need to update this downgrade path for the any_value changes,
+-- since during a Citus downgrade (as opposed to a PG downgrade) they are a no-op.
+
+DROP FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(
+ relation_id oid, replication_model "char", colocation_id bigint,
+ auto_converted boolean
+);
+
+DROP FUNCTION pg_catalog.citus_internal_delete_placement_metadata(
+ placement_id bigint
+);
+
+DROP FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace, target_node_name text, target_node_port integer,
+ shard_transfer_mode citus.shard_transfer_mode
+);
+
+DROP FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace, target_node_id integer,
+ shard_transfer_mode citus.shard_transfer_mode
+);
diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql
new file mode 100644
index 000000000..b26fc16bc
--- /dev/null
+++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql
@@ -0,0 +1,2 @@
+-- citus--12.2-1--12.1-1
+-- this is an empty downgrade path since citus--12.1-1--12.2-1.sql is empty for now
diff --git a/src/backend/distributed/sql/udfs/any_value/9.1-1.sql b/src/backend/distributed/sql/udfs/any_value/9.1-1.sql
index 7eb9fdb25..4a7748fea 100644
--- a/src/backend/distributed/sql/udfs/any_value/9.1-1.sql
+++ b/src/backend/distributed/sql/udfs/any_value/9.1-1.sql
@@ -1,7 +1,13 @@
+DO $proc$
+BEGIN
+-- PG16 has its own any_value, so only create it pre PG16.
+IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN
+ EXECUTE $$
+
CREATE OR REPLACE FUNCTION pg_catalog.any_value_agg ( anyelement, anyelement )
-RETURNS anyelement AS $$
+RETURNS anyelement AS $agg$
SELECT CASE WHEN $1 IS NULL THEN $2 ELSE $1 END;
-$$ LANGUAGE SQL STABLE;
+$agg$ LANGUAGE SQL STABLE;
CREATE AGGREGATE pg_catalog.any_value (
sfunc = pg_catalog.any_value_agg,
@@ -12,3 +18,6 @@ CREATE AGGREGATE pg_catalog.any_value (
COMMENT ON AGGREGATE pg_catalog.any_value(anyelement) IS
'Returns the value of any row in the group. It is mostly useful when you know there will be only 1 element.';
+ $$;
+END IF;
+END $proc$;
diff --git a/src/backend/distributed/sql/udfs/any_value/latest.sql b/src/backend/distributed/sql/udfs/any_value/latest.sql
index 7eb9fdb25..4a7748fea 100644
--- a/src/backend/distributed/sql/udfs/any_value/latest.sql
+++ b/src/backend/distributed/sql/udfs/any_value/latest.sql
@@ -1,7 +1,13 @@
+DO $proc$
+BEGIN
+-- PG16 has its own any_value, so only create it pre PG16.
+IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN
+ EXECUTE $$
+
CREATE OR REPLACE FUNCTION pg_catalog.any_value_agg ( anyelement, anyelement )
-RETURNS anyelement AS $$
+RETURNS anyelement AS $agg$
SELECT CASE WHEN $1 IS NULL THEN $2 ELSE $1 END;
-$$ LANGUAGE SQL STABLE;
+$agg$ LANGUAGE SQL STABLE;
CREATE AGGREGATE pg_catalog.any_value (
sfunc = pg_catalog.any_value_agg,
@@ -12,3 +18,6 @@ CREATE AGGREGATE pg_catalog.any_value (
COMMENT ON AGGREGATE pg_catalog.any_value(anyelement) IS
'Returns the value of any row in the group. It is mostly useful when you know there will be only 1 element.';
+ $$;
+END IF;
+END $proc$;
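A brief usage sketch with a hypothetical table: on servers older than PG16 the aggregate created by the DO block above answers the call, while on PG16+ the block is a no-op and the built-in any_value is used instead:

```sql
-- pick an arbitrary representative row per group
SELECT department, any_value(employee_name) AS someone
FROM employees
GROUP BY department;
```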
diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.1-1.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.1-1.sql
new file mode 100644
index 000000000..766e86a2e
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.1-1.sql
@@ -0,0 +1,220 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade()
+ RETURNS void
+ LANGUAGE plpgsql
+ SET search_path = pg_catalog
+ AS $cppu$
+DECLARE
+ table_name regclass;
+ command text;
+ trigger_name text;
+BEGIN
+
+
+ IF substring(current_Setting('server_version'), '\d+')::int >= 14 THEN
+ EXECUTE $cmd$
+ -- disable propagation to prevent EnsureCoordinator errors
+ -- the aggregate created here does not depend on Citus extension (yet)
+ -- since we add the dependency with the next command
+ SET citus.enable_ddl_propagation TO OFF;
+ CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray);
+ COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray)
+ IS 'concatenate input arrays into a single array';
+ RESET citus.enable_ddl_propagation;
+ $cmd$;
+ ELSE
+ EXECUTE $cmd$
+ SET citus.enable_ddl_propagation TO OFF;
+ CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray);
+ COMMENT ON AGGREGATE array_cat_agg(anyarray)
+ IS 'concatenate input arrays into a single array';
+ RESET citus.enable_ddl_propagation;
+ $cmd$;
+ END IF;
+
+ --
+ -- Citus creates the array_cat_agg but because of a compatibility
+ -- issue between pg13-pg14, we drop and create it during upgrade.
+ -- And as Citus creates it, there needs to be a dependency to the
+ -- Citus extension, so we create that dependency here.
+ -- We are not using:
+    --  ALTER EXTENSION citus DROP/CREATE AGGREGATE array_cat_agg
+ -- because we don't have an easy way to check if the aggregate
+ -- exists with anyarray type or anycompatiblearray type.
+
+ INSERT INTO pg_depend
+ SELECT
+ 'pg_proc'::regclass::oid as classid,
+ (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid,
+ 0 as objsubid,
+ 'pg_extension'::regclass::oid as refclassid,
+ (select oid from pg_extension where extname = 'citus') as refobjid,
+ 0 as refobjsubid ,
+ 'e' as deptype;
+
+ -- PG16 has its own any_value, so only create it pre PG16.
+    -- We can remove this part when we drop support for PG15
+ IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN
+ EXECUTE $cmd$
+ -- disable propagation to prevent EnsureCoordinator errors
+ -- the aggregate created here does not depend on Citus extension (yet)
+ -- since we add the dependency with the next command
+ SET citus.enable_ddl_propagation TO OFF;
+ CREATE OR REPLACE FUNCTION pg_catalog.any_value_agg ( anyelement, anyelement )
+ RETURNS anyelement AS $$
+ SELECT CASE WHEN $1 IS NULL THEN $2 ELSE $1 END;
+ $$ LANGUAGE SQL STABLE;
+
+ CREATE AGGREGATE pg_catalog.any_value (
+ sfunc = pg_catalog.any_value_agg,
+ combinefunc = pg_catalog.any_value_agg,
+ basetype = anyelement,
+ stype = anyelement
+ );
+ COMMENT ON AGGREGATE pg_catalog.any_value(anyelement) IS
+ 'Returns the value of any row in the group. It is mostly useful when you know there will be only 1 element.';
+ RESET citus.enable_ddl_propagation;
+ --
+            -- Citus creates the any_value aggregate, but because any_value is
+            -- built into PG16, we drop and recreate it during upgrade IF the
+            -- upgraded server version is less than 16.
+ -- And as Citus creates it, there needs to be a dependency to the
+ -- Citus extension, so we create that dependency here.
+
+ INSERT INTO pg_depend
+ SELECT
+ 'pg_proc'::regclass::oid as classid,
+ (SELECT oid FROM pg_proc WHERE proname = 'any_value_agg') as objid,
+ 0 as objsubid,
+ 'pg_extension'::regclass::oid as refclassid,
+ (select oid from pg_extension where extname = 'citus') as refobjid,
+ 0 as refobjsubid ,
+ 'e' as deptype;
+
+ INSERT INTO pg_depend
+ SELECT
+ 'pg_proc'::regclass::oid as classid,
+ (SELECT oid FROM pg_proc WHERE proname = 'any_value') as objid,
+ 0 as objsubid,
+ 'pg_extension'::regclass::oid as refclassid,
+ (select oid from pg_extension where extname = 'citus') as refobjid,
+ 0 as refobjsubid ,
+ 'e' as deptype;
+ $cmd$;
+ END IF;
+
+ --
+ -- restore citus catalog tables
+ --
+ INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
+
+ -- if we are upgrading from PG14/PG15 to PG16+,
+ -- we need to regenerate the partkeys because they will include varnullingrels as well.
+ UPDATE pg_catalog.pg_dist_partition
+ SET partkey = column_name_to_column(pg_dist_partkeys_pre_16_upgrade.logicalrelid, col_name)
+ FROM public.pg_dist_partkeys_pre_16_upgrade
+ WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
+ DROP TABLE public.pg_dist_partkeys_pre_16_upgrade;
+
+ INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
+ INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
+ INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
+ INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node;
+ INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group;
+ INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction;
+ INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation;
+ INSERT INTO pg_catalog.pg_dist_cleanup SELECT * FROM public.pg_dist_cleanup;
+ INSERT INTO pg_catalog.pg_dist_schema SELECT schemaname::regnamespace, colocationid FROM public.pg_dist_schema;
+ -- enterprise catalog tables
+ INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo;
+ INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo;
+
+ INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT
+ name,
+ default_strategy,
+ shard_cost_function::regprocedure::regproc,
+ node_capacity_function::regprocedure::regproc,
+ shard_allowed_on_node_function::regprocedure::regproc,
+ default_threshold,
+ minimum_threshold,
+ improvement_threshold
+ FROM public.pg_dist_rebalance_strategy;
+
+ --
+ -- drop backup tables
+ --
+ DROP TABLE public.pg_dist_authinfo;
+ DROP TABLE public.pg_dist_colocation;
+ DROP TABLE public.pg_dist_local_group;
+ DROP TABLE public.pg_dist_node;
+ DROP TABLE public.pg_dist_node_metadata;
+ DROP TABLE public.pg_dist_partition;
+ DROP TABLE public.pg_dist_placement;
+ DROP TABLE public.pg_dist_poolinfo;
+ DROP TABLE public.pg_dist_shard;
+ DROP TABLE public.pg_dist_transaction;
+ DROP TABLE public.pg_dist_rebalance_strategy;
+ DROP TABLE public.pg_dist_cleanup;
+ DROP TABLE public.pg_dist_schema;
+ --
+ -- reset sequences
+ --
+ PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false);
+ PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false);
+ PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false);
+ PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false);
+ PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false);
+ PERFORM setval('pg_catalog.pg_dist_operationid_seq', (SELECT MAX(operation_id)+1 AS max_operation_id FROM pg_dist_cleanup), false);
+ PERFORM setval('pg_catalog.pg_dist_cleanup_recordid_seq', (SELECT MAX(record_id)+1 AS max_record_id FROM pg_dist_cleanup), false);
+ PERFORM setval('pg_catalog.pg_dist_clock_logical_seq', (SELECT last_value FROM public.pg_dist_clock_logical_seq), false);
+ DROP TABLE public.pg_dist_clock_logical_seq;
+
+
+
+ --
+ -- register triggers
+ --
+ FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition JOIN pg_class ON (logicalrelid = oid) WHERE relkind <> 'f'
+ LOOP
+ trigger_name := 'truncate_trigger_' || table_name::oid;
+ command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()';
+ EXECUTE command;
+ command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name);
+ EXECUTE command;
+ END LOOP;
+
+ --
+ -- set dependencies
+ --
+ INSERT INTO pg_depend
+ SELECT
+ 'pg_class'::regclass::oid as classid,
+ p.logicalrelid::regclass::oid as objid,
+ 0 as objsubid,
+ 'pg_extension'::regclass::oid as refclassid,
+ (select oid from pg_extension where extname = 'citus') as refobjid,
+ 0 as refobjsubid ,
+ 'n' as deptype
+ FROM pg_catalog.pg_dist_partition p;
+
+ -- set dependencies for columnar table access method
+ PERFORM columnar_internal.columnar_ensure_am_depends_catalog();
+
+ -- restore pg_dist_object from the stable identifiers
+ TRUNCATE pg_catalog.pg_dist_object;
+ INSERT INTO pg_catalog.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid)
+ SELECT
+ address.classid,
+ address.objid,
+ address.objsubid,
+ naming.distribution_argument_index,
+ naming.colocationid
+ FROM
+ public.pg_dist_object naming,
+ pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address;
+
+ DROP TABLE public.pg_dist_object;
+END;
+$cppu$;
+
+COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade()
+ IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade';
diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql
index 6dd46607a..766e86a2e 100644
--- a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql
+++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql
@@ -51,10 +51,70 @@ BEGIN
0 as refobjsubid ,
'e' as deptype;
+ -- PG16 has its own any_value, so only create it pre PG16.
+    -- We can remove this part when we drop support for PG15
+ IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN
+ EXECUTE $cmd$
+ -- disable propagation to prevent EnsureCoordinator errors
+ -- the aggregate created here does not depend on Citus extension (yet)
+ -- since we add the dependency with the next command
+ SET citus.enable_ddl_propagation TO OFF;
+ CREATE OR REPLACE FUNCTION pg_catalog.any_value_agg ( anyelement, anyelement )
+ RETURNS anyelement AS $$
+ SELECT CASE WHEN $1 IS NULL THEN $2 ELSE $1 END;
+ $$ LANGUAGE SQL STABLE;
+
+ CREATE AGGREGATE pg_catalog.any_value (
+ sfunc = pg_catalog.any_value_agg,
+ combinefunc = pg_catalog.any_value_agg,
+ basetype = anyelement,
+ stype = anyelement
+ );
+ COMMENT ON AGGREGATE pg_catalog.any_value(anyelement) IS
+ 'Returns the value of any row in the group. It is mostly useful when you know there will be only 1 element.';
+ RESET citus.enable_ddl_propagation;
+ --
+            -- Citus creates the any_value aggregate, but because any_value is
+            -- built into PG16, we drop and recreate it during upgrade IF the
+            -- upgraded server version is less than 16.
+ -- And as Citus creates it, there needs to be a dependency to the
+ -- Citus extension, so we create that dependency here.
+
+ INSERT INTO pg_depend
+ SELECT
+ 'pg_proc'::regclass::oid as classid,
+ (SELECT oid FROM pg_proc WHERE proname = 'any_value_agg') as objid,
+ 0 as objsubid,
+ 'pg_extension'::regclass::oid as refclassid,
+ (select oid from pg_extension where extname = 'citus') as refobjid,
+ 0 as refobjsubid ,
+ 'e' as deptype;
+
+ INSERT INTO pg_depend
+ SELECT
+ 'pg_proc'::regclass::oid as classid,
+ (SELECT oid FROM pg_proc WHERE proname = 'any_value') as objid,
+ 0 as objsubid,
+ 'pg_extension'::regclass::oid as refclassid,
+ (select oid from pg_extension where extname = 'citus') as refobjid,
+ 0 as refobjsubid ,
+ 'e' as deptype;
+ $cmd$;
+ END IF;
+
--
-- restore citus catalog tables
--
INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
+
+ -- if we are upgrading from PG14/PG15 to PG16+,
+ -- we need to regenerate the partkeys because they will include varnullingrels as well.
+ UPDATE pg_catalog.pg_dist_partition
+ SET partkey = column_name_to_column(pg_dist_partkeys_pre_16_upgrade.logicalrelid, col_name)
+ FROM public.pg_dist_partkeys_pre_16_upgrade
+ WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
+ DROP TABLE public.pg_dist_partkeys_pre_16_upgrade;
+
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;
diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.1-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.1-1.sql
new file mode 100644
index 000000000..5af65f0be
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.1-1.sql
@@ -0,0 +1,9 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata(
+ placement_id bigint)
+RETURNS void
+LANGUAGE C
+VOLATILE
+AS 'MODULE_PATHNAME',
+$$citus_internal_delete_placement_metadata$$;
+COMMENT ON FUNCTION pg_catalog.citus_internal_delete_placement_metadata(bigint)
+ IS 'Delete placement with given id from pg_dist_placement metadata table.';
diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql
new file mode 100644
index 000000000..5af65f0be
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql
@@ -0,0 +1,9 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata(
+ placement_id bigint)
+RETURNS void
+LANGUAGE C
+VOLATILE
+AS 'MODULE_PATHNAME',
+$$citus_internal_delete_placement_metadata$$;
+COMMENT ON FUNCTION pg_catalog.citus_internal_delete_placement_metadata(bigint)
+ IS 'Delete placement with given id from pg_dist_placement metadata table.';
diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.1-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.1-1.sql
new file mode 100644
index 000000000..bcd05d8d0
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.1-1.sql
@@ -0,0 +1,11 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(
+ relation_id oid,
+ replication_model "char",
+ colocation_id bigint,
+ auto_converted boolean)
+RETURNS void
+LANGUAGE C
+VOLATILE
+AS 'MODULE_PATHNAME';
+COMMENT ON FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(oid, "char", bigint, boolean)
+ IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.';
diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql
new file mode 100644
index 000000000..bcd05d8d0
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql
@@ -0,0 +1,11 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(
+ relation_id oid,
+ replication_model "char",
+ colocation_id bigint,
+ auto_converted boolean)
+RETURNS void
+LANGUAGE C
+VOLATILE
+AS 'MODULE_PATHNAME';
+COMMENT ON FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(oid, "char", bigint, boolean)
+ IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.';
diff --git a/src/backend/distributed/sql/udfs/citus_pause_node_within_txn/12.1-1.sql b/src/backend/distributed/sql/udfs/citus_pause_node_within_txn/12.1-1.sql
new file mode 100644
index 000000000..9f81d6840
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_pause_node_within_txn/12.1-1.sql
@@ -0,0 +1,13 @@
+CREATE FUNCTION pg_catalog.citus_pause_node_within_txn(node_id int,
+ force bool DEFAULT false,
+ lock_cooldown int DEFAULT 10000)
+ RETURNS void
+ LANGUAGE C STRICT
+ AS 'MODULE_PATHNAME', $$citus_pause_node_within_txn$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_pause_node_within_txn(node_id int,
+ force bool ,
+ lock_cooldown int )
+    IS 'pauses the node with the given id, which locks its tables and prevents any queries from being executed on that node';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_pause_node_within_txn(int,bool,int) FROM PUBLIC;
diff --git a/src/backend/distributed/sql/udfs/citus_pause_node_within_txn/latest.sql b/src/backend/distributed/sql/udfs/citus_pause_node_within_txn/latest.sql
new file mode 100644
index 000000000..9f81d6840
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_pause_node_within_txn/latest.sql
@@ -0,0 +1,13 @@
+CREATE FUNCTION pg_catalog.citus_pause_node_within_txn(node_id int,
+ force bool DEFAULT false,
+ lock_cooldown int DEFAULT 10000)
+ RETURNS void
+ LANGUAGE C STRICT
+ AS 'MODULE_PATHNAME', $$citus_pause_node_within_txn$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_pause_node_within_txn(node_id int,
+ force bool ,
+ lock_cooldown int )
+    IS 'pauses the node with the given id, which locks its tables and prevents any queries from being executed on that node';
+
+REVOKE ALL ON FUNCTION pg_catalog.citus_pause_node_within_txn(int,bool,int) FROM PUBLIC;
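A short usage sketch for the new UDF (the node id is hypothetical); it only makes sense inside an explicit transaction, since the pause is released when the transaction ends:

```sql
BEGIN;
-- block queries from being routed to node 2 for the rest of this transaction
SELECT pg_catalog.citus_pause_node_within_txn(2);
-- ... perform the maintenance that must not race with that node ...
COMMIT;
```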
diff --git a/src/backend/distributed/sql/udfs/citus_prepare_pg_upgrade/12.1-1.sql b/src/backend/distributed/sql/udfs/citus_prepare_pg_upgrade/12.1-1.sql
new file mode 100644
index 000000000..b4bc653f2
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_prepare_pg_upgrade/12.1-1.sql
@@ -0,0 +1,98 @@
+CREATE OR REPLACE FUNCTION pg_catalog.citus_prepare_pg_upgrade()
+ RETURNS void
+ LANGUAGE plpgsql
+ SET search_path = pg_catalog
+ AS $cppu$
+BEGIN
+
+ DELETE FROM pg_depend WHERE
+ objid IN (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') AND
+ refobjid IN (select oid from pg_extension where extname = 'citus');
+ --
+ -- We are dropping the aggregates because postgres 14 changed
+ -- array_cat type from anyarray to anycompatiblearray. When
+ -- upgrading to pg14, specifically when running pg_restore on
+ -- array_cat_agg we would get an error. So we drop the aggregate
+ -- and create the right one on citus_finish_pg_upgrade.
+
+ DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
+ DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
+
+ -- We should drop any_value because PG16 has its own any_value function
+    -- We can remove this part when we drop support for PG15
+ DELETE FROM pg_depend WHERE
+ objid IN (SELECT oid FROM pg_proc WHERE proname = 'any_value' OR proname = 'any_value_agg') AND
+ refobjid IN (select oid from pg_extension where extname = 'citus');
+ DROP AGGREGATE IF EXISTS pg_catalog.any_value(anyelement);
+ DROP FUNCTION IF EXISTS pg_catalog.any_value_agg(anyelement, anyelement);
+
+ --
+ -- Drop existing backup tables
+ --
+ DROP TABLE IF EXISTS public.pg_dist_partition;
+ DROP TABLE IF EXISTS public.pg_dist_shard;
+ DROP TABLE IF EXISTS public.pg_dist_placement;
+ DROP TABLE IF EXISTS public.pg_dist_node_metadata;
+ DROP TABLE IF EXISTS public.pg_dist_node;
+ DROP TABLE IF EXISTS public.pg_dist_local_group;
+ DROP TABLE IF EXISTS public.pg_dist_transaction;
+ DROP TABLE IF EXISTS public.pg_dist_colocation;
+ DROP TABLE IF EXISTS public.pg_dist_authinfo;
+ DROP TABLE IF EXISTS public.pg_dist_poolinfo;
+ DROP TABLE IF EXISTS public.pg_dist_rebalance_strategy;
+ DROP TABLE IF EXISTS public.pg_dist_object;
+ DROP TABLE IF EXISTS public.pg_dist_cleanup;
+ DROP TABLE IF EXISTS public.pg_dist_schema;
+ DROP TABLE IF EXISTS public.pg_dist_clock_logical_seq;
+
+ --
+ -- backup citus catalog tables
+ --
+ CREATE TABLE public.pg_dist_partition AS SELECT * FROM pg_catalog.pg_dist_partition;
+ CREATE TABLE public.pg_dist_shard AS SELECT * FROM pg_catalog.pg_dist_shard;
+ CREATE TABLE public.pg_dist_placement AS SELECT * FROM pg_catalog.pg_dist_placement;
+ CREATE TABLE public.pg_dist_node_metadata AS SELECT * FROM pg_catalog.pg_dist_node_metadata;
+ CREATE TABLE public.pg_dist_node AS SELECT * FROM pg_catalog.pg_dist_node;
+ CREATE TABLE public.pg_dist_local_group AS SELECT * FROM pg_catalog.pg_dist_local_group;
+ CREATE TABLE public.pg_dist_transaction AS SELECT * FROM pg_catalog.pg_dist_transaction;
+ CREATE TABLE public.pg_dist_colocation AS SELECT * FROM pg_catalog.pg_dist_colocation;
+ CREATE TABLE public.pg_dist_cleanup AS SELECT * FROM pg_catalog.pg_dist_cleanup;
+ -- save names of the tenant schemas instead of their oids because the oids might change after pg upgrade
+ CREATE TABLE public.pg_dist_schema AS SELECT schemaid::regnamespace::text AS schemaname, colocationid FROM pg_catalog.pg_dist_schema;
+ -- enterprise catalog tables
+ CREATE TABLE public.pg_dist_authinfo AS SELECT * FROM pg_catalog.pg_dist_authinfo;
+ CREATE TABLE public.pg_dist_poolinfo AS SELECT * FROM pg_catalog.pg_dist_poolinfo;
+ -- sequences
+ CREATE TABLE public.pg_dist_clock_logical_seq AS SELECT last_value FROM pg_catalog.pg_dist_clock_logical_seq;
+ CREATE TABLE public.pg_dist_rebalance_strategy AS SELECT
+ name,
+ default_strategy,
+ shard_cost_function::regprocedure::text,
+ node_capacity_function::regprocedure::text,
+ shard_allowed_on_node_function::regprocedure::text,
+ default_threshold,
+ minimum_threshold,
+ improvement_threshold
+ FROM pg_catalog.pg_dist_rebalance_strategy;
+
+ -- store upgrade stable identifiers on pg_dist_object catalog
+ CREATE TABLE public.pg_dist_object AS SELECT
+ address.type,
+ address.object_names,
+ address.object_args,
+ objects.distribution_argument_index,
+ objects.colocationid
+ FROM pg_catalog.pg_dist_object objects,
+ pg_catalog.pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid) address;
+
+ -- if we are upgrading from PG14/PG15 to PG16+,
+ -- we will need to regenerate the partkeys because they will include varnullingrels as well.
+ -- so we save the partkeys as column names here
+ CREATE TABLE IF NOT EXISTS public.pg_dist_partkeys_pre_16_upgrade AS
+ SELECT logicalrelid, column_to_column_name(logicalrelid, partkey) as col_name
+ FROM pg_catalog.pg_dist_partition WHERE partkey IS NOT NULL AND partkey NOT ILIKE '%varnullingrels%';
+END;
+$cppu$;
+
+COMMENT ON FUNCTION pg_catalog.citus_prepare_pg_upgrade()
+ IS 'perform tasks to copy citus settings to a location that could later be restored after pg_upgrade is done';
diff --git a/src/backend/distributed/sql/udfs/citus_prepare_pg_upgrade/latest.sql b/src/backend/distributed/sql/udfs/citus_prepare_pg_upgrade/latest.sql
index 31fa793a6..b4bc653f2 100644
--- a/src/backend/distributed/sql/udfs/citus_prepare_pg_upgrade/latest.sql
+++ b/src/backend/distributed/sql/udfs/citus_prepare_pg_upgrade/latest.sql
@@ -17,6 +17,15 @@ BEGIN
DROP AGGREGATE IF EXISTS array_cat_agg(anyarray);
DROP AGGREGATE IF EXISTS array_cat_agg(anycompatiblearray);
+
+ -- We should drop any_value because PG16 has its own any_value function
+    -- We can remove this part when we drop support for PG15
+ DELETE FROM pg_depend WHERE
+ objid IN (SELECT oid FROM pg_proc WHERE proname = 'any_value' OR proname = 'any_value_agg') AND
+ refobjid IN (select oid from pg_extension where extname = 'citus');
+ DROP AGGREGATE IF EXISTS pg_catalog.any_value(anyelement);
+ DROP FUNCTION IF EXISTS pg_catalog.any_value_agg(anyelement, anyelement);
+
--
-- Drop existing backup tables
--
@@ -75,6 +84,13 @@ BEGIN
objects.colocationid
FROM pg_catalog.pg_dist_object objects,
pg_catalog.pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid) address;
+
+ -- if we are upgrading from PG14/PG15 to PG16+,
+ -- we will need to regenerate the partkeys because they will include varnullingrels as well.
+ -- so we save the partkeys as column names here
+ CREATE TABLE IF NOT EXISTS public.pg_dist_partkeys_pre_16_upgrade AS
+ SELECT logicalrelid, column_to_column_name(logicalrelid, partkey) as col_name
+ FROM pg_catalog.pg_dist_partition WHERE partkey IS NOT NULL AND partkey NOT ILIKE '%varnullingrels%';
END;
$cppu$;
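As a hedged illustration of the partkey round trip prepared above (relation and column names are hypothetical), the two Citus helpers referenced in the script serialize the distribution key to a plain column name before pg_upgrade and rebuild it afterwards, so the stored node tree matches the PG16 Var layout:

```sql
-- before pg_upgrade: remember the distribution column by name
SELECT logicalrelid,
       column_to_column_name(logicalrelid, partkey) AS col_name
FROM pg_catalog.pg_dist_partition
WHERE logicalrelid = 'orders'::regclass;

-- after pg_upgrade: regenerate the serialized partkey from that name
SELECT column_name_to_column('orders'::regclass, 'customer_id');
```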
diff --git a/src/backend/distributed/sql/udfs/citus_schema_move/12.1-1.sql b/src/backend/distributed/sql/udfs/citus_schema_move/12.1-1.sql
new file mode 100644
index 000000000..8ca7e703f
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_schema_move/12.1-1.sql
@@ -0,0 +1,29 @@
+-- citus_schema_move, using target node name and node port
+CREATE OR REPLACE FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace,
+ target_node_name text,
+ target_node_port integer,
+ shard_transfer_mode citus.shard_transfer_mode default 'auto')
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_schema_move$$;
+COMMENT ON FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace,
+ target_node_name text,
+ target_node_port integer,
+ shard_transfer_mode citus.shard_transfer_mode)
+IS 'move a distributed schema to given node';
+
+-- citus_schema_move, using target node id
+CREATE OR REPLACE FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace,
+ target_node_id integer,
+ shard_transfer_mode citus.shard_transfer_mode default 'auto')
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_schema_move_with_nodeid$$;
+COMMENT ON FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace,
+ target_node_id integer,
+ shard_transfer_mode citus.shard_transfer_mode)
+IS 'move a distributed schema to given node';
diff --git a/src/backend/distributed/sql/udfs/citus_schema_move/latest.sql b/src/backend/distributed/sql/udfs/citus_schema_move/latest.sql
new file mode 100644
index 000000000..8ca7e703f
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_schema_move/latest.sql
@@ -0,0 +1,29 @@
+-- citus_schema_move, using target node name and node port
+CREATE OR REPLACE FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace,
+ target_node_name text,
+ target_node_port integer,
+ shard_transfer_mode citus.shard_transfer_mode default 'auto')
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_schema_move$$;
+COMMENT ON FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace,
+ target_node_name text,
+ target_node_port integer,
+ shard_transfer_mode citus.shard_transfer_mode)
+IS 'move a distributed schema to given node';
+
+-- citus_schema_move, using target node id
+CREATE OR REPLACE FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace,
+ target_node_id integer,
+ shard_transfer_mode citus.shard_transfer_mode default 'auto')
+RETURNS void
+LANGUAGE C STRICT
+AS 'MODULE_PATHNAME', $$citus_schema_move_with_nodeid$$;
+COMMENT ON FUNCTION pg_catalog.citus_schema_move(
+ schema_id regnamespace,
+ target_node_id integer,
+ shard_transfer_mode citus.shard_transfer_mode)
+IS 'move a distributed schema to given node';
diff --git a/src/backend/distributed/sql/udfs/citus_shard_sizes/12.0-1.sql b/src/backend/distributed/sql/udfs/citus_shard_sizes/11.3-2.sql
similarity index 100%
rename from src/backend/distributed/sql/udfs/citus_shard_sizes/12.0-1.sql
rename to src/backend/distributed/sql/udfs/citus_shard_sizes/11.3-2.sql
diff --git a/src/backend/distributed/sql/udfs/citus_shards/11.3-2.sql b/src/backend/distributed/sql/udfs/citus_shards/11.3-2.sql
new file mode 100644
index 000000000..3b08a5463
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_shards/11.3-2.sql
@@ -0,0 +1,46 @@
+CREATE OR REPLACE VIEW citus.citus_shards AS
+SELECT
+ pg_dist_shard.logicalrelid AS table_name,
+ pg_dist_shard.shardid,
+ shard_name(pg_dist_shard.logicalrelid, pg_dist_shard.shardid) as shard_name,
+ CASE WHEN partkey IS NOT NULL THEN 'distributed' WHEN repmodel = 't' THEN 'reference' ELSE 'local' END AS citus_table_type,
+ colocationid AS colocation_id,
+ pg_dist_node.nodename,
+ pg_dist_node.nodeport,
+ size as shard_size
+FROM
+ pg_dist_shard
+JOIN
+ pg_dist_placement
+ON
+ pg_dist_shard.shardid = pg_dist_placement.shardid
+JOIN
+ pg_dist_node
+ON
+ pg_dist_placement.groupid = pg_dist_node.groupid
+JOIN
+ pg_dist_partition
+ON
+ pg_dist_partition.logicalrelid = pg_dist_shard.logicalrelid
+LEFT JOIN
+ (SELECT shard_id, max(size) as size from citus_shard_sizes() GROUP BY shard_id) as shard_sizes
+ON
+ pg_dist_shard.shardid = shard_sizes.shard_id
+WHERE
+ pg_dist_placement.shardstate = 1
+AND
+ -- filter out tables owned by extensions
+ pg_dist_partition.logicalrelid NOT IN (
+ SELECT
+ objid
+ FROM
+ pg_depend
+ WHERE
+ classid = 'pg_class'::regclass AND refclassid = 'pg_extension'::regclass AND deptype = 'e'
+ )
+ORDER BY
+ pg_dist_shard.logicalrelid::text, shardid
+;
+
+ALTER VIEW citus.citus_shards SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_shards TO public;
diff --git a/src/backend/distributed/sql/udfs/citus_tables/11.3-2.sql b/src/backend/distributed/sql/udfs/citus_tables/11.3-2.sql
new file mode 100644
index 000000000..ead0b4923
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_tables/11.3-2.sql
@@ -0,0 +1,55 @@
+DO $$
+declare
+citus_tables_create_query text;
+BEGIN
+citus_tables_create_query=$CTCQ$
+ CREATE OR REPLACE VIEW %I.citus_tables AS
+ SELECT
+ logicalrelid AS table_name,
+ CASE WHEN partkey IS NOT NULL THEN 'distributed' ELSE
+ CASE when repmodel = 't' THEN 'reference' ELSE 'local' END
+ END AS citus_table_type,
+ coalesce(column_to_column_name(logicalrelid, partkey), '') AS distribution_column,
+ colocationid AS colocation_id,
+ pg_size_pretty(table_sizes.table_size) AS table_size,
+ (select count(*) from pg_dist_shard where logicalrelid = p.logicalrelid) AS shard_count,
+ pg_get_userbyid(relowner) AS table_owner,
+ amname AS access_method
+ FROM
+ pg_dist_partition p
+ JOIN
+ pg_class c ON (p.logicalrelid = c.oid)
+ LEFT JOIN
+ pg_am a ON (a.oid = c.relam)
+ JOIN
+ (
+ SELECT ds.logicalrelid AS table_id, SUM(css.size) AS table_size
+ FROM citus_shard_sizes() css, pg_dist_shard ds
+ WHERE css.shard_id = ds.shardid
+ GROUP BY ds.logicalrelid
+ ) table_sizes ON (table_sizes.table_id = p.logicalrelid)
+ WHERE
+ -- filter out tables owned by extensions
+ logicalrelid NOT IN (
+ SELECT
+ objid
+ FROM
+ pg_depend
+ WHERE
+ classid = 'pg_class'::regclass AND refclassid = 'pg_extension'::regclass AND deptype = 'e'
+ )
+ ORDER BY
+ logicalrelid::text;
+$CTCQ$;
+
+IF EXISTS (SELECT 1 FROM pg_namespace WHERE nspname = 'public') THEN
+ EXECUTE format(citus_tables_create_query, 'public');
+ GRANT SELECT ON public.citus_tables TO public;
+ELSE
+ EXECUTE format(citus_tables_create_query, 'citus');
+ ALTER VIEW citus.citus_tables SET SCHEMA pg_catalog;
+ GRANT SELECT ON pg_catalog.citus_tables TO public;
+END IF;
+
+END;
+$$;
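
For reference, a query against the recreated view might look like this, using only the columns defined above:

```sql
-- illustration only: list Citus-managed tables with their distribution details,
-- using the columns defined by the view above
SELECT table_name, citus_table_type, distribution_column, shard_count, table_size
FROM citus_tables
ORDER BY table_name;
```
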
diff --git a/src/backend/distributed/test/fake_am.c b/src/backend/distributed/test/fake_am.c
index 1654bf095..8a723e4c4 100644
--- a/src/backend/distributed/test/fake_am.c
+++ b/src/backend/distributed/test/fake_am.c
@@ -254,7 +254,7 @@ fake_tuple_update(Relation relation, ItemPointer otid,
TupleTableSlot *slot, CommandId cid,
Snapshot snapshot, Snapshot crosscheck,
bool wait, TM_FailureData *tmfd,
- LockTupleMode *lockmode, bool *update_indexes)
+ LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
{
elog(ERROR, "fake_tuple_update not implemented");
}
@@ -283,7 +283,7 @@ fake_finish_bulk_insert(Relation relation, int options)
*/
static void
fake_relation_set_new_filenode(Relation rel,
- const RelFileNode *newrnode,
+ const RelFileLocator *newrnode,
char persistence,
TransactionId *freezeXid,
MultiXactId *minmulti)
@@ -344,7 +344,7 @@ fake_relation_nontransactional_truncate(Relation rel)
static void
-fake_copy_data(Relation rel, const RelFileNode *newrnode)
+fake_copy_data(Relation rel, const RelFileLocator *newrnode)
{
elog(ERROR, "fake_copy_data not implemented");
}
@@ -555,7 +555,11 @@ static const TableAmRoutine fake_methods = {
.tuple_satisfies_snapshot = fake_tuple_satisfies_snapshot,
.index_delete_tuples = fake_index_delete_tuples,
+#if PG_VERSION_NUM >= PG_VERSION_16
+ .relation_set_new_filelocator = fake_relation_set_new_filenode,
+#else
.relation_set_new_filenode = fake_relation_set_new_filenode,
+#endif
.relation_nontransactional_truncate = fake_relation_nontransactional_truncate,
.relation_copy_data = fake_copy_data,
.relation_copy_for_cluster = fake_copy_for_cluster,
diff --git a/src/backend/distributed/test/pg_send_cancellation.c b/src/backend/distributed/test/pg_send_cancellation.c
deleted file mode 100644
index 576d915a6..000000000
--- a/src/backend/distributed/test/pg_send_cancellation.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * pg_send_cancellation.c
- *
- * This file contains functions to test setting pg_send_cancellation.
- *
- * Copyright (c) Citus Data, Inc.
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-#include "miscadmin.h"
-#include "fmgr.h"
-#include "port.h"
-
-#include "postmaster/postmaster.h"
-
-
-#define PG_SEND_CANCELLATION_VERSION \
- "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n"
-
-
-/* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(get_cancellation_key);
-PG_FUNCTION_INFO_V1(run_pg_send_cancellation);
-
-
-/*
- * get_cancellation_key returns the cancellation key of the current process
- * as an integer.
- */
-Datum
-get_cancellation_key(PG_FUNCTION_ARGS)
-{
- PG_RETURN_INT32(MyCancelKey);
-}
-
-
-/*
- * run_pg_send_cancellation runs the pg_send_cancellation program with
- * the specified arguments
- */
-Datum
-run_pg_send_cancellation(PG_FUNCTION_ARGS)
-{
- int pid = PG_GETARG_INT32(0);
- int cancelKey = PG_GETARG_INT32(1);
-
- char sendCancellationPath[MAXPGPATH];
- char command[1024];
-
- /* Locate executable backend before we change working directory */
- if (find_other_exec(my_exec_path, "pg_send_cancellation",
- PG_SEND_CANCELLATION_VERSION,
- sendCancellationPath) < 0)
- {
- ereport(ERROR, (errmsg("could not locate pg_send_cancellation")));
- }
-
- pg_snprintf(command, sizeof(command), "%s %d %d %s %d",
- sendCancellationPath, pid, cancelKey, "localhost", PostPortNumber);
-
- if (system(command) != 0)
- {
- ereport(ERROR, (errmsg("failed to run command: %s", command)));
- }
-
- PG_RETURN_VOID();
-}
diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c
index 0be4bb2e9..0b4c0f02e 100644
--- a/src/backend/distributed/transaction/lock_graph.c
+++ b/src/backend/distributed/transaction/lock_graph.c
@@ -725,7 +725,100 @@ UnlockLockData(void)
* which also contains entries for locks which have not been granted yet, but
* it does not reflect the order of the wait queue. We therefore handle the
* wait queue separately.
+ *
+ * We have separate blocks for PG16 and earlier versions, because PG16 changed
+ * the data structure of the lock wait queue.
+ */
+
+#if PG_VERSION_NUM >= PG_VERSION_16
+static void
+AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining)
+{
+ /* the lock for which this process is waiting */
+ LOCK *waitLock = waitingProc->waitLock;
+
+ /* determine the conflict mask for the lock level used by the process */
+ LockMethod lockMethodTable = GetLocksMethodTable(waitLock);
+ int conflictMask = lockMethodTable->conflictTab[waitingProc->waitLockMode];
+
+ /* iterate through the queue of processes holding the lock */
+ dlist_head *procLocks = &waitLock->procLocks;
+
+ dlist_iter iter;
+ dlist_foreach(iter, procLocks)
+ {
+ PROCLOCK *procLock = dlist_container(PROCLOCK, lockLink, iter.cur);
+ PGPROC *currentProc = procLock->tag.myProc;
+
+ /*
+ * Skip processes from the same lock group, processes that don't conflict,
+ * and processes that are waiting on safe operations.
+ */
+ if (!IsSameLockGroup(waitingProc, currentProc) &&
+ IsConflictingLockMask(procLock->holdMask, conflictMask) &&
+ !IsProcessWaitingForSafeOperations(currentProc))
+ {
+ AddWaitEdge(waitGraph, waitingProc, currentProc, remaining);
+ }
+ }
+}
+
+
+static void
+AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining)
+{
+ /* the lock for which this process is waiting */
+ LOCK *waitLock = waitingProc->waitLock;
+
+ /* determine the conflict mask for the lock level used by the process */
+ LockMethod lockMethodTable = GetLocksMethodTable(waitLock);
+ int conflictMask = lockMethodTable->conflictTab[waitingProc->waitLockMode];
+
+ /* iterate through the wait queue */
+ dclist_head *waitQueue = &waitLock->waitProcs;
+
+ dlist_iter iter;
+ dclist_foreach(iter, waitQueue)
+ {
+ PGPROC *currentProc = dlist_container(PGPROC, links, iter.cur);
+
+ if (currentProc == waitingProc)
+ {
+ /*
+ * Iterate through the queue from the start until we encounter waitingProc,
+ * since we only care about processes in front of waitingProc in the queue.
+ */
+ break;
+ }
+
+ int awaitMask = LOCKBIT_ON(currentProc->waitLockMode);
+
+ /*
+ * Skip processes from the same lock group, processes that don't conflict,
+ * and processes that are waiting on safe operations.
+ */
+ if (!IsSameLockGroup(waitingProc, currentProc) &&
+ IsConflictingLockMask(awaitMask, conflictMask) &&
+ !IsProcessWaitingForSafeOperations(currentProc))
+ {
+ AddWaitEdge(waitGraph, waitingProc, currentProc, remaining);
+ }
+
+ currentProc = (PGPROC *) currentProc->links.next;
+ }
+}
+
+
+#else
+
static void
AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining)
{
@@ -762,10 +855,6 @@ AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remai
}
-/*
- * AddEdgesForWaitQueue adds an edge to the wait graph for processes in front of
- * waitingProc in the wait queue that are trying to acquire a conflicting lock.
- */
static void
AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining)
{
@@ -805,6 +894,9 @@ AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remai
}
+#endif
+
+
/*
* AddWaitEdge adds a new wait edge to a wait graph. The nodes in the graph are
* transactions and an edge indicates the "waiting" process is blocked on a lock
diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c
index 5add48009..9a7bd9089 100644
--- a/src/backend/distributed/transaction/transaction_management.c
+++ b/src/backend/distributed/transaction/transaction_management.c
@@ -19,6 +19,8 @@
#include "access/twophase.h"
#include "access/xact.h"
+#include "catalog/dependency.h"
+#include "common/hashfn.h"
#include "distributed/backend_data.h"
#include "distributed/citus_safe_lib.h"
#include "distributed/connection_management.h"
@@ -30,6 +32,7 @@
#include "distributed/local_executor.h"
#include "distributed/locally_reserved_shared_connections.h"
#include "distributed/maintenanced.h"
+#include "distributed/metadata/dependency.h"
#include "distributed/multi_executor.h"
#include "distributed/multi_logical_replication.h"
#include "distributed/multi_explain.h"
@@ -89,14 +92,25 @@ StringInfo activeSetStmts;
* Though a list, we treat this as a stack, pushing on subxact contexts whenever
* e.g. a SAVEPOINT is executed (though this is actually performed by providing
* PostgreSQL with a sub-xact callback). At present, the context of a subxact
- * includes a subxact identifier as well as any SET LOCAL statements propagated
- * to workers during the sub-transaction.
+ * includes
+ * - a subxact identifier,
+ * - any SET LOCAL statements propagated to workers during the sub-transaction,
+ * - all objects propagated to workers during the sub-transaction.
*
* To be clear, last item of activeSubXactContexts list corresponds to top of
* stack.
*/
static List *activeSubXactContexts = NIL;
+/*
+ * PropagatedObjectsInTx is a set of objects propagated in the root transaction.
+ * We also keep track of objects propagated in sub-transactions in activeSubXactContexts.
+ * When a sub-transaction commits, the objects propagated during it are moved to
+ * the parent transaction's set; when it aborts, they are discarded.
+ */
+static HTAB *PropagatedObjectsInTx = NULL;
+
/* some pre-allocated memory so we don't need to call malloc() during callbacks */
MemoryContext CitusXactCallbackContext = NULL;
@@ -142,11 +156,17 @@ static void CoordinatedSubTransactionCallback(SubXactEvent event, SubTransaction
/* remaining functions */
static void AdjustMaxPreparedTransactions(void);
static void PushSubXact(SubTransactionId subId);
-static void PopSubXact(SubTransactionId subId);
+static void PopSubXact(SubTransactionId subId, bool commit);
static void ResetGlobalVariables(void);
static bool SwallowErrors(void (*func)(void));
static void ForceAllInProgressConnectionsToClose(void);
static void EnsurePrepareTransactionIsAllowed(void);
+static HTAB * CurrentTransactionPropagatedObjects(bool readonly);
+static HTAB * ParentTransactionPropagatedObjects(bool readonly);
+static void MovePropagatedObjectsToParentTransaction(void);
+static bool DependencyInPropagatedObjectsHash(HTAB *propagatedObjects,
+ const ObjectAddress *dependency);
+static HTAB * CreateTxPropagatedObjectsHash(void);
/*
@@ -321,6 +341,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
ResetGlobalVariables();
ResetRelationAccessHash();
+ ResetPropagatedObjects();
/*
* Make sure that we give the shared connections back to the shared
@@ -391,6 +412,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
ResetGlobalVariables();
ResetRelationAccessHash();
+ ResetPropagatedObjects();
/* Reset any local replication origin session since transaction has been aborted.*/
ResetReplicationOriginLocalSession();
@@ -638,7 +660,7 @@ CoordinatedSubTransactionCallback(SubXactEvent event, SubTransactionId subId,
switch (event)
{
/*
- * Our subtransaction stack should be consistent with postgres' internal
+ * Our sub-transaction stack should be consistent with postgres' internal
* transaction stack. In case of subxact begin, postgres calls our
* callback after it has pushed the transaction into stack, so we have to
* do the same even if worker commands fail, so we PushSubXact() first.
@@ -672,7 +694,7 @@ CoordinatedSubTransactionCallback(SubXactEvent event, SubTransactionId subId,
{
CoordinatedRemoteTransactionsSavepointRelease(subId);
}
- PopSubXact(subId);
+ PopSubXact(subId, true);
/* Set CachedDuringCitusCreation to one level lower to represent citus creation is done */
@@ -706,7 +728,7 @@ CoordinatedSubTransactionCallback(SubXactEvent event, SubTransactionId subId,
{
CoordinatedRemoteTransactionsSavepointRollback(subId);
}
- PopSubXact(subId);
+ PopSubXact(subId, false);
/*
* Clear MetadataCache table if we're aborting from a CREATE EXTENSION Citus
@@ -775,6 +797,9 @@ PushSubXact(SubTransactionId subId)
state->subId = subId;
state->setLocalCmds = activeSetStmts;
+ /* we lazily create hashset when any object is propagated during sub-transaction */
+ state->propagatedObjects = NULL;
+
/* append to list and reset active set stmts for upcoming sub-xact */
activeSubXactContexts = lappend(activeSubXactContexts, state);
activeSetStmts = makeStringInfo();
@@ -783,7 +808,7 @@ PushSubXact(SubTransactionId subId)
/* PopSubXact pops subId from the stack of active sub-transactions. */
static void
-PopSubXact(SubTransactionId subId)
+PopSubXact(SubTransactionId subId, bool commit)
{
SubXactContext *state = llast(activeSubXactContexts);
@@ -806,6 +831,16 @@ PopSubXact(SubTransactionId subId)
*/
activeSetStmts = state->setLocalCmds;
+ /*
+ * Keep subtransaction's propagated objects at parent transaction
+ * if subtransaction committed. Otherwise, discard them.
+ */
+ if (commit)
+ {
+ MovePropagatedObjectsToParentTransaction();
+ }
+ hash_destroy(state->propagatedObjects);
+
/*
* Free state to avoid memory leaks when we create subxacts for each row,
* e.g. in exception handling of UDFs.
@@ -913,3 +948,227 @@ EnsurePrepareTransactionIsAllowed(void)
errmsg("cannot use 2PC in transactions involving "
"multiple servers")));
}
+
+
+/*
+ * CurrentTransactionPropagatedObjects returns the objects propagated in current
+ * sub-transaction or the root transaction if no sub-transaction exists.
+ *
+ * If readonly is true, the hashmap is not created lazily; NULL is returned when it
+ * does not already exist in the current sub-transaction.
+ */
+static HTAB *
+CurrentTransactionPropagatedObjects(bool readonly)
+{
+ if (activeSubXactContexts == NIL)
+ {
+ /* hashset in the root transaction if there is no sub-transaction */
+ if (PropagatedObjectsInTx == NULL && !readonly)
+ {
+ /* lazily create hashset for root transaction, for mutating uses */
+ PropagatedObjectsInTx = CreateTxPropagatedObjectsHash();
+ }
+ return PropagatedObjectsInTx;
+ }
+
+ /* hashset in top level sub-transaction */
+ SubXactContext *state = llast(activeSubXactContexts);
+ if (state->propagatedObjects == NULL && !readonly)
+ {
+ /* lazily create hashset for sub-transaction, for mutating uses */
+ state->propagatedObjects = CreateTxPropagatedObjectsHash();
+ }
+ return state->propagatedObjects;
+}
+
+
+/*
+ * ParentTransactionPropagatedObjects returns the objects propagated in parent
+ * transaction of active sub-transaction. It returns the root transaction if
+ * no sub-transaction exists.
+ *
+ * If readonly is true, the hashmap is not created lazily; NULL is returned when it
+ * does not already exist in the parent transaction.
+ */
+static HTAB *
+ParentTransactionPropagatedObjects(bool readonly)
+{
+ int nestingLevel = list_length(activeSubXactContexts);
+ if (nestingLevel <= 1)
+ {
+ /*
+ * The parent is the root transaction, when there is single level sub-transaction
+ * or no sub-transaction.
+ */
+ if (PropagatedObjectsInTx == NULL && !readonly)
+ {
+ /* lazily create hashset for root transaction, for mutating uses */
+ PropagatedObjectsInTx = CreateTxPropagatedObjectsHash();
+ }
+ return PropagatedObjectsInTx;
+ }
+
+ /* parent is upper sub-transaction */
+ Assert(nestingLevel >= 2);
+ SubXactContext *state = list_nth(activeSubXactContexts, nestingLevel - 2);
+ if (state->propagatedObjects == NULL && !readonly)
+ {
+ /* lazily create hashset for parent sub-transaction */
+ state->propagatedObjects = CreateTxPropagatedObjectsHash();
+ }
+ return state->propagatedObjects;
+}
+
+
+/*
+ * MovePropagatedObjectsToParentTransaction moves all objects propagated in the current
+ * sub-transaction to the parent transaction. This should only be called when there is
+ * active sub-transaction.
+ */
+static void
+MovePropagatedObjectsToParentTransaction(void)
+{
+ Assert(llast(activeSubXactContexts) != NULL);
+ HTAB *currentPropagatedObjects = CurrentTransactionPropagatedObjects(true);
+ if (currentPropagatedObjects == NULL)
+ {
+ /* nothing to move */
+ return;
+ }
+
+ /*
+ * Only after we know we have objects to move into the parent do we get a handle on
+ * a guaranteed existing parent hash table. This makes sure that the parents only
+ * get populated once there are objects to be tracked.
+ */
+ HTAB *parentPropagatedObjects = ParentTransactionPropagatedObjects(false);
+
+ HASH_SEQ_STATUS propagatedObjectsSeq;
+ hash_seq_init(&propagatedObjectsSeq, currentPropagatedObjects);
+ ObjectAddress *objectAddress = NULL;
+ while ((objectAddress = hash_seq_search(&propagatedObjectsSeq)) != NULL)
+ {
+ hash_search(parentPropagatedObjects, objectAddress, HASH_ENTER, NULL);
+ }
+}
+
+
+/*
+ * DependencyInPropagatedObjectsHash checks if dependency is in given hashset
+ * of propagated objects.
+ */
+static bool
+DependencyInPropagatedObjectsHash(HTAB *propagatedObjects, const
+ ObjectAddress *dependency)
+{
+ if (propagatedObjects == NULL)
+ {
+ return false;
+ }
+
+ bool found = false;
+ hash_search(propagatedObjects, dependency, HASH_FIND, &found);
+ return found;
+}
+
+
+/*
+ * CreateTxPropagatedObjectsHash creates a hashset to keep track of the objects
+ * propagated in the current root transaction or sub-transaction.
+ */
+static HTAB *
+CreateTxPropagatedObjectsHash(void)
+{
+ HASHCTL info;
+ memset(&info, 0, sizeof(info));
+ info.keysize = sizeof(ObjectAddress);
+ info.entrysize = sizeof(ObjectAddress);
+ info.hash = tag_hash;
+ info.hcxt = CitusXactCallbackContext;
+
+ int hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
+ return hash_create("Tx Propagated Objects", 16, &info, hashFlags);
+}
+
+
+/*
+ * TrackPropagatedObject adds given object into the objects propagated in the current
+ * sub-transaction.
+ */
+void
+TrackPropagatedObject(const ObjectAddress *objectAddress)
+{
+ HTAB *currentPropagatedObjects = CurrentTransactionPropagatedObjects(false);
+ hash_search(currentPropagatedObjects, objectAddress, HASH_ENTER, NULL);
+}
+
+
+/*
+ * TrackPropagatedTableAndSequences adds given table and its sequences to the objects
+ * propagated in the current sub-transaction.
+ */
+void
+TrackPropagatedTableAndSequences(Oid relationId)
+{
+ /* track table */
+ ObjectAddress *tableAddress = palloc0(sizeof(ObjectAddress));
+ ObjectAddressSet(*tableAddress, RelationRelationId, relationId);
+ TrackPropagatedObject(tableAddress);
+
+ /* track its sequences */
+ List *ownedSeqIdList = getOwnedSequences(relationId);
+ Oid ownedSeqId = InvalidOid;
+ foreach_oid(ownedSeqId, ownedSeqIdList)
+ {
+ ObjectAddress *seqAddress = palloc0(sizeof(ObjectAddress));
+ ObjectAddressSet(*seqAddress, RelationRelationId, ownedSeqId);
+ TrackPropagatedObject(seqAddress);
+ }
+}
+
+
+/*
+ * ResetPropagatedObjects destroys hashset of propagated objects in the root transaction.
+ */
+void
+ResetPropagatedObjects(void)
+{
+ hash_destroy(PropagatedObjectsInTx);
+ PropagatedObjectsInTx = NULL;
+}
+
+
+/*
+ * HasAnyDependencyInPropagatedObjects decides if any dependency of given object is
+ * propagated in the current transaction.
+ */
+bool
+HasAnyDependencyInPropagatedObjects(const ObjectAddress *objectAddress)
+{
+ List *dependencyList = GetAllSupportedDependenciesForObject(objectAddress);
+ ObjectAddress *dependency = NULL;
+ foreach_ptr(dependency, dependencyList)
+ {
+ /* first search in root transaction */
+ if (DependencyInPropagatedObjectsHash(PropagatedObjectsInTx, dependency))
+ {
+ return true;
+ }
+
+ /* search in all nested sub-transactions */
+ if (activeSubXactContexts == NIL)
+ {
+ continue;
+ }
+ SubXactContext *state = NULL;
+ foreach_ptr(state, activeSubXactContexts)
+ {
+ if (DependencyInPropagatedObjectsHash(state->propagatedObjects, dependency))
+ {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
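
A minimal sketch, not part of this patch, of how a DDL-propagation call site could use the new tracking API; the wrapper names here are hypothetical:

```c
/*
 * Hypothetical call site: after shipping a table's DDL to the metadata workers,
 * remember the table (and its owned sequences) so that later commands in the
 * same transaction can detect the dependency.
 */
static void
PropagateTableSketch(Oid relationId)
{
	/* ... SendCommandToWorkersWithMetadata(ddlCommand) calls go here ... */

	/* record the table and its owned sequences for this (sub-)transaction */
	TrackPropagatedTableAndSequences(relationId);
}

static bool
DependsOnObjectPropagatedInThisTxSketch(const ObjectAddress *address)
{
	/* true if any dependency of the object was propagated earlier in this tx */
	return HasAnyDependencyInPropagatedObjects(address);
}
```
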
diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c
index c2ccd2478..b46419dc2 100644
--- a/src/backend/distributed/transaction/transaction_recovery.c
+++ b/src/backend/distributed/transaction/transaction_recovery.c
@@ -404,7 +404,7 @@ PendingWorkerTransactionList(MultiConnection *connection)
int32 coordinatorId = GetLocalGroupId();
appendStringInfo(command, "SELECT gid FROM pg_prepared_xacts "
- "WHERE gid LIKE 'citus\\_%d\\_%%'",
+ "WHERE gid LIKE 'citus\\_%d\\_%%' and database = current_database()",
coordinatorId);
int querySent = SendRemoteCommand(connection, command->data);
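
With a hypothetical coordinator group id of 0, the command built above expands to roughly the following, so recovery now only considers prepared transactions of the connected database:

```sql
SELECT gid FROM pg_prepared_xacts
WHERE gid LIKE 'citus\_0\_%' and database = current_database();
```
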
diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c
index b4a497647..03ecbea72 100644
--- a/src/backend/distributed/transaction/worker_transaction.c
+++ b/src/backend/distributed/transaction/worker_transaction.c
@@ -124,7 +124,7 @@ SendCommandToWorkersWithMetadata(const char *command)
* owner to ensure write access to the Citus metadata tables.
*
* Since we prevent to open superuser connections for metadata tables, it is
- * discourated to use it. Consider using it only for propagating pg_dist_object
+ * discouraged to use it. Consider using it only for propagating pg_dist_object
* tuples for dependent objects.
*/
void
@@ -135,6 +135,21 @@ SendCommandToWorkersWithMetadataViaSuperUser(const char *command)
}
+/*
+ * SendCommandListToWorkersWithMetadata sends all given commands to all metadata
+ * workers as the current user. See `SendCommandToWorkersWithMetadata` for details.
+ */
+void
+SendCommandListToWorkersWithMetadata(List *commands)
+{
+ char *command = NULL;
+ foreach_ptr(command, commands)
+ {
+ SendCommandToWorkersWithMetadata(command);
+ }
+}
+
+
/*
* TargetWorkerSetNodeList returns a list of WorkerNode's that satisfies the
* TargetWorkerSet.
diff --git a/src/backend/distributed/utils/acquire_lock.c b/src/backend/distributed/utils/acquire_lock.c
index c33ef0376..f414167b3 100644
--- a/src/backend/distributed/utils/acquire_lock.c
+++ b/src/backend/distributed/utils/acquire_lock.c
@@ -39,7 +39,7 @@
#include "distributed/version_compat.h"
/* forward declaration of background worker entrypoint */
-extern void LockAcquireHelperMain(Datum main_arg);
+extern PGDLLEXPORT void LockAcquireHelperMain(Datum main_arg);
/* forward declaration of helper functions */
static void lock_acquire_helper_sigterm(SIGNAL_ARGS);
diff --git a/src/backend/distributed/utils/aggregate_utils.c b/src/backend/distributed/utils/aggregate_utils.c
index 91f6fc523..773e0aa25 100644
--- a/src/backend/distributed/utils/aggregate_utils.c
+++ b/src/backend/distributed/utils/aggregate_utils.c
@@ -168,7 +168,8 @@ aclcheckAggregate(ObjectType objectType, Oid userOid, Oid funcOid)
AclResult aclresult;
if (funcOid != InvalidOid)
{
- aclresult = pg_proc_aclcheck(funcOid, userOid, ACL_EXECUTE);
+ aclresult = object_aclcheck(ProcedureRelationId, funcOid, userOid,
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
{
aclcheck_error(aclresult, objectType, get_func_name(funcOid));
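
The new call uses the PG16 spelling. A version-compat shim along these lines (a sketch, assuming only function ACL checks need it; not necessarily what the compat header actually contains) would let pre-16 builds keep the same call site:

```c
/*
 * Sketch: PG16 folded the per-catalog helpers (pg_proc_aclcheck & friends)
 * into object_aclcheck(), so older servers can map the new spelling back.
 */
#if PG_VERSION_NUM < PG_VERSION_16
static inline AclResult
object_aclcheck(Oid classid, Oid objid, Oid roleid, AclMode mode)
{
	switch (classid)
	{
		case ProcedureRelationId:
			return pg_proc_aclcheck(objid, roleid, mode);

		default:
			elog(ERROR, "unsupported object class: %u", classid);
	}
}
#endif
```
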
diff --git a/src/backend/distributed/utils/colocation_utils.c b/src/backend/distributed/utils/colocation_utils.c
index c386e9fcf..e7007874b 100644
--- a/src/backend/distributed/utils/colocation_utils.c
+++ b/src/backend/distributed/utils/colocation_utils.c
@@ -53,6 +53,7 @@ static int CompareShardPlacementsByNode(const void *leftElement,
const void *rightElement);
static uint32 CreateColocationGroupForRelation(Oid sourceRelationId);
static void BreakColocation(Oid sourceRelationId);
+static uint32 SingleShardTableGetNodeId(Oid relationId);
/* exports for SQL callable functions */
@@ -174,12 +175,11 @@ BreakColocation(Oid sourceRelationId)
*/
Relation pgDistColocation = table_open(DistColocationRelationId(), ExclusiveLock);
- uint32 newColocationId = GetNextColocationId();
- bool localOnly = false;
- UpdateRelationColocationGroup(sourceRelationId, newColocationId, localOnly);
+ uint32 oldColocationId = TableColocationId(sourceRelationId);
+ CreateColocationGroupForRelation(sourceRelationId);
- /* if there is not any remaining table in the colocation group, delete it */
- DeleteColocationGroupIfNoTablesBelong(sourceRelationId);
+ /* if there is not any remaining table in the old colocation group, delete it */
+ DeleteColocationGroupIfNoTablesBelong(oldColocationId);
table_close(pgDistColocation, NoLock);
}
@@ -532,7 +532,7 @@ ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType,
ScanKeyInit(&scanKey[0], Anum_pg_dist_colocation_distributioncolumntype,
BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributionColumnType));
ScanKeyInit(&scanKey[1], Anum_pg_dist_colocation_shardcount,
- BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(shardCount));
+ BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(shardCount));
ScanKeyInit(&scanKey[2], Anum_pg_dist_colocation_replicationfactor,
BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(replicationFactor));
ScanKeyInit(&scanKey[3], Anum_pg_dist_colocation_distributioncolumncollation,
@@ -989,7 +989,7 @@ ColocationGroupTableList(uint32 colocationId, uint32 count)
}
ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_colocationid,
- BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(colocationId));
+ BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(colocationId));
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
@@ -1166,7 +1166,7 @@ ColocatedNonPartitionShardIntervalList(ShardInterval *shardInterval)
* guarantee that the table isn't dropped for the remainder of the transaction.
*/
Oid
-ColocatedTableId(Oid colocationId)
+ColocatedTableId(int32 colocationId)
{
Oid colocatedTableId = InvalidOid;
bool indexOK = true;
@@ -1183,7 +1183,7 @@ ColocatedTableId(Oid colocationId)
}
ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_colocationid,
- BTEqualStrategyNumber, F_INT4EQ, ObjectIdGetDatum(colocationId));
+ BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(colocationId));
Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
@@ -1231,6 +1231,56 @@ ColocatedTableId(Oid colocationId)
}
+/*
+ * SingleShardTableColocationNodeId takes a colocation id that presumably
+ * belongs to a colocation group used to colocate a set of single-shard
+ * tables, and returns the id of the node that stores / is expected to store
+ * the shards within the colocation group.
+ */
+uint32
+SingleShardTableColocationNodeId(uint32 colocationId)
+{
+ List *tablesInColocationGroup = ColocationGroupTableList(colocationId, 0);
+ if (list_length(tablesInColocationGroup) == 0)
+ {
+ int workerNodeIndex =
+ EmptySingleShardTableColocationDecideNodeId(colocationId);
+ List *workerNodeList = DistributedTablePlacementNodeList(RowShareLock);
+ WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList, workerNodeIndex);
+
+ return workerNode->nodeId;
+ }
+ else
+ {
+ Oid colocatedTableId = ColocatedTableId(colocationId);
+ return SingleShardTableGetNodeId(colocatedTableId);
+ }
+}
+
+
+/*
+ * SingleShardTableGetNodeId returns the id of the node that stores the shard
+ * of the given single-shard table.
+ */
+static uint32
+SingleShardTableGetNodeId(Oid relationId)
+{
+ if (!IsCitusTableType(relationId, SINGLE_SHARD_DISTRIBUTED))
+ {
+ ereport(ERROR, (errmsg("table is not a single-shard distributed table")));
+ }
+
+ int64 shardId = GetFirstShardId(relationId);
+ List *shardPlacementList = ShardPlacementList(shardId);
+ if (list_length(shardPlacementList) != 1)
+ {
+ ereport(ERROR, (errmsg("table shard does not have a single shard placement")));
+ }
+
+ return ((ShardPlacement *) linitial(shardPlacementList))->nodeId;
+}
+
+
/*
* ColocatedShardIdInRelation returns shardId of the shard from given relation, so that
* returned shard is co-located with given shard.
@@ -1292,7 +1342,7 @@ DeleteColocationGroupLocally(uint32 colocationId)
Relation pgDistColocation = table_open(DistColocationRelationId(), RowExclusiveLock);
ScanKeyInit(&scanKey[0], Anum_pg_dist_colocation_colocationid,
- BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(colocationId));
+ BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(colocationId));
SysScanDesc scanDescriptor = systable_beginscan(pgDistColocation, InvalidOid, indexOK,
NULL, scanKeyCount, scanKey);
diff --git a/src/backend/distributed/utils/function_utils.c b/src/backend/distributed/utils/function_utils.c
index 006e29555..48f878e13 100644
--- a/src/backend/distributed/utils/function_utils.c
+++ b/src/backend/distributed/utils/function_utils.c
@@ -41,7 +41,8 @@ FunctionOidExtended(const char *schemaName, const char *functionName, int argume
bool missingOK)
{
char *qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName);
- List *qualifiedFunctionNameList = stringToQualifiedNameList(qualifiedFunctionName);
+ List *qualifiedFunctionNameList = stringToQualifiedNameList_compat(
+ qualifiedFunctionName);
List *argumentList = NIL;
const bool findVariadics = false;
const bool findDefaults = false;
diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c
index ab36483fd..924ba4c54 100644
--- a/src/backend/distributed/utils/multi_partitioning_utils.c
+++ b/src/backend/distributed/utils/multi_partitioning_utils.c
@@ -411,9 +411,9 @@ CheckConstraintNameListForRelation(Oid relationId)
Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid,
- BTEqualStrategyNumber, F_OIDEQ, relationId);
+ BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId));
ScanKeyInit(&scanKey[1], Anum_pg_constraint_contype,
- BTEqualStrategyNumber, F_CHAREQ, CONSTRAINT_CHECK);
+ BTEqualStrategyNumber, F_CHAREQ, CharGetDatum(CONSTRAINT_CHECK));
bool useIndex = false;
SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, InvalidOid, useIndex,
diff --git a/src/backend/distributed/utils/namespace_utils.c b/src/backend/distributed/utils/namespace_utils.c
index a97adb573..4f822b7d2 100644
--- a/src/backend/distributed/utils/namespace_utils.c
+++ b/src/backend/distributed/utils/namespace_utils.c
@@ -11,22 +11,33 @@
#include "postgres.h"
-#include "catalog/namespace.h"
-#include "distributed/citus_ruleutils.h"
#include "distributed/namespace_utils.h"
+#include "utils/guc.h"
#include "utils/regproc.h"
/*
- * PushOverrideEmptySearchPath pushes search_path to be NIL and sets addCatalog to
- * true so that all objects outside of pg_catalog will be schema-prefixed.
- * Afterwards, PopOverrideSearchPath can be used to revert the search_path back.
+ * PushEmptySearchPath uses the equivalent of a function SET option so that the
+ * setting persists for the exact duration of the transaction; guc.c takes care
+ * of undoing the setting on error.
+ *
+ * We set search_path to "pg_catalog" instead of "" to expose useful utilities.
+ */
+int
+PushEmptySearchPath()
+{
+ int saveNestLevel = NewGUCNestLevel();
+ (void) set_config_option("search_path", "pg_catalog",
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0, false);
+ return saveNestLevel;
+}
+
+
+/*
+ * Restore the GUC variable search_path we set in PushEmptySearchPath
*/
void
-PushOverrideEmptySearchPath(MemoryContext memoryContext)
+PopEmptySearchPath(int saveNestLevel)
{
- OverrideSearchPath *overridePath = GetOverrideSearchPath(memoryContext);
- overridePath->schemas = NIL;
- overridePath->addCatalog = true;
-
- PushOverrideSearchPath(overridePath);
+ AtEOXact_GUC(true, saveNestLevel);
}
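
A sketch of the intended call pattern around a deparse step; the middle comment stands in for the caller's real work:

```c
/* schema-qualify everything we deparse by hiding the caller's search_path */
int saveNestLevel = PushEmptySearchPath();

/* ... run the ruleutils/deparse logic that should only see pg_catalog ... */

/* pop the GUC nest level, restoring the caller's search_path */
PopEmptySearchPath(saveNestLevel);
```
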
diff --git a/src/backend/distributed/utils/relation_utils.c b/src/backend/distributed/utils/relation_utils.c
index 12c76a4ab..d39c1f071 100644
--- a/src/backend/distributed/utils/relation_utils.c
+++ b/src/backend/distributed/utils/relation_utils.c
@@ -14,6 +14,9 @@
#include "distributed/relation_utils.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "miscadmin.h"
+#endif
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -28,3 +31,31 @@ RelationGetNamespaceName(Relation relation)
char *namespaceName = get_namespace_name(namespaceId);
return namespaceName;
}
+
+
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+/*
+ * GetFilledPermissionInfo creates an RTEPermissionInfo for a given RTE, fills it
+ * with the given data and returns it. This function exists because Postgres's
+ * addRTEPermissionInfo doesn't fill in the data.
+ *
+ * The given data consists of relid, inh and requiredPerms. Unless specified
+ * otherwise, Postgres performs permission checks as GetUserId(), so the
+ * remaining field is currently filled like this:
+ * perminfo->checkAsUser = GetUserId();
+ */
+RTEPermissionInfo *
+GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms)
+{
+ RTEPermissionInfo *perminfo = makeNode(RTEPermissionInfo);
+ perminfo->relid = relid;
+ perminfo->inh = inh;
+ perminfo->requiredPerms = requiredPerms;
+ perminfo->checkAsUser = GetUserId();
+ return perminfo;
+}
+
+
+#endif
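
A rough sketch, not from this patch, of how a PG16 caller might wire the result into a query tree; relationId, rte and query are assumed to be in scope:

```c
#if PG_VERSION_NUM >= PG_VERSION_16
RTEPermissionInfo *perminfo =
	GetFilledPermissionInfo(relationId, rte->inh, ACL_SELECT);

/* PG16 keeps permission info in a separate list on the Query ... */
query->rteperminfos = lappend(query->rteperminfos, perminfo);

/* ... and each RTE points at its entry via a 1-based index */
rte->perminfoindex = list_length(query->rteperminfos);
#endif
```
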
diff --git a/src/backend/distributed/utils/replication_origin_session_utils.c b/src/backend/distributed/utils/replication_origin_session_utils.c
index dbd244271..800d82ef7 100644
--- a/src/backend/distributed/utils/replication_origin_session_utils.c
+++ b/src/backend/distributed/utils/replication_origin_session_utils.c
@@ -20,8 +20,6 @@ static void SetupMemoryContextResetReplicationOriginHandler(void);
static void SetupReplicationOriginSessionHelper(bool isContexResetSetupNeeded);
-static inline bool IsLocalReplicationOriginSessionActive(void);
-
PG_FUNCTION_INFO_V1(citus_internal_start_replication_origin_tracking);
PG_FUNCTION_INFO_V1(citus_internal_stop_replication_origin_tracking);
PG_FUNCTION_INFO_V1(citus_internal_is_replication_origin_tracking_active);
@@ -68,6 +66,16 @@ citus_internal_stop_replication_origin_tracking(PG_FUNCTION_ARGS)
}
+/* IsLocalReplicationOriginSessionActive checks if the current replication origin
+ * session is active in the local node.
+ */
+static inline bool
+IsLocalReplicationOriginSessionActive(void)
+{
+ return (replorigin_session_origin == DoNotReplicateId);
+}
+
+
/* citus_internal_is_replication_origin_tracking_active checks if the current replication origin
* session is active in the local node.
*/
@@ -79,16 +87,6 @@ citus_internal_is_replication_origin_tracking_active(PG_FUNCTION_ARGS)
}
-/* IsLocalReplicationOriginSessionActive checks if the current replication origin
- * session is active in the local node.
- */
-inline bool
-IsLocalReplicationOriginSessionActive(void)
-{
- return (replorigin_session_origin == DoNotReplicateId);
-}
-
-
/*
* SetupMemoryContextResetReplicationOriginHandler registers a callback function
* that resets the replication origin session in case of any error for the current
diff --git a/src/backend/distributed/utils/statistics_collection.c b/src/backend/distributed/utils/statistics_collection.c
index 3783414e6..a442aac95 100644
--- a/src/backend/distributed/utils/statistics_collection.c
+++ b/src/backend/distributed/utils/statistics_collection.c
@@ -14,7 +14,11 @@
#include "fmgr.h"
#include "utils/uuid.h"
+#if defined(HAVE_LIBCURL) && defined(ENABLE_CITUS_STATISTICS_COLLECTION)
bool EnableStatisticsCollection = true; /* send basic usage statistics to Citus */
+#else
+bool EnableStatisticsCollection = false;
+#endif
PG_FUNCTION_INFO_V1(citus_server_id);
diff --git a/src/backend/distributed/utils/tenant_schema_metadata.c b/src/backend/distributed/utils/tenant_schema_metadata.c
index a83842541..e634795a2 100644
--- a/src/backend/distributed/utils/tenant_schema_metadata.c
+++ b/src/backend/distributed/utils/tenant_schema_metadata.c
@@ -134,7 +134,7 @@ ColocationIdGetTenantSchemaId(uint32 colocationId)
AccessShareLock);
ScanKeyData scanKey[1];
ScanKeyInit(&scanKey[0], Anum_pg_dist_schema_colocationid,
- BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(colocationId));
+ BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(colocationId));
bool indexOk = true;
SysScanDesc scanDescriptor = systable_beginscan(pgDistTenantSchema,
diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c
index b40f712b5..804e71125 100644
--- a/src/backend/distributed/worker/worker_create_or_replace.c
+++ b/src/backend/distributed/worker/worker_create_or_replace.c
@@ -525,8 +525,8 @@ CreateRenameTypeStmt(const ObjectAddress *address, char *newName)
RenameStmt *stmt = makeNode(RenameStmt);
stmt->renameType = OBJECT_TYPE;
- stmt->object = (Node *) stringToQualifiedNameList(format_type_be_qualified(
- address->objectId));
+ stmt->object = (Node *) stringToQualifiedNameList_compat(format_type_be_qualified(
+ address->objectId));
stmt->newname = newName;
diff --git a/src/bin/pg_send_cancellation/.gitignore b/src/bin/pg_send_cancellation/.gitignore
deleted file mode 100644
index 8088a2e98..000000000
--- a/src/bin/pg_send_cancellation/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-pg_send_cancellation
diff --git a/src/bin/pg_send_cancellation/Makefile b/src/bin/pg_send_cancellation/Makefile
deleted file mode 100644
index 4515c5019..000000000
--- a/src/bin/pg_send_cancellation/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-citus_top_builddir = ../../..
-
-PROGRAM = pg_send_cancellation
-PGFILEDESC = "pg_send_cancellation sends a custom cancellation message"
-OBJS = $(citus_abs_srcdir)/src/bin/pg_send_cancellation/pg_send_cancellation.o
-PG_CPPFLAGS = -I$(libpq_srcdir)
-PG_LIBS_INTERNAL = $(libpq_pgport)
-PG_LDFLAGS += $(LDFLAGS)
-
-include $(citus_top_builddir)/Makefile.global
-
-# We reuse all the Citus flags (incl. security flags), but we are building a program not a shared library
-# We sometimes build Citus with a newer version of gcc than Postgres was built
-# with and this breaks LTO (link-time optimization). Even if disabling it can
-# have some perf impact this is ok because pg_send_cancellation is only used
-# for tests anyway.
-override CFLAGS := $(filter-out -shared, $(CFLAGS)) -fno-lto
-
-# Filter out unneeded dependencies
-override LIBS := $(filter-out -lz -lreadline -ledit -ltermcap -lncurses -lcurses -lpam, $(LIBS))
-
-clean: clean-pg_send_cancellation
-clean-pg_send_cancellation:
- rm -f $(PROGRAM) $(OBJS)
diff --git a/src/bin/pg_send_cancellation/README.md b/src/bin/pg_send_cancellation/README.md
deleted file mode 100644
index c83316419..000000000
--- a/src/bin/pg_send_cancellation/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# pg_send_cancellation
-
-pg_send_cancellation is a program for manually sending a cancellation
-to a Postgres endpoint. It is effectively a command-line version of
-PQcancel in libpq, but it can use any PID or cancellation key.
-
-We use pg_send_cancellation primarily to propagate cancellations between pgbouncers
-behind a load balancer. Since the cancellation protocol involves
-opening a new connection, the new connection may go to a different
-node that does not recognize the cancellation key. To handle that
-scenario, we modified pgbouncer to pass unrecognized cancellation
-keys to a shell command.
-
-Users can configure the cancellation_command, which will be run with:
-```
-<remote_ip> <remote_port> <pid> <cancel_key>
-```
-
-Note that pgbouncer does not use actual PIDs. Instead, it generates PID and cancellation key together a random 8-byte number. This makes the chance of collisions exceedingly small.
-
-By providing pg_send_cancellation as part of Citus, we can use a shell script that pgbouncer invokes to propagate the cancellation to all *other* worker nodes in the same cluster, for example:
-
-```bash
-#!/bin/sh
-remote_ip=$1
-remote_port=$2
-pid=$3
-cancel_key=$4
-
-postgres_path=/usr/pgsql-14/bin
-pgbouncer_port=6432
-
-nodes_query="select nodename from pg_dist_node where groupid > 0 and groupid not in (select groupid from pg_dist_local_group) and nodecluster = current_setting('citus.cluster_name')"
-
-# Get hostnames of other worker nodes in the cluster, and send cancellation to their pgbouncers
-$postgres_path/psql -c "$nodes_query" -tAX | xargs -n 1 sh -c "$postgres_path/pg_send_cancellation $pid $cancel_key \$0 $pgbouncer_port"
-```
-
-One thing we need to be careful about is that the cancellations do not get forwarded
-back-and-forth. This is handled in pgbouncer by setting the last bit of all generated
-cancellation keys (sent to clients) to 1, and setting the last bit of all forwarded bits to 0.
-That way, when a pgbouncer receives a cancellation key with the last bit set to 0,
-it knows it is from another pgbouncer and should not forward further, and should set
-the last bit to 1 when comparing to stored cancellation keys.
-
-Another thing we need to be careful about is that the integers should be encoded
-as big endian on the wire.
diff --git a/src/bin/pg_send_cancellation/pg_send_cancellation.c b/src/bin/pg_send_cancellation/pg_send_cancellation.c
deleted file mode 100644
index 0ab2be95a..000000000
--- a/src/bin/pg_send_cancellation/pg_send_cancellation.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * pg_send_cancellation is a program for manually sending a cancellation
- * to a Postgres endpoint. It is effectively a command-line version of
- * PQcancel in libpq, but it can use any PID or cancellation key.
- *
- * Portions Copyright (c) Citus Data, Inc.
- *
- * For the internal_cancel function:
- *
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * Permission to use, copy, modify, and distribute this software and its
- * documentation for any purpose, without fee, and without a written agreement
- * is hereby granted, provided that the above copyright notice and this
- * paragraph and the following two paragraphs appear in all copies.
- *
- * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
- * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
- * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
- * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
- * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
- * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
- *
- */
-#include "postgres_fe.h"
-
-#include
-#include
-#include
-#include
-#include
-
-#include "common/ip.h"
-#include "common/link-canary.h"
-#include "common/scram-common.h"
-#include "common/string.h"
-#include "libpq-fe.h"
-#include "libpq-int.h"
-#include "mb/pg_wchar.h"
-#include "port/pg_bswap.h"
-
-
-#define ERROR_BUFFER_SIZE 256
-
-
-static int internal_cancel(SockAddr *raddr, int be_pid, int be_key,
- char *errbuf, int errbufsize);
-
-
-/*
- * main entry point into the pg_send_cancellation program.
- */
-int
-main(int argc, char *argv[])
-{
- if (argc == 2 && strcmp(argv[1], "-V") == 0)
- {
- pg_fprintf(stdout, "pg_send_cancellation (PostgreSQL) " PG_VERSION "\n");
- return 0;
- }
-
- if (argc < 4 || argc > 5)
- {
- char *program = argv[0];
- pg_fprintf(stderr, "%s requires 4 arguments\n\n", program);
- pg_fprintf(stderr, "Usage: %s [port]\n", program);
- return 1;
- }
-
- char *pidString = argv[1];
- char *cancelKeyString = argv[2];
- char *host = argv[3];
- char *portString = "5432";
-
- if (argc >= 5)
- {
- portString = argv[4];
- }
-
- /* parse the PID and cancellation key */
- int pid = strtol(pidString, NULL, 10);
- int cancelAuthCode = strtol(cancelKeyString, NULL, 10);
-
- char errorBuffer[ERROR_BUFFER_SIZE] = { 0 };
-
- struct addrinfo *ipAddressList;
- struct addrinfo hint;
- int ipAddressListFamily = AF_UNSPEC;
- SockAddr socketAddress;
-
- memset(&hint, 0, sizeof(hint));
- hint.ai_socktype = SOCK_STREAM;
- hint.ai_family = ipAddressListFamily;
-
- /* resolve the hostname to an IP */
- int ret = pg_getaddrinfo_all(host, portString, &hint, &ipAddressList);
- if (ret || !ipAddressList)
- {
- pg_fprintf(stderr, "could not translate host name \"%s\" to address: %s\n",
- host, gai_strerror(ret));
- return 1;
- }
-
- if (ipAddressList->ai_addrlen > sizeof(socketAddress.addr))
- {
- pg_fprintf(stderr, "invalid address length");
- return 1;
- }
-
- /*
- * Explanation of IGNORE-BANNED:
- * This is a common pattern when using getaddrinfo. The system guarantees
- * that ai_addrlen < sizeof(socketAddress.addr). Out of an abundance of
- * caution. We also check it above.
- */
- memcpy(&socketAddress.addr, ipAddressList->ai_addr, ipAddressList->ai_addrlen); /* IGNORE-BANNED */
- socketAddress.salen = ipAddressList->ai_addrlen;
-
- /* send the cancellation */
- bool cancelSucceeded = internal_cancel(&socketAddress, pid, cancelAuthCode,
- errorBuffer, sizeof(errorBuffer));
- if (!cancelSucceeded)
- {
- pg_fprintf(stderr, "sending cancellation to %s:%s failed: %s",
- host, portString, errorBuffer);
- return 1;
- }
-
- pg_freeaddrinfo_all(ipAddressListFamily, ipAddressList);
-
- return 0;
-}
-
-
-/* *INDENT-OFF* */
-
-/*
- * internal_cancel is copied from fe-connect.c
- *
- * The return value is true if the cancel request was successfully
- * dispatched, false if not (in which case an error message is available).
- * Note: successful dispatch is no guarantee that there will be any effect at
- * the backend. The application must read the operation result as usual.
- *
- * CAUTION: we want this routine to be safely callable from a signal handler
- * (for example, an application might want to call it in a SIGINT handler).
- * This means we cannot use any C library routine that might be non-reentrant.
- * malloc/free are often non-reentrant, and anything that might call them is
- * just as dangerous. We avoid sprintf here for that reason. Building up
- * error messages with strcpy/strcat is tedious but should be quite safe.
- * We also save/restore errno in case the signal handler support doesn't.
- *
- * internal_cancel() is an internal helper function to make code-sharing
- * between the two versions of the cancel function possible.
- */
-static int
-internal_cancel(SockAddr *raddr, int be_pid, int be_key,
- char *errbuf, int errbufsize)
-{
- int save_errno = SOCK_ERRNO;
- pgsocket tmpsock = PGINVALID_SOCKET;
- char sebuf[PG_STRERROR_R_BUFLEN];
- int maxlen;
- struct
- {
- uint32 packetlen;
- CancelRequestPacket cp;
- } crp;
-
- /*
- * We need to open a temporary connection to the postmaster. Do this with
- * only kernel calls.
- */
- if ((tmpsock = socket(raddr->addr.ss_family, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
- {
- strlcpy(errbuf, "PQcancel() -- socket() failed: ", errbufsize);
- goto cancel_errReturn;
- }
-retry3:
- if (connect(tmpsock, (struct sockaddr *) &raddr->addr, raddr->salen) < 0)
- {
- if (SOCK_ERRNO == EINTR)
- /* Interrupted system call - we'll just try again */
- goto retry3;
- strlcpy(errbuf, "PQcancel() -- connect() failed: ", errbufsize);
- goto cancel_errReturn;
- }
-
- /*
- * We needn't set nonblocking I/O or NODELAY options here.
- */
-
- /* Create and send the cancel request packet. */
-
- crp.packetlen = pg_hton32((uint32) sizeof(crp));
- crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE);
- crp.cp.backendPID = pg_hton32(be_pid);
- crp.cp.cancelAuthCode = pg_hton32(be_key);
-
-retry4:
- if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp))
- {
- if (SOCK_ERRNO == EINTR)
- /* Interrupted system call - we'll just try again */
- goto retry4;
- strlcpy(errbuf, "PQcancel() -- send() failed: ", errbufsize);
- goto cancel_errReturn;
- }
-
- /*
- * Wait for the postmaster to close the connection, which indicates that
- * it's processed the request. Without this delay, we might issue another
- * command only to find that our cancel zaps that command instead of the
- * one we thought we were canceling. Note we don't actually expect this
- * read to obtain any data, we are just waiting for EOF to be signaled.
- */
-retry5:
- if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
- {
- if (SOCK_ERRNO == EINTR)
- /* Interrupted system call - we'll just try again */
- goto retry5;
- /* we ignore other error conditions */
- }
-
- /* All done */
- closesocket(tmpsock);
- SOCK_ERRNO_SET(save_errno);
- return true;
-
-cancel_errReturn:
-
- /*
- * Make sure we don't overflow the error buffer. Leave space for the \n at
- * the end, and for the terminating zero.
- */
- maxlen = errbufsize - strlen(errbuf) - 2;
- if (maxlen >= 0)
- {
- /*
- * Explanation of IGNORE-BANNED:
- * This is well-tested libpq code that we would like to preserve in its
- * original form. The appropriate length calculation is done above.
- */
- strncat(errbuf, SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)), /* IGNORE-BANNED */
- maxlen);
- strcat(errbuf, "\n"); /* IGNORE-BANNED */
- }
- if (tmpsock != PGINVALID_SOCKET)
- closesocket(tmpsock);
- SOCK_ERRNO_SET(save_errno);
- return false;
-}
-
-/* *INDENT-ON* */
diff --git a/src/include/columnar/columnar.h b/src/include/columnar/columnar.h
index 13195b1d6..64cf745e1 100644
--- a/src/include/columnar/columnar.h
+++ b/src/include/columnar/columnar.h
@@ -16,9 +16,14 @@
#include "fmgr.h"
#include "lib/stringinfo.h"
#include "nodes/parsenodes.h"
+#include "pg_version_compat.h"
#include "storage/bufpage.h"
#include "storage/lockdefs.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "storage/relfilelocator.h"
+#else
#include "storage/relfilenode.h"
+#endif
#include "utils/relcache.h"
#include "utils/snapmgr.h"
@@ -224,7 +229,7 @@ extern void columnar_init_gucs(void);
extern CompressionType ParseCompressionType(const char *compressionTypeString);
/* Function declarations for writing to a columnar table */
-extern ColumnarWriteState * ColumnarBeginWrite(RelFileNode relfilenode,
+extern ColumnarWriteState * ColumnarBeginWrite(RelFileLocator relfilelocator,
ColumnarOptions options,
TupleDesc tupleDescriptor);
extern uint64 ColumnarWriteRow(ColumnarWriteState *state, Datum *columnValues,
@@ -269,31 +274,31 @@ extern ChunkData * CreateEmptyChunkData(uint32 columnCount, bool *columnMask,
uint32 chunkGroupRowCount);
extern void FreeChunkData(ChunkData *chunkData);
extern uint64 ColumnarTableRowCount(Relation relation);
-extern const char * CompressionTypeStr(CompressionType type);
+extern PGDLLEXPORT const char * CompressionTypeStr(CompressionType type);
/* columnar_metadata_tables.c */
-extern void InitColumnarOptions(Oid regclass);
-extern void SetColumnarOptions(Oid regclass, ColumnarOptions *options);
-extern bool DeleteColumnarTableOptions(Oid regclass, bool missingOk);
-extern bool ReadColumnarOptions(Oid regclass, ColumnarOptions *options);
-extern bool IsColumnarTableAmTable(Oid relationId);
+extern PGDLLEXPORT void InitColumnarOptions(Oid regclass);
+extern PGDLLEXPORT void SetColumnarOptions(Oid regclass, ColumnarOptions *options);
+extern PGDLLEXPORT bool DeleteColumnarTableOptions(Oid regclass, bool missingOk);
+extern PGDLLEXPORT bool ReadColumnarOptions(Oid regclass, ColumnarOptions *options);
+extern PGDLLEXPORT bool IsColumnarTableAmTable(Oid relationId);
/* columnar_metadata_tables.c */
-extern void DeleteMetadataRows(RelFileNode relfilenode);
+extern void DeleteMetadataRows(RelFileLocator relfilelocator);
extern uint64 ColumnarMetadataNewStorageId(void);
-extern uint64 GetHighestUsedAddress(RelFileNode relfilenode);
+extern uint64 GetHighestUsedAddress(RelFileLocator relfilelocator);
extern EmptyStripeReservation * ReserveEmptyStripe(Relation rel, uint64 columnCount,
uint64 chunkGroupRowCount,
uint64 stripeRowCount);
extern StripeMetadata * CompleteStripeReservation(Relation rel, uint64 stripeId,
uint64 sizeBytes, uint64 rowCount,
uint64 chunkCount);
-extern void SaveStripeSkipList(RelFileNode relfilenode, uint64 stripe,
+extern void SaveStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
StripeSkipList *stripeSkipList,
TupleDesc tupleDescriptor);
-extern void SaveChunkGroups(RelFileNode relfilenode, uint64 stripe,
+extern void SaveChunkGroups(RelFileLocator relfilelocator, uint64 stripe,
List *chunkGroupRowCounts);
-extern StripeSkipList * ReadStripeSkipList(RelFileNode relfilenode, uint64 stripe,
+extern StripeSkipList * ReadStripeSkipList(RelFileLocator relfilelocator, uint64 stripe,
TupleDesc tupleDescriptor,
uint32 chunkCount,
Snapshot snapshot);
@@ -316,15 +321,16 @@ extern ColumnarWriteState * columnar_init_write_state(Relation relation, TupleDe
tupdesc,
Oid tupSlotRelationId,
SubTransactionId currentSubXid);
-extern void FlushWriteStateForRelfilenode(Oid relfilenode, SubTransactionId
- currentSubXid);
+extern void FlushWriteStateForRelfilenumber(RelFileNumber relfilenumber,
+ SubTransactionId currentSubXid);
extern void FlushWriteStateForAllRels(SubTransactionId currentSubXid, SubTransactionId
parentSubXid);
extern void DiscardWriteStateForAllRels(SubTransactionId currentSubXid, SubTransactionId
parentSubXid);
-extern void MarkRelfilenodeDropped(Oid relfilenode, SubTransactionId currentSubXid);
-extern void NonTransactionDropWriteState(Oid relfilenode);
-extern bool PendingWritesInUpperTransactions(Oid relfilenode,
+extern void MarkRelfilenumberDropped(RelFileNumber relfilenumber,
+ SubTransactionId currentSubXid);
+extern void NonTransactionDropWriteState(RelFileNumber relfilenumber);
+extern bool PendingWritesInUpperTransactions(RelFileNumber relfilenumber,
SubTransactionId currentSubXid);
extern MemoryContext GetWriteContextForDebug(void);
diff --git a/src/include/columnar/columnar_metadata.h b/src/include/columnar/columnar_metadata.h
index c17799483..2af4354a0 100644
--- a/src/include/columnar/columnar_metadata.h
+++ b/src/include/columnar/columnar_metadata.h
@@ -12,6 +12,8 @@
#ifndef COLUMNAR_METADATA_H
#define COLUMNAR_METADATA_H
+#include "pg_version_compat.h"
+
/*
* StripeMetadata represents information about a stripe. This information is
* stored in the metadata table "columnar.stripe".
@@ -49,7 +51,7 @@ typedef struct EmptyStripeReservation
uint64 stripeFirstRowNumber;
} EmptyStripeReservation;
-extern List * StripesForRelfilenode(RelFileNode relfilenode);
+extern List * StripesForRelfilelocator(RelFileLocator relfilelocator);
extern void ColumnarStorageUpdateIfNeeded(Relation rel, bool isUpgrade);
extern List * ExtractColumnarRelOptions(List *inOptions, List **outColumnarOptions);
extern void SetColumnarRelOptions(RangeVar *rv, List *reloptions);
diff --git a/src/include/columnar/columnar_tableam.h b/src/include/columnar/columnar_tableam.h
index 04f93fe30..657491ef8 100644
--- a/src/include/columnar/columnar_tableam.h
+++ b/src/include/columnar/columnar_tableam.h
@@ -56,7 +56,7 @@ extern TableScanDesc columnar_beginscan_extended(Relation relation, Snapshot sna
uint32 flags, Bitmapset *attr_needed,
List *scanQual);
extern int64 ColumnarScanChunkGroupsFiltered(ColumnarScanDesc columnarScanDesc);
-extern bool ColumnarSupportsIndexAM(char *indexAMName);
+extern PGDLLEXPORT bool ColumnarSupportsIndexAM(char *indexAMName);
extern bool IsColumnarTableAmTable(Oid relationId);
extern void CheckCitusColumnarCreateExtensionStmt(Node *parseTree);
extern void CheckCitusColumnarAlterExtensionStmt(Node *parseTree);
diff --git a/src/include/distributed/background_jobs.h b/src/include/distributed/background_jobs.h
index 35745c014..fc714259f 100644
--- a/src/include/distributed/background_jobs.h
+++ b/src/include/distributed/background_jobs.h
@@ -102,8 +102,8 @@ typedef struct ParallelTasksPerNodeEntry
extern BackgroundWorkerHandle * StartCitusBackgroundTaskQueueMonitor(Oid database,
Oid extensionOwner);
-extern void CitusBackgroundTaskQueueMonitorMain(Datum arg);
-extern void CitusBackgroundTaskExecutor(Datum main_arg);
+extern PGDLLEXPORT void CitusBackgroundTaskQueueMonitorMain(Datum arg);
+extern PGDLLEXPORT void CitusBackgroundTaskExecutor(Datum main_arg);
extern Datum citus_job_cancel(PG_FUNCTION_ARGS);
extern Datum citus_job_wait(PG_FUNCTION_ARGS);
diff --git a/src/include/distributed/citus_safe_lib.h b/src/include/distributed/citus_safe_lib.h
index 62142df0a..c22e5ecba 100644
--- a/src/include/distributed/citus_safe_lib.h
+++ b/src/include/distributed/citus_safe_lib.h
@@ -26,7 +26,7 @@ extern void SafeQsort(void *ptr, rsize_t count, rsize_t size,
void * SafeBsearch(const void *key, const void *ptr, rsize_t count, rsize_t size,
int (*comp)(const void *, const void *));
int SafeSnprintf(char *str, rsize_t count, const char *fmt, ...) pg_attribute_printf(3,
- 0);
+ 4);
#define memset_struct_0(variable) memset(&variable, 0, sizeof(variable))
diff --git a/src/include/distributed/colocation_utils.h b/src/include/distributed/colocation_utils.h
index c9fcf4776..bba78afd1 100644
--- a/src/include/distributed/colocation_utils.h
+++ b/src/include/distributed/colocation_utils.h
@@ -24,7 +24,8 @@ extern bool ShardsColocated(ShardInterval *leftShardInterval,
extern List * ColocatedTableList(Oid distributedTableId);
extern List * ColocatedShardIntervalList(ShardInterval *shardInterval);
extern List * ColocatedNonPartitionShardIntervalList(ShardInterval *shardInterval);
-extern Oid ColocatedTableId(Oid colocationId);
+extern Oid ColocatedTableId(int32 colocationId);
+extern uint32 SingleShardTableColocationNodeId(uint32 colocationId);
extern uint64 ColocatedShardIdInRelation(Oid relationId, int shardIndex);
uint32 ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType,
Oid distributionColumnCollation);
diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h
index a013f3977..429016f9f 100644
--- a/src/include/distributed/commands.h
+++ b/src/include/distributed/commands.h
@@ -220,6 +220,21 @@ extern List * AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok, bool
isPostprocess);
extern List * DatabaseOwnerDDLCommands(const ObjectAddress *address);
+extern List * PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString,
+ ProcessUtilityContext processUtilityContext);
+
+extern List * PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
+ ProcessUtilityContext processUtilityContext);
+
+extern List * PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString,
+ ProcessUtilityContext
+ processUtilityContext);
+
+
+extern List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
+ ProcessUtilityContext processUtilityContext);
+
+
/* domain.c - forward declarations */
extern List * CreateDomainStmtObjectAddress(Node *node, bool missing_ok, bool
isPostprocess);
@@ -235,6 +250,7 @@ extern List * RenameDomainStmtObjectAddress(Node *node, bool missing_ok, bool
extern CreateDomainStmt * RecreateDomainStmt(Oid domainOid);
extern Oid get_constraint_typid(Oid conoid);
+
/* extension.c - forward declarations */
extern bool IsDropCitusExtensionStmt(Node *parsetree);
extern List * GetDependentFDWsToExtension(Oid extensionId);
@@ -288,6 +304,7 @@ extern bool ColumnAppearsInForeignKeyToReferenceTable(char *columnName, Oid
relationId);
extern List * GetReferencingForeignConstaintCommands(Oid relationOid);
extern List * GetForeignConstraintToReferenceTablesCommands(Oid relationId);
+extern List * GetForeignConstraintFromOtherReferenceTablesCommands(Oid relationId);
extern List * GetForeignConstraintToDistributedTablesCommands(Oid relationId);
extern List * GetForeignConstraintFromDistributedTablesCommands(Oid relationId);
extern List * GetForeignConstraintCommandsInternal(Oid relationId, int flags);
@@ -582,6 +599,8 @@ extern bool ShouldEnableLocalReferenceForeignKeys(void);
extern List * PreprocessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement,
const char *queryString);
extern List * PostprocessAlterTableSchemaStmt(Node *node, const char *queryString);
+extern void PrepareAlterTableStmtForConstraint(AlterTableStmt *alterTableStatement,
+ Oid relationId, Constraint *constraint);
extern List * PreprocessAlterTableStmt(Node *node, const char *alterTableCommand,
ProcessUtilityContext processUtilityContext);
extern List * PreprocessAlterTableMoveAllStmt(Node *node, const char *queryString,
@@ -599,6 +618,8 @@ extern void ErrorUnsupportedAlterTableAddColumn(Oid relationId, AlterTableCmd *c
extern void ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod,
char referencingReplicationModel,
Var *distributionColumn, uint32 colocationId);
+extern List * InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId,
+ const char *commandString);
extern List * AlterTableSchemaStmtObjectAddress(Node *stmt,
bool missing_ok, bool isPostprocess);
extern List * MakeNameListFromRangeVar(const RangeVar *rel);
@@ -809,7 +830,6 @@ extern void UpdateAutoConvertedForConnectedRelations(List *relationId, bool
/* schema_based_sharding.c */
extern bool ShouldUseSchemaBasedSharding(char *schemaName);
extern bool ShouldCreateTenantSchemaTable(Oid relationId);
-extern bool IsTenantSchema(Oid schemaId);
extern void EnsureTenantTable(Oid relationId, char *operationName);
extern void ErrorIfIllegalPartitioningInTenantSchema(Oid parentRelationId,
Oid partitionRelationId);
diff --git a/src/include/distributed/commands/multi_copy.h b/src/include/distributed/commands/multi_copy.h
index 4255c952d..fa59894ad 100644
--- a/src/include/distributed/commands/multi_copy.h
+++ b/src/include/distributed/commands/multi_copy.h
@@ -182,7 +182,7 @@ extern void AppendCopyRowData(Datum *valueArray, bool *isNullArray,
extern void AppendCopyBinaryHeaders(CopyOutState headerOutputState);
extern void AppendCopyBinaryFooters(CopyOutState footerOutputState);
extern void EndRemoteCopy(int64 shardId, List *connectionList);
-extern List * CreateRangeTable(Relation rel, AclMode requiredAccess);
+extern List * CreateRangeTable(Relation rel);
extern Node * ProcessCopyStmt(CopyStmt *copyStatement,
QueryCompletion *completionTag,
const char *queryString);
diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h
index f08124123..158f0b1ce 100644
--- a/src/include/distributed/connection_management.h
+++ b/src/include/distributed/connection_management.h
@@ -134,7 +134,7 @@ enum MultiConnectionMode
/*
- * This state is used for keeping track of the initilization
+ * This state is used for keeping track of the initialization
* of the underlying pg_conn struct.
*/
typedef enum MultiConnectionState
@@ -149,7 +149,7 @@ typedef enum MultiConnectionState
/*
- * This state is used for keeping track of the initilization
+ * This state is used for keeping track of the initialization
* of MultiConnection struct, not specifically the underlying
* pg_conn. The state is useful to determine the action during
* clean-up of connections.
@@ -207,7 +207,7 @@ typedef struct MultiConnection
instr_time connectionEstablishmentStart;
instr_time connectionEstablishmentEnd;
- /* membership in list of list of connections in ConnectionHashEntry */
+ /* membership in list of connections in ConnectionHashEntry */
dlist_node connectionNode;
/* information about the associated remote transaction */
@@ -229,7 +229,7 @@ typedef struct MultiConnection
/* replication option */
bool requiresReplication;
- MultiConnectionStructInitializationState initilizationState;
+ MultiConnectionStructInitializationState initializationState;
} MultiConnection;
diff --git a/src/include/distributed/coordinator_protocol.h b/src/include/distributed/coordinator_protocol.h
index 7f90eadda..0dcc66141 100644
--- a/src/include/distributed/coordinator_protocol.h
+++ b/src/include/distributed/coordinator_protocol.h
@@ -156,7 +156,7 @@ struct TableDDLCommand
/*
* This union contains one (1) typed field for every implementation for
- * TableDDLCommand. A union enforces no overloading of fields but instead requiers at
+ * TableDDLCommand. A union enforces no overloading of fields but instead requires at
* most one of the fields to be used at any time.
*/
union
@@ -250,11 +250,10 @@ extern void CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId
List *workerNodeList, int
replicationFactor);
extern void CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements,
- bool useExclusiveConnection,
- bool colocatedShard);
-extern List * InsertShardPlacementRows(Oid relationId, int64 shardId,
- List *workerNodeList, int workerStartIndex,
- int replicationFactor);
+ bool useExclusiveConnection);
+extern void InsertShardPlacementRows(Oid relationId, int64 shardId,
+ List *workerNodeList, int workerStartIndex,
+ int replicationFactor);
extern uint64 UpdateShardStatistics(int64 shardId);
extern void CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount,
int32 replicationFactor,
@@ -264,9 +263,9 @@ extern void CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId,
extern void CreateReferenceTableShard(Oid distributedTableId);
extern void CreateSingleShardTableShardWithRoundRobinPolicy(Oid relationId,
uint32 colocationId);
-extern List * WorkerCreateShardCommandList(Oid relationId, int shardIndex, uint64 shardId,
- List *ddlCommandList,
- List *foreignConstraintCommandList);
+extern int EmptySingleShardTableColocationDecideNodeId(uint32 colocationId);
+extern List * WorkerCreateShardCommandList(Oid relationId, uint64 shardId,
+ List *ddlCommandList);
extern Oid ForeignConstraintGetReferencedTableId(const char *queryString);
extern void CheckHashPartitionedTable(Oid distributedTableId);
extern void CheckTableSchemaNameForDrop(Oid relationId, char **schemaName,
diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h
index 7390b486c..6ffe33cc2 100644
--- a/src/include/distributed/deparser.h
+++ b/src/include/distributed/deparser.h
@@ -109,6 +109,18 @@ extern char * DeparseAlterSchemaOwnerStmt(Node *node);
extern void AppendGrantPrivileges(StringInfo buf, GrantStmt *stmt);
extern void AppendGrantGrantees(StringInfo buf, GrantStmt *stmt);
+extern void AppendWithGrantOption(StringInfo buf, GrantStmt *stmt);
+extern void AppendGrantOptionFor(StringInfo buf, GrantStmt *stmt);
+extern void AppendGrantRestrictAndCascadeForRoleSpec(StringInfo buf, DropBehavior
+ behavior, bool isGrant);
+extern void AppendGrantRestrictAndCascade(StringInfo buf, GrantStmt *stmt);
+extern void AppendGrantedByInGrantForRoleSpec(StringInfo buf, RoleSpec *grantor, bool
+ isGrant);
+extern void AppendGrantedByInGrant(StringInfo buf, GrantStmt *stmt);
+
+extern void AppendGrantSharedPrefix(StringInfo buf, GrantStmt *stmt);
+extern void AppendGrantSharedSuffix(StringInfo buf, GrantStmt *stmt);
+
/* forward declarations for deparse_statistics_stmts.c */
extern char * DeparseCreateStatisticsStmt(Node *node);
@@ -210,6 +222,11 @@ extern char * DeparseAlterExtensionStmt(Node *stmt);
/* forward declarations for deparse_database_stmts.c */
extern char * DeparseAlterDatabaseOwnerStmt(Node *node);
+extern char * DeparseGrantOnDatabaseStmt(Node *node);
+extern char * DeparseAlterDatabaseStmt(Node *node);
+extern char * DeparseAlterDatabaseRefreshCollStmt(Node *node);
+extern char * DeparseAlterDatabaseSetStmt(Node *node);
+
/* forward declaration for deparse_publication_stmts.c */
extern char * DeparseCreatePublicationStmt(Node *stmt);
diff --git a/src/include/distributed/distributed_planner.h b/src/include/distributed/distributed_planner.h
index aac936a98..d46fbf2e6 100644
--- a/src/include/distributed/distributed_planner.h
+++ b/src/include/distributed/distributed_planner.h
@@ -234,6 +234,8 @@ extern List * TranslatedVarsForRteIdentity(int rteIdentity);
extern struct DistributedPlan * GetDistributedPlan(CustomScan *node);
extern void multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
Index restrictionIndex, RangeTblEntry *rte);
+extern void multi_get_relation_info_hook(PlannerInfo *root, Oid relationObjectId, bool
+ inhparent, RelOptInfo *rel);
extern void multi_join_restriction_hook(PlannerInfo *root,
RelOptInfo *joinrel,
RelOptInfo *outerrel,
diff --git a/src/include/distributed/errormessage.h b/src/include/distributed/errormessage.h
index 7e0b1c33d..3c19a9c83 100644
--- a/src/include/distributed/errormessage.h
+++ b/src/include/distributed/errormessage.h
@@ -12,6 +12,7 @@
#include "c.h"
#include "distributed/citus_nodes.h"
+#include "pg_version_compat.h"
typedef struct DeferredErrorMessage
diff --git a/src/include/distributed/maintenanced.h b/src/include/distributed/maintenanced.h
index a09d89085..de1e68883 100644
--- a/src/include/distributed/maintenanced.h
+++ b/src/include/distributed/maintenanced.h
@@ -29,6 +29,6 @@ extern void MaintenanceDaemonShmemInit(void);
extern void InitializeMaintenanceDaemonBackend(void);
extern bool LockCitusExtension(void);
-extern void CitusMaintenanceDaemonMain(Datum main_arg);
+extern PGDLLEXPORT void CitusMaintenanceDaemonMain(Datum main_arg);
#endif /* MAINTENANCED_H */
diff --git a/src/include/distributed/merge_planner.h b/src/include/distributed/merge_planner.h
index 1548dae6a..898292603 100644
--- a/src/include/distributed/merge_planner.h
+++ b/src/include/distributed/merge_planner.h
@@ -30,7 +30,7 @@ extern bool IsLocalTableModification(Oid targetRelationId, Query *query,
extern void NonPushableMergeCommandExplainScan(CustomScanState *node, List *ancestors,
struct ExplainState *es);
extern Var * FetchAndValidateInsertVarIfExists(Oid targetRelationId, Query *query);
-extern RangeTblEntry * ExtractMergeSourceRangeTableEntry(Query *query);
+extern RangeTblEntry * ExtractMergeSourceRangeTableEntry(Query *query, bool joinSourceOk);
#endif /* MERGE_PLANNER_H */
diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h
index dd98da764..34b95b859 100644
--- a/src/include/distributed/metadata_cache.h
+++ b/src/include/distributed/metadata_cache.h
@@ -137,8 +137,11 @@ typedef enum
ANY_CITUS_TABLE_TYPE
} CitusTableType;
+void InvalidateDistRelationCacheCallback(Datum argument, Oid relationId);
+
extern List * AllCitusTableIds(void);
extern bool IsCitusTableType(Oid relationId, CitusTableType tableType);
+extern CitusTableType GetCitusTableType(CitusTableCacheEntry *tableEntry);
extern bool IsCitusTableTypeCacheEntry(CitusTableCacheEntry *tableEtnry,
CitusTableType tableType);
bool HasDistributionKey(Oid relationId);
@@ -320,6 +323,6 @@ extern const char * CurrentDatabaseName(void);
/* connection-related functions */
extern char * GetAuthinfoViaCatalog(const char *roleName, int64 nodeId);
-extern char * GetPoolinfoViaCatalog(int64 nodeId);
+extern char * GetPoolinfoViaCatalog(int32 nodeId);
#endif /* METADATA_CACHE_H */
diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h
index 64e684000..237df363a 100644
--- a/src/include/distributed/metadata_sync.h
+++ b/src/include/distributed/metadata_sync.h
@@ -119,7 +119,7 @@ extern List * InterTableRelationshipOfRelationCommandList(Oid relationId);
extern List * DetachPartitionCommandList(void);
extern void SyncNodeMetadataToNodes(void);
extern BackgroundWorkerHandle * SpawnSyncNodeMetadataToNodes(Oid database, Oid owner);
-extern void SyncNodeMetadataToNodesMain(Datum main_arg);
+extern PGDLLEXPORT void SyncNodeMetadataToNodesMain(Datum main_arg);
extern void SignalMetadataSyncDaemon(Oid database, int sig);
extern bool ShouldInitiateMetadataSync(bool *lockFailure);
extern List * SequenceDependencyCommandList(Oid relationId);
@@ -139,6 +139,11 @@ extern void SyncNewColocationGroupToNodes(uint32 colocationId, int shardCount,
extern void SyncDeleteColocationGroupToNodes(uint32 colocationId);
extern char * TenantSchemaInsertCommand(Oid schemaId, uint32 colocationId);
extern char * TenantSchemaDeleteCommand(char *schemaName);
+extern char * UpdateNoneDistTableMetadataCommand(Oid relationId, char replicationModel,
+ uint32 colocationId, bool autoConverted);
+extern char * AddPlacementMetadataCommand(uint64 shardId, uint64 placementId,
+ uint64 shardLength, int32 groupId);
+extern char * DeletePlacementMetadataCommand(uint64 placementId);
extern MetadataSyncContext * CreateMetadataSyncContext(List *nodeList,
bool collectCommands,
diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h
index 6536e89bc..9234adc76 100644
--- a/src/include/distributed/metadata_utility.h
+++ b/src/include/distributed/metadata_utility.h
@@ -324,6 +324,7 @@ extern ShardInterval * CopyShardInterval(ShardInterval *srcInterval);
extern uint64 ShardLength(uint64 shardId);
extern bool NodeGroupHasShardPlacements(int32 groupId);
extern bool IsActiveShardPlacement(ShardPlacement *ShardPlacement);
+extern bool IsRemoteShardPlacement(ShardPlacement *shardPlacement);
extern bool IsPlacementOnWorkerNode(ShardPlacement *placement, WorkerNode *workerNode);
extern List * FilterShardPlacementList(List *shardPlacementList, bool (*filter)(
ShardPlacement *));
@@ -349,6 +350,10 @@ extern List * RemoveCoordinatorPlacementIfNotSingleNode(List *placementList);
extern void InsertShardRow(Oid relationId, uint64 shardId, char storageType,
text *shardMinValue, text *shardMaxValue);
extern void DeleteShardRow(uint64 shardId);
+extern ShardPlacement * InsertShardPlacementRowGlobally(uint64 shardId,
+ uint64 placementId,
+ uint64 shardLength,
+ int32 groupId);
extern uint64 InsertShardPlacementRow(uint64 shardId, uint64 placementId,
uint64 shardLength, int32 groupId);
extern void InsertIntoPgDistPartition(Oid relationId, char distributionMethod,
@@ -360,8 +365,13 @@ extern void UpdateDistributionColumnGlobally(Oid relationId, char distributionMe
extern void UpdateDistributionColumn(Oid relationId, char distributionMethod,
Var *distributionColumn, int colocationId);
extern void DeletePartitionRow(Oid distributedRelationId);
+extern void UpdateNoneDistTableMetadataGlobally(Oid relationId, char replicationModel,
+ uint32 colocationId, bool autoConverted);
+extern void UpdateNoneDistTableMetadata(Oid relationId, char replicationModel,
+ uint32 colocationId, bool autoConverted);
extern void DeleteShardRow(uint64 shardId);
extern void UpdatePlacementGroupId(uint64 placementId, int groupId);
+extern void DeleteShardPlacementRowGlobally(uint64 placementId);
extern void DeleteShardPlacementRow(uint64 placementId);
extern void CreateSingleShardTable(Oid relationId, ColocationParam colocationParam);
extern void CreateDistributedTable(Oid relationId, char *distributionColumnName,
@@ -369,6 +379,7 @@ extern void CreateDistributedTable(Oid relationId, char *distributionColumnName,
bool shardCountIsStrict, char *colocateWithTableName);
extern void CreateReferenceTable(Oid relationId);
extern void CreateTruncateTrigger(Oid relationId);
+extern uint64 CopyFromLocalTableIntoDistTable(Oid localTableId, Oid distributedTableId);
extern void EnsureUndistributeTenantTableSafe(Oid relationId, const char *operationName);
extern TableConversionReturn * UndistributeTable(TableConversionParameters *params);
extern void UndistributeTables(List *relationIdList);
diff --git a/src/include/distributed/multi_join_order.h b/src/include/distributed/multi_join_order.h
index 4e4ba1dd2..5eff90506 100644
--- a/src/include/distributed/multi_join_order.h
+++ b/src/include/distributed/multi_join_order.h
@@ -99,7 +99,8 @@ extern bool NodeIsEqualsOpExpr(Node *node);
extern bool IsSupportedReferenceJoin(JoinType joinType, bool leftIsReferenceTable,
bool rightIsReferenceTable);
extern OpExpr * SinglePartitionJoinClause(List *partitionColumnList,
- List *applicableJoinClauses);
+ List *applicableJoinClauses,
+ bool *foundTypeMismatch);
extern OpExpr * DualPartitionJoinClause(List *applicableJoinClauses);
extern Var * LeftColumnOrNULL(OpExpr *joinClause);
extern Var * RightColumnOrNULL(OpExpr *joinClause);
diff --git a/src/include/distributed/namespace_utils.h b/src/include/distributed/namespace_utils.h
index 7d64ead12..6be101d2a 100644
--- a/src/include/distributed/namespace_utils.h
+++ b/src/include/distributed/namespace_utils.h
@@ -10,6 +10,7 @@
#ifndef NAMESPACE_UTILS_H
#define NAMESPACE_UTILS_H
-extern void PushOverrideEmptySearchPath(MemoryContext memoryContext);
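+/*
+ * PushEmptySearchPath() returns a save nest level; pass that value back to
+ * PopEmptySearchPath() to restore the previous search_path.
+ */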
+extern int PushEmptySearchPath(void);
+extern void PopEmptySearchPath(int saveNestLevel);
#endif /* NAMESPACE_UTILS_H */
diff --git a/src/include/distributed/pg_version_constants.h b/src/include/distributed/pg_version_constants.h
index a85d72d84..9761dff83 100644
--- a/src/include/distributed/pg_version_constants.h
+++ b/src/include/distributed/pg_version_constants.h
@@ -14,5 +14,6 @@
#define PG_VERSION_14 140000
#define PG_VERSION_15 150000
#define PG_VERSION_16 160000
+#define PG_VERSION_17 170000
#endif /* PG_VERSION_CONSTANTS */
diff --git a/src/include/distributed/query_colocation_checker.h b/src/include/distributed/query_colocation_checker.h
index 969ecbcf9..562869a92 100644
--- a/src/include/distributed/query_colocation_checker.h
+++ b/src/include/distributed/query_colocation_checker.h
@@ -35,7 +35,8 @@ extern ColocatedJoinChecker CreateColocatedJoinChecker(Query *subquery,
restrictionContext);
extern bool SubqueryColocated(Query *subquery, ColocatedJoinChecker *context);
extern Query * WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
- List *requiredAttributes);
+ List *requiredAttributes,
+ RTEPermissionInfo *perminfo);
extern List * CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes);
#endif /* QUERY_COLOCATION_CHECKER_H */
diff --git a/src/include/distributed/recursive_planning.h b/src/include/distributed/recursive_planning.h
index 8943443aa..a883047f6 100644
--- a/src/include/distributed/recursive_planning.h
+++ b/src/include/distributed/recursive_planning.h
@@ -42,7 +42,8 @@ extern bool GeneratingSubplans(void);
extern bool ContainsLocalTableDistributedTableJoin(List *rangeTableList);
extern void ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
List *requiredAttrNumbers,
- RecursivePlanningContext *context);
+ RecursivePlanningContext *context,
+ RTEPermissionInfo *perminfo);
extern bool IsRecursivelyPlannableRelation(RangeTblEntry *rangeTableEntry);
extern bool IsRelationLocalTableOrMatView(Oid relationId);
extern bool ContainsReferencesToOuterQuery(Query *query);
diff --git a/src/include/distributed/relation_restriction_equivalence.h b/src/include/distributed/relation_restriction_equivalence.h
index 42b2b801f..f3a7e2b94 100644
--- a/src/include/distributed/relation_restriction_equivalence.h
+++ b/src/include/distributed/relation_restriction_equivalence.h
@@ -20,6 +20,7 @@
extern bool AllDistributionKeysInQueryAreEqual(Query *originalQuery,
PlannerRestrictionContext *
plannerRestrictionContext);
+extern bool IsRelOptOuterJoin(PlannerInfo *root, int varNo);
extern bool SafeToPushdownUnionSubquery(Query *originalQuery, PlannerRestrictionContext *
plannerRestrictionContext);
extern bool ContainsUnionSubquery(Query *queryTree);
diff --git a/src/include/distributed/relation_utils.h b/src/include/distributed/relation_utils.h
index 873398f00..acf84a9da 100644
--- a/src/include/distributed/relation_utils.h
+++ b/src/include/distributed/relation_utils.h
@@ -13,8 +13,16 @@
#include "postgres.h"
+#include "distributed/pg_version_constants.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "parser/parse_relation.h"
+#endif
#include "utils/relcache.h"
extern char * RelationGetNamespaceName(Relation relation);
+#if PG_VERSION_NUM >= PG_VERSION_16
+extern RTEPermissionInfo * GetFilledPermissionInfo(Oid relid, bool inh,
+ AclMode requiredPerms);
+#endif
#endif /* RELATION_UTILS_H */
diff --git a/src/include/distributed/replicate_none_dist_table_shard.h b/src/include/distributed/replicate_none_dist_table_shard.h
new file mode 100644
index 000000000..187690fa8
--- /dev/null
+++ b/src/include/distributed/replicate_none_dist_table_shard.h
@@ -0,0 +1,20 @@
+/*-------------------------------------------------------------------------
+ *
+ * replicate_none_dist_table_shard.h
+ *    Routines to replicate the shard of a none-distributed table to
+ * a remote node.
+ *
+ * Copyright (c) Citus Data, Inc.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef REPLICA_LOCAL_TABLE_SHARD_H
+#define REPLICA_LOCAL_TABLE_SHARD_H
+
+extern void NoneDistTableReplicateCoordinatorPlacement(Oid noneDistTableId,
+ List *targetNodeList);
+extern void NoneDistTableDeleteCoordinatorPlacement(Oid noneDistTableId);
+extern void NoneDistTableDropCoordinatorPlacementTable(Oid noneDistTableId);
+
+#endif /* REPLICA_LOCAL_TABLE_SHARD_H */
diff --git a/src/include/distributed/shard_transfer.h b/src/include/distributed/shard_transfer.h
index a37e5abdb..a6d024a2e 100644
--- a/src/include/distributed/shard_transfer.h
+++ b/src/include/distributed/shard_transfer.h
@@ -12,6 +12,9 @@
#include "distributed/shard_rebalancer.h"
#include "nodes/pg_list.h"
+extern Datum citus_move_shard_placement(PG_FUNCTION_ARGS);
+extern Datum citus_move_shard_placement_with_nodeid(PG_FUNCTION_ARGS);
+
typedef enum
{
SHARD_TRANSFER_INVALID_FIRST = 0,
diff --git a/src/include/distributed/shared_library_init.h b/src/include/distributed/shared_library_init.h
index 63a7147af..3764b52fd 100644
--- a/src/include/distributed/shared_library_init.h
+++ b/src/include/distributed/shared_library_init.h
@@ -17,10 +17,10 @@
#define MAX_SHARD_COUNT 64000
#define MAX_SHARD_REPLICATION_FACTOR 100
-extern ColumnarSupportsIndexAM_type extern_ColumnarSupportsIndexAM;
-extern CompressionTypeStr_type extern_CompressionTypeStr;
-extern IsColumnarTableAmTable_type extern_IsColumnarTableAmTable;
-extern ReadColumnarOptions_type extern_ReadColumnarOptions;
+extern PGDLLEXPORT ColumnarSupportsIndexAM_type extern_ColumnarSupportsIndexAM;
+extern PGDLLEXPORT CompressionTypeStr_type extern_CompressionTypeStr;
+extern PGDLLEXPORT IsColumnarTableAmTable_type extern_IsColumnarTableAmTable;
+extern PGDLLEXPORT ReadColumnarOptions_type extern_ReadColumnarOptions;
extern void StartupCitusBackend(void);
extern const char * GetClientMinMessageLevelNameForValue(int minMessageLevel);
diff --git a/src/include/distributed/transaction_management.h b/src/include/distributed/transaction_management.h
index e2d35048a..ca4e632a9 100644
--- a/src/include/distributed/transaction_management.h
+++ b/src/include/distributed/transaction_management.h
@@ -10,11 +10,13 @@
#define TRANSACTION_MANAGMENT_H
#include "access/xact.h"
+#include "catalog/objectaddress.h"
#include "lib/ilist.h"
#include "lib/stringinfo.h"
#include "nodes/pg_list.h"
#include "lib/stringinfo.h"
#include "nodes/primnodes.h"
+#include "utils/hsearch.h"
/* forward declare, to avoid recursive includes */
struct DistObjectCacheEntry;
@@ -58,6 +60,7 @@ typedef struct SubXactContext
{
SubTransactionId subId;
StringInfo setLocalCmds;
+ HTAB *propagatedObjects;
} SubXactContext;
/*
@@ -157,6 +160,11 @@ extern bool IsMultiStatementTransaction(void);
extern void EnsureDistributedTransactionId(void);
extern bool MaybeExecutingUDF(void);
+/* functions for tracking the objects propagated in current transaction */
+extern void TrackPropagatedObject(const ObjectAddress *objectAddress);
+extern void TrackPropagatedTableAndSequences(Oid relationId);
+extern void ResetPropagatedObjects(void);
+extern bool HasAnyDependencyInPropagatedObjects(const ObjectAddress *objectAddress);
/* initialization function(s) */
extern void InitializeTransactionManagement(void);
diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h
index be8fe5ed6..631940edf 100644
--- a/src/include/distributed/worker_transaction.h
+++ b/src/include/distributed/worker_transaction.h
@@ -73,6 +73,7 @@ extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(cons
commandList);
extern void SendCommandToWorkersWithMetadata(const char *command);
extern void SendCommandToWorkersWithMetadataViaSuperUser(const char *command);
+extern void SendCommandListToWorkersWithMetadata(List *commands);
extern void SendBareCommandListToMetadataWorkers(List *commandList);
extern void EnsureNoModificationsHaveBeenDone(void);
extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName,
diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h
index 00c5e286b..1bdbae580 100644
--- a/src/include/pg_version_compat.h
+++ b/src/include/pg_version_compat.h
@@ -13,6 +13,144 @@
#include "distributed/pg_version_constants.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+#include "utils/guc_tables.h"
+
+#define pg_clean_ascii_compat(a, b) pg_clean_ascii(a, b)
+
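+/*
+ * PG16 renamed RelFileNode to RelFileLocator (and relNode to relNumber); these
+ * macros let callers use a single spelling across the supported versions.
+ */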
+#define RelationPhysicalIdentifier_compat(a) ((a)->rd_locator)
+#define RelationTablespace_compat(a) (a.spcOid)
+#define RelationPhysicalIdentifierNumber_compat(a) (a.relNumber)
+#define RelationPhysicalIdentifierNumberPtr_compat(a) (a->relNumber)
+#define RelationPhysicalIdentifierBackend_compat(a) (a->smgr_rlocator.locator)
+
+#define float_abs(a) fabs(a)
+
+#define tuplesort_getdatum_compat(a, b, c, d, e, f) tuplesort_getdatum(a, b, c, d, e, f)
+
+static inline struct config_generic **
+get_guc_variables_compat(int *gucCount)
+{
+ return get_guc_variables(gucCount);
+}
+
+
+#define PG_FUNCNAME_MACRO __func__
+
+#define stringToQualifiedNameList_compat(a) stringToQualifiedNameList(a, NULL)
+#define typeStringToTypeName_compat(a, b) typeStringToTypeName(a, b)
+
+#define get_relids_in_jointree_compat(a, b, c) get_relids_in_jointree(a, b, c)
+
+#define object_ownercheck(a, b, c) object_ownercheck(a, b, c)
+#define object_aclcheck(a, b, c, d) object_aclcheck(a, b, c, d)
+
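+/* in PG16, by-index backend status lookups use pgstat_get_local_beentry_by_index() */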
+#define pgstat_fetch_stat_local_beentry(a) pgstat_get_local_beentry_by_index(a)
+
+#else
+
+#include "catalog/pg_class_d.h"
+#include "catalog/pg_namespace.h"
+#include "catalog/pg_proc_d.h"
+#include "storage/relfilenode.h"
+#include "utils/guc.h"
+#include "utils/guc_tables.h"
+
+#define pg_clean_ascii_compat(a, b) pg_clean_ascii(a)
+
+#define RelationPhysicalIdentifier_compat(a) ((a)->rd_node)
+#define RelationTablespace_compat(a) (a.spcNode)
+#define RelationPhysicalIdentifierNumber_compat(a) (a.relNode)
+#define RelationPhysicalIdentifierNumberPtr_compat(a) (a->relNode)
+#define RelationPhysicalIdentifierBackend_compat(a) (a->smgr_rnode.node)
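+/* make the PG16 type names resolve to their PG14/PG15 equivalents */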
+typedef RelFileNode RelFileLocator;
+typedef Oid RelFileNumber;
+#define RelidByRelfilenumber(a, b) RelidByRelfilenode(a, b)
+
+#define float_abs(a) Abs(a)
+
+#define tuplesort_getdatum_compat(a, b, c, d, e, f) tuplesort_getdatum(a, b, d, e, f)
+
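+/*
+ * Before PG16, get_guc_variables() takes no count argument, so the count is
+ * obtained separately via GetNumConfigOptions().
+ */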
+static inline struct config_generic **
+get_guc_variables_compat(int *gucCount)
+{
+ *gucCount = GetNumConfigOptions();
+ return get_guc_variables();
+}
+
+
+#define stringToQualifiedNameList_compat(a) stringToQualifiedNameList(a)
+#define typeStringToTypeName_compat(a, b) typeStringToTypeName(a)
+
+#define get_relids_in_jointree_compat(a, b, c) get_relids_in_jointree(a, b)
+
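+/*
+ * PG16 introduced the generic object_ownercheck()/object_aclcheck() helpers.
+ * On PG14/PG15, emulate them for the object classes used here by dispatching
+ * to the per-catalog pg_*_ownercheck()/pg_*_aclcheck() functions.
+ */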
+static inline bool
+object_ownercheck(Oid classid, Oid objectid, Oid roleid)
+{
+ switch (classid)
+ {
+ case RelationRelationId:
+ {
+ return pg_class_ownercheck(objectid, roleid);
+ }
+
+ case NamespaceRelationId:
+ {
+ return pg_namespace_ownercheck(objectid, roleid);
+ }
+
+ case ProcedureRelationId:
+ {
+ return pg_proc_ownercheck(objectid, roleid);
+ }
+
+ default:
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Missing classid:%d",
+ classid)));
+ }
+ }
+}
+
+
+static inline AclResult
+object_aclcheck(Oid classid, Oid objectid, Oid roleid, AclMode mode)
+{
+ switch (classid)
+ {
+ case NamespaceRelationId:
+ {
+ return pg_namespace_aclcheck(objectid, roleid, mode);
+ }
+
+ case ProcedureRelationId:
+ {
+ return pg_proc_aclcheck(objectid, roleid, mode);
+ }
+
+ default:
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("Missing classid:%d",
+ classid)));
+ }
+ }
+}
+
+
+typedef bool TU_UpdateIndexes;
+
+/*
+ * We define RTEPermissionInfo for PG16 compatibility.
+ * Some functions need to include RTEPermissionInfo in their signatures;
+ * for PG14/PG15 we pass a NULL argument to these functions.
+ */
+typedef RangeTblEntry RTEPermissionInfo;
+
+#endif
+
#if PG_VERSION_NUM >= PG_VERSION_15
#define ProcessCompletedNotifies()
#define RelationCreateStorage_compat(a, b, c) RelationCreateStorage(a, b, c)
diff --git a/src/test/cdc/t/016_cdc_wal2json.pl b/src/test/cdc/t/016_cdc_wal2json.pl
index ab384df64..10475ba85 100644
--- a/src/test/cdc/t/016_cdc_wal2json.pl
+++ b/src/test/cdc/t/016_cdc_wal2json.pl
@@ -9,6 +9,13 @@ use cdctestlib;
use threads;
+my $pg_major_version = int($ENV{'pg_major_version'});
+print("working with PG major version : $pg_major_version\n");
+if ($pg_major_version >= 16) {
+ plan skip_all => 'wal2json is not available for PG16 yet';
+ exit 0;
+}
+
# Initialize co-ordinator node
my $select_stmt = qq(SELECT * FROM data_100008 ORDER BY id;);
my $result = 0;
diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock
index ed604c2c0..e6717e5fc 100644
--- a/src/test/regress/Pipfile.lock
+++ b/src/test/regress/Pipfile.lock
@@ -21,7 +21,6 @@
"sha256:4ef1ab46b484e3c706329cedeff284a5d40824200638503f5768edb6de7d58e9",
"sha256:ffc141aa908e6f175673e7b1b3b7af4fdb0ecb738fc5c8b88f69f055c2415214"
],
- "markers": "python_version >= '3.6'",
"version": "==3.4.1"
},
"blinker": {
@@ -119,11 +118,10 @@
},
"certifi": {
"hashes": [
- "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3",
- "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"
+ "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082",
+ "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"
],
- "markers": "python_version >= '3.6'",
- "version": "==2022.12.7"
+ "version": "==2023.7.22"
},
"cffi": {
"hashes": [
@@ -199,7 +197,6 @@
"sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1",
"sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"
],
- "markers": "python_version >= '3.6'",
"version": "==8.0.4"
},
"construct": {
@@ -211,28 +208,32 @@
},
"cryptography": {
"hashes": [
- "sha256:05dc219433b14046c476f6f09d7636b92a1c3e5808b9a6536adf4932b3b2c440",
- "sha256:0dcca15d3a19a66e63662dc8d30f8036b07be851a8680eda92d079868f106288",
- "sha256:142bae539ef28a1c76794cca7f49729e7c54423f615cfd9b0b1fa90ebe53244b",
- "sha256:3daf9b114213f8ba460b829a02896789751626a2a4e7a43a28ee77c04b5e4958",
- "sha256:48f388d0d153350f378c7f7b41497a54ff1513c816bcbbcafe5b829e59b9ce5b",
- "sha256:4df2af28d7bedc84fe45bd49bc35d710aede676e2a4cb7fc6d103a2adc8afe4d",
- "sha256:4f01c9863da784558165f5d4d916093737a75203a5c5286fde60e503e4276c7a",
- "sha256:7a38250f433cd41df7fcb763caa3ee9362777fdb4dc642b9a349721d2bf47404",
- "sha256:8f79b5ff5ad9d3218afb1e7e20ea74da5f76943ee5edb7f76e56ec5161ec782b",
- "sha256:956ba8701b4ffe91ba59665ed170a2ebbdc6fc0e40de5f6059195d9f2b33ca0e",
- "sha256:a04386fb7bc85fab9cd51b6308633a3c271e3d0d3eae917eebab2fac6219b6d2",
- "sha256:a95f4802d49faa6a674242e25bfeea6fc2acd915b5e5e29ac90a32b1139cae1c",
- "sha256:adc0d980fd2760c9e5de537c28935cc32b9353baaf28e0814df417619c6c8c3b",
- "sha256:aecbb1592b0188e030cb01f82d12556cf72e218280f621deed7d806afd2113f9",
- "sha256:b12794f01d4cacfbd3177b9042198f3af1c856eedd0a98f10f141385c809a14b",
- "sha256:c0764e72b36a3dc065c155e5b22f93df465da9c39af65516fe04ed3c68c92636",
- "sha256:c33c0d32b8594fa647d2e01dbccc303478e16fdd7cf98652d5b3ed11aa5e5c99",
- "sha256:cbaba590180cba88cb99a5f76f90808a624f18b169b90a4abb40c1fd8c19420e",
- "sha256:d5a1bd0e9e2031465761dfa920c16b0065ad77321d8a8c1f5ee331021fda65e9"
+ "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306",
+ "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84",
+ "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47",
+ "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d",
+ "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116",
+ "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207",
+ "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81",
+ "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087",
+ "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd",
+ "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507",
+ "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858",
+ "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae",
+ "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34",
+ "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906",
+ "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd",
+ "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922",
+ "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7",
+ "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4",
+ "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574",
+ "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1",
+ "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c",
+ "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e",
+ "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"
],
"index": "pypi",
- "version": "==40.0.2"
+ "version": "==41.0.3"
},
"docopt": {
"hashes": [
@@ -243,34 +244,32 @@
},
"exceptiongroup": {
"hashes": [
- "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e",
- "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"
+ "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9",
+ "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"
],
"markers": "python_version < '3.11'",
- "version": "==1.1.1"
+ "version": "==1.1.3"
},
"execnet": {
"hashes": [
- "sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5",
- "sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"
+ "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41",
+ "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
- "version": "==1.9.0"
+ "version": "==2.0.2"
},
"filelock": {
"hashes": [
- "sha256:ad98852315c2ab702aeb628412cbf7e95b7ce8c3bf9565670b4eaecf1db370a9",
- "sha256:fc03ae43288c013d2ea83c8597001b1129db351aad9c57fe2409327916b8e718"
+ "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81",
+ "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"
],
"index": "pypi",
- "version": "==3.12.0"
+ "version": "==3.12.2"
},
"flask": {
"hashes": [
"sha256:59da8a3170004800a2837844bfa84d49b022550616070f7cb1a659682b2e7c9f",
"sha256:e1120c228ca2f553b470df4a5fa927ab66258467526069981b3eb0a91902687d"
],
- "markers": "python_version >= '3.6'",
"version": "==2.0.3"
},
"h11": {
@@ -278,7 +277,6 @@
"sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6",
"sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042"
],
- "markers": "python_version >= '3.6'",
"version": "==0.12.0"
},
"h2": {
@@ -286,7 +284,6 @@
"sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d",
"sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"
],
- "markers": "python_full_version >= '3.6.1'",
"version": "==4.1.0"
},
"hpack": {
@@ -294,7 +291,6 @@
"sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c",
"sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"
],
- "markers": "python_full_version >= '3.6.1'",
"version": "==4.0.0"
},
"hyperframe": {
@@ -302,7 +298,6 @@
"sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15",
"sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"
],
- "markers": "python_full_version >= '3.6.1'",
"version": "==6.0.1"
},
"iniconfig": {
@@ -310,7 +305,6 @@
"sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
"sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"
],
- "markers": "python_version >= '3.7'",
"version": "==2.0.0"
},
"itsdangerous": {
@@ -318,7 +312,6 @@
"sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44",
"sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"
],
- "markers": "python_version >= '3.7'",
"version": "==2.1.2"
},
"jinja2": {
@@ -326,7 +319,6 @@
"sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852",
"sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"
],
- "markers": "python_version >= '3.7'",
"version": "==3.1.2"
},
"kaitaistruct": {
@@ -347,59 +339,58 @@
},
"markupsafe": {
"hashes": [
- "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed",
- "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc",
- "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2",
- "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460",
- "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7",
- "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0",
- "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1",
- "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa",
- "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03",
- "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323",
- "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65",
- "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013",
- "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036",
- "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f",
- "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4",
- "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419",
- "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2",
- "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619",
- "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a",
- "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a",
- "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd",
- "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7",
- "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666",
- "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65",
- "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859",
- "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625",
- "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff",
- "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156",
- "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd",
- "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba",
- "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f",
- "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1",
- "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094",
- "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a",
- "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513",
- "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed",
- "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d",
- "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3",
- "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147",
- "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c",
- "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603",
- "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601",
- "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a",
- "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1",
- "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d",
- "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3",
- "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54",
- "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2",
- "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6",
- "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"
+ "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e",
+ "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e",
+ "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431",
+ "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686",
+ "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559",
+ "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc",
+ "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c",
+ "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0",
+ "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4",
+ "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9",
+ "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575",
+ "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba",
+ "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d",
+ "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3",
+ "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00",
+ "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155",
+ "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac",
+ "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52",
+ "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f",
+ "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8",
+ "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b",
+ "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24",
+ "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea",
+ "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198",
+ "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0",
+ "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee",
+ "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be",
+ "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2",
+ "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707",
+ "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6",
+ "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58",
+ "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779",
+ "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636",
+ "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c",
+ "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad",
+ "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee",
+ "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc",
+ "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2",
+ "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48",
+ "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7",
+ "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e",
+ "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b",
+ "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa",
+ "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5",
+ "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e",
+ "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb",
+ "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9",
+ "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57",
+ "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc",
+ "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.1.2"
+ "version": "==2.1.3"
},
"mitmproxy": {
"editable": true,
@@ -479,7 +470,6 @@
"sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61",
"sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"
],
- "markers": "python_version >= '3.7'",
"version": "==23.1"
},
"passlib": {
@@ -491,11 +481,10 @@
},
"pluggy": {
"hashes": [
- "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159",
- "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"
+ "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849",
+ "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"
],
- "markers": "python_version >= '3.6'",
- "version": "==1.0.0"
+ "version": "==1.2.0"
},
"protobuf": {
"hashes": [
@@ -521,16 +510,15 @@
"sha256:e68ad00695547d9397dd14abd3efba23cb31cef67228f4512d41396971889812",
"sha256:e9bffd52d6ee039a1cafb72475b2900c6fd0f0dca667fb7a09af0a3e119e78cb"
],
- "markers": "python_version >= '3.5'",
"version": "==3.18.3"
},
"psycopg": {
"hashes": [
- "sha256:59b4a71536b146925513c0234dfd1dc42b81e65d56ce5335dff4813434dbc113",
- "sha256:b1500c42063abaa01d30b056f0b300826b8dd8d586900586029a294ce74af327"
+ "sha256:15b25741494344c24066dc2479b0f383dd1b82fa5e75612fa4fa5bb30726e9b6",
+ "sha256:8bbeddae5075c7890b2fa3e3553440376d3c5e28418335dee3c3656b06fa2b52"
],
"index": "pypi",
- "version": "==3.1.8"
+ "version": "==3.1.10"
},
"publicsuffix2": {
"hashes": [
@@ -544,7 +532,6 @@
"sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57",
"sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"
],
- "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
"version": "==0.5.0"
},
"pycparser": {
@@ -556,18 +543,16 @@
},
"pyopenssl": {
"hashes": [
- "sha256:841498b9bec61623b1b6c47ebbc02367c07d60e0e195f19790817f10cc8db0b7",
- "sha256:9e0c526404a210df9d2b18cd33364beadb0dc858a739b885677bc65e105d4a4c"
+ "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2",
+ "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"
],
- "markers": "python_version >= '3.6'",
- "version": "==23.1.1"
+ "version": "==23.2.0"
},
"pyparsing": {
"hashes": [
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
- "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'",
"version": "==2.4.7"
},
"pyperclip": {
@@ -578,19 +563,19 @@
},
"pytest": {
"hashes": [
- "sha256:3799fa815351fea3a5e96ac7e503a96fa51cc9942c3753cda7651b93c1cfa362",
- "sha256:434afafd78b1d78ed0addf160ad2b77a30d35d4bdf8af234fe621919d9ed15e3"
+ "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32",
+ "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"
],
"index": "pypi",
- "version": "==7.3.1"
+ "version": "==7.4.0"
},
"pytest-asyncio": {
"hashes": [
- "sha256:2b38a496aef56f56b0e87557ec313e11e1ab9276fc3863f6a7be0f1d0e415e1b",
- "sha256:f2b3366b7cd501a4056858bd39349d5af19742aed2d81660b7998b6341c7eb9c"
+ "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d",
+ "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"
],
"index": "pypi",
- "version": "==0.21.0"
+ "version": "==0.21.1"
},
"pytest-repeat": {
"hashes": [
@@ -610,64 +595,63 @@
},
"pytest-xdist": {
"hashes": [
- "sha256:1849bd98d8b242b948e472db7478e090bf3361912a8fed87992ed94085f54727",
- "sha256:37290d161638a20b672401deef1cba812d110ac27e35d213f091d15b8beb40c9"
+ "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93",
+ "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2"
],
"index": "pypi",
- "version": "==3.2.1"
+ "version": "==3.3.1"
},
"pyyaml": {
"hashes": [
- "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf",
- "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293",
- "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b",
- "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57",
- "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b",
- "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4",
- "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07",
- "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba",
- "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9",
- "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287",
- "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513",
- "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0",
- "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782",
- "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0",
- "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92",
- "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f",
- "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2",
- "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc",
- "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1",
- "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c",
- "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86",
- "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4",
- "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c",
- "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34",
- "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b",
- "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d",
- "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c",
- "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb",
- "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7",
- "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737",
- "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3",
- "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d",
- "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358",
- "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53",
- "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78",
- "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803",
- "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a",
- "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f",
- "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174",
- "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"
+ "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc",
+ "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741",
+ "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206",
+ "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27",
+ "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595",
+ "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62",
+ "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98",
+ "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696",
+ "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d",
+ "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867",
+ "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47",
+ "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486",
+ "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6",
+ "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3",
+ "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007",
+ "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938",
+ "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c",
+ "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735",
+ "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d",
+ "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba",
+ "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8",
+ "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5",
+ "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd",
+ "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3",
+ "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0",
+ "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515",
+ "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c",
+ "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c",
+ "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924",
+ "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34",
+ "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43",
+ "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859",
+ "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673",
+ "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a",
+ "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab",
+ "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa",
+ "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c",
+ "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585",
+ "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d",
+ "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"
],
"index": "pypi",
- "version": "==6.0"
+ "version": "==6.0.1"
},
"ruamel.yaml": {
"hashes": [
"sha256:1a771fc92d3823682b7f0893ad56cb5a5c87c48e62b5399d6f42c8759a583b33",
"sha256:ea21da1198c4b41b8e7a259301cc9710d3b972bf8ba52f06218478e6802dd1f1"
],
- "markers": "python_version >= '3'",
"version": "==0.17.16"
},
"ruamel.yaml.clib": {
@@ -675,6 +659,7 @@
"sha256:045e0626baf1c52e5527bd5db361bc83180faaba2ff586e763d3d5982a876a9e",
"sha256:15910ef4f3e537eea7fe45f8a5d19997479940d9196f357152a09031c5be59f3",
"sha256:184faeaec61dbaa3cace407cffc5819f7b977e75360e8d5ca19461cd851a5fc5",
+ "sha256:1a6391a7cabb7641c32517539ca42cf84b87b667bad38b78d4d42dd23e957c81",
"sha256:1f08fd5a2bea9c4180db71678e850b995d2a5f4537be0e94557668cf0f5f9497",
"sha256:2aa261c29a5545adfef9296b7e33941f46aa5bbd21164228e833412af4c9c75f",
"sha256:3110a99e0f94a4a3470ff67fc20d3f96c25b13d24c6980ff841e82bafe827cac",
@@ -685,13 +670,13 @@
"sha256:4a4d8d417868d68b979076a9be6a38c676eca060785abaa6709c7b31593c35d1",
"sha256:4b3a93bb9bc662fc1f99c5c3ea8e623d8b23ad22f861eb6fce9377ac07ad6072",
"sha256:5bc0667c1eb8f83a3752b71b9c4ba55ef7c7058ae57022dd9b29065186a113d9",
- "sha256:721bc4ba4525f53f6a611ec0967bdcee61b31df5a56801281027a3a6d1c2daf5",
"sha256:763d65baa3b952479c4e972669f679fe490eee058d5aa85da483ebae2009d231",
"sha256:7bdb4c06b063f6fd55e472e201317a3bb6cdeeee5d5a38512ea5c01e1acbdd93",
"sha256:8831a2cedcd0f0927f788c5bdf6567d9dc9cc235646a434986a852af1cb54b4b",
"sha256:91a789b4aa0097b78c93e3dc4b40040ba55bef518f84a40d4442f713b4094acb",
"sha256:92460ce908546ab69770b2e576e4f99fbb4ce6ab4b245345a3869a0a0410488f",
"sha256:99e77daab5d13a48a4054803d052ff40780278240a902b880dd37a51ba01a307",
+ "sha256:9c7617df90c1365638916b98cdd9be833d31d337dbcd722485597b43c4a215bf",
"sha256:a234a20ae07e8469da311e182e70ef6b199d0fbeb6c6cc2901204dd87fb867e8",
"sha256:a7b301ff08055d73223058b5c46c55638917f04d21577c95e00e0c4d79201a6b",
"sha256:be2a7ad8fd8f7442b24323d24ba0b56c51219513cfa45b9ada3b87b76c374d4b",
@@ -729,28 +714,26 @@
},
"tornado": {
"hashes": [
- "sha256:4546003dc8b5733489139d3bff5fa6a0211be505faf819bd9970e7c2b32e8122",
- "sha256:4d349846931557b7ec92f224b5d598b160e2ba26ae1812480b42e9622c884bf7",
- "sha256:6164571f5b9f73143d1334df4584cb9ac86d20c461e17b6c189a19ead8bb93c1",
- "sha256:6cfff1e9c15c79e106b8352269d201f8fc0815914a6260f3893ca18b724ea94b",
- "sha256:720f53e6367b38190ae7fa398c25c086c69d88b3c6535bd6021a126b727fb5cd",
- "sha256:912df5712024564e362ecce43c8d5862e14c78c8dd3846c9d889d44fbd7f4951",
- "sha256:c37b6a384d54ce6a31168d40ab21ad2591ddaf34973075cc0cad154402ecd9e8",
- "sha256:c659ab04d5aa477dbe44152c67d93f3ad3243b992d94f795ca1d5c73c37337ce",
- "sha256:c9114a61a4588c09065b9996ae05462350d17160b92b9bf9a1e93689cc0424dc",
- "sha256:d68f3192936ff2c4add04dc21a436a43b4408d466746b78bb2b9d0a53a18683f",
- "sha256:d7b737e18f701de3e4a3b0824260b4d740e4d60607b8089bb80e80ffd464780e"
+ "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f",
+ "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5",
+ "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d",
+ "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3",
+ "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2",
+ "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a",
+ "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16",
+ "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a",
+ "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17",
+ "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0",
+ "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe"
],
- "markers": "python_version >= '3.8'",
- "version": "==6.3"
+ "version": "==6.3.3"
},
"typing-extensions": {
"hashes": [
- "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb",
- "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"
+ "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36",
+ "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"
],
- "markers": "python_version >= '3.7'",
- "version": "==4.5.0"
+ "version": "==4.7.1"
},
"urwid": {
"hashes": [
@@ -760,18 +743,16 @@
},
"werkzeug": {
"hashes": [
- "sha256:2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe",
- "sha256:56433961bc1f12533306c624f3be5e744389ac61d722175d543e1751285da612"
+ "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8",
+ "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528"
],
- "markers": "python_version >= '3.7'",
- "version": "==2.2.3"
+ "version": "==2.3.7"
},
"wsproto": {
"hashes": [
"sha256:868776f8456997ad0d9720f7322b746bbe9193751b5b290b7f924659377c8c38",
"sha256:d8345d1808dd599b5ffb352c25a367adb6157e664e140dbecba3f9bc007edb9f"
],
- "markers": "python_full_version >= '3.6.1'",
"version": "==1.0.0"
},
"zstandard": {
@@ -825,7 +806,6 @@
"sha256:f98fc5750aac2d63d482909184aac72a979bfd123b112ec53fd365104ea15b1c",
"sha256:ff5b75f94101beaa373f1511319580a010f6e03458ee51b1a386d7de5331440a"
],
- "markers": "python_version >= '3.5'",
"version": "==0.15.2"
}
},
@@ -835,63 +815,58 @@
"sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04",
"sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"
],
- "markers": "python_version >= '3.7'",
"version": "==23.1.0"
},
"black": {
"hashes": [
- "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5",
- "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915",
- "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326",
- "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940",
- "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b",
- "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30",
- "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c",
- "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c",
- "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab",
- "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27",
- "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2",
- "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961",
- "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9",
- "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb",
- "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70",
- "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331",
- "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2",
- "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266",
- "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d",
- "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6",
- "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b",
- "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925",
- "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8",
- "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4",
- "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"
+ "sha256:01ede61aac8c154b55f35301fac3e730baf0c9cf8120f65a9cd61a81cfb4a0c3",
+ "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb",
+ "sha256:25cc308838fe71f7065df53aedd20327969d05671bac95b38fdf37ebe70ac087",
+ "sha256:27eb7a0c71604d5de083757fbdb245b1a4fae60e9596514c6ec497eb63f95320",
+ "sha256:327a8c2550ddc573b51e2c352adb88143464bb9d92c10416feb86b0f5aee5ff6",
+ "sha256:47e56d83aad53ca140da0af87678fb38e44fd6bc0af71eebab2d1f59b1acf1d3",
+ "sha256:501387a9edcb75d7ae8a4412bb8749900386eaef258f1aefab18adddea1936bc",
+ "sha256:552513d5cd5694590d7ef6f46e1767a4df9af168d449ff767b13b084c020e63f",
+ "sha256:5c4bc552ab52f6c1c506ccae05681fab58c3f72d59ae6e6639e8885e94fe2587",
+ "sha256:642496b675095d423f9b8448243336f8ec71c9d4d57ec17bf795b67f08132a91",
+ "sha256:6d1c6022b86f83b632d06f2b02774134def5d4d4f1dac8bef16d90cda18ba28a",
+ "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad",
+ "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926",
+ "sha256:8417dbd2f57b5701492cd46edcecc4f9208dc75529bcf76c514864e48da867d9",
+ "sha256:86cee259349b4448adb4ef9b204bb4467aae74a386bce85d56ba4f5dc0da27be",
+ "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd",
+ "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96",
+ "sha256:ad0014efc7acf0bd745792bd0d8857413652979200ab924fbf239062adc12491",
+ "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2",
+ "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a",
+ "sha256:f9062af71c59c004cd519e2fb8f5d25d39e46d3af011b41ab43b9c74e27e236f",
+ "sha256:fb074d8b213749fa1d077d630db0d5f8cc3b2ae63587ad4116e8a436e9bbe995"
],
"index": "pypi",
- "version": "==23.3.0"
+ "version": "==23.7.0"
},
"click": {
"hashes": [
"sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1",
"sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"
],
- "markers": "python_version >= '3.6'",
"version": "==8.0.4"
},
"flake8": {
"hashes": [
- "sha256:3833794e27ff64ea4e9cf5d410082a8b97ff1a06c16aa3d2027339cd0f1195c7",
- "sha256:c61007e76655af75e6785a931f452915b371dc48f56efd765247c8fe68f2b181"
+ "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23",
+ "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"
],
"index": "pypi",
- "version": "==6.0.0"
+ "version": "==6.1.0"
},
"flake8-bugbear": {
"hashes": [
- "sha256:8a218d13abd6904800970381057ce8e72cde8eea743240c4ef3ede4dd0bc9cfb",
- "sha256:ea565bdb87b96b56dc499edd6cc3ba7f695373d902a5f56c989b74fad7c7719d"
+ "sha256:0ebdc7d8ec1ca8bd49347694562381f099f4de2f8ec6bda7a7dca65555d9e0d4",
+ "sha256:d99d005114020fbef47ed5e4aebafd22f167f9a0fbd0d8bf3c9e90612cb25c34"
],
"index": "pypi",
- "version": "==23.3.23"
+ "version": "==23.7.10"
},
"isort": {
"hashes": [
@@ -906,7 +881,6 @@
"sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325",
"sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"
],
- "markers": "python_version >= '3.6'",
"version": "==0.7.0"
},
"mypy-extensions": {
@@ -914,7 +888,6 @@
"sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
"sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
],
- "markers": "python_version >= '3.5'",
"version": "==1.0.0"
},
"packaging": {
@@ -922,40 +895,35 @@
"sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61",
"sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"
],
- "markers": "python_version >= '3.7'",
"version": "==23.1"
},
"pathspec": {
"hashes": [
- "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687",
- "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"
+ "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20",
+ "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"
],
- "markers": "python_version >= '3.7'",
- "version": "==0.11.1"
+ "version": "==0.11.2"
},
"platformdirs": {
"hashes": [
- "sha256:d5b638ca397f25f979350ff789db335903d7ea010ab28903f57b27e1b16c2b08",
- "sha256:ebe11c0d7a805086e99506aa331612429a72ca7cd52a1f0d277dc4adc20cb10e"
+ "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d",
+ "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"
],
- "markers": "python_version >= '3.7'",
- "version": "==3.2.0"
+ "version": "==3.10.0"
},
"pycodestyle": {
"hashes": [
- "sha256:347187bdb476329d98f695c213d7295a846d1152ff4fe9bacb8a9590b8ee7053",
- "sha256:8a4eaf0d0495c7395bdab3589ac2db602797d76207242c17d470186815706610"
+ "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0",
+ "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8"
],
- "markers": "python_version >= '3.6'",
- "version": "==2.10.0"
+ "version": "==2.11.0"
},
"pyflakes": {
"hashes": [
- "sha256:ec55bf7fe21fff7f1ad2f7da62363d749e2a470500eab1b555334b67aa1ef8cf",
- "sha256:ec8b276a6b60bd80defed25add7e439881c19e64850afd9b346283d4165fd0fd"
+ "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774",
+ "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"
],
- "markers": "python_version >= '3.6'",
- "version": "==3.0.1"
+ "version": "==3.1.0"
},
"tomli": {
"hashes": [
@@ -967,11 +935,10 @@
},
"typing-extensions": {
"hashes": [
- "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb",
- "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"
+ "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36",
+ "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"
],
- "markers": "python_version >= '3.7'",
- "version": "==4.5.0"
+ "version": "==4.7.1"
}
}
}
diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed
index a374c73f5..efa9e310f 100644
--- a/src/test/regress/bin/normalize.sed
+++ b/src/test/regress/bin/normalize.sed
@@ -294,3 +294,17 @@ s/\/\*\{"cId":.*\*\///g
# Notice message that contains current columnar version that makes it harder to bump versions
s/(NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA pg_catalog VERSION )"[0-9]+\.[0-9]+-[0-9]+"/\1 "x.y-z"/
+
+# pg16 changes
+# can be removed when dropping PG14&15 support
+#if PG_VERSION_NUM < PG_VERSION_16
+# (This is not a preprocessor directive, but a reminder for the developer who will drop PG14&15 support)
+
+s/, password_required=false//g
+s/provide the file or change sslmode/provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode/g
+s/(:varcollid [0-9]+) :varlevelsup 0/\1 :varnullingrels (b) :varlevelsup 0/g
+s/table_name_for_view\.([_a-z0-9]+)(,| |$)/\1\2/g
+s/permission denied to terminate process/must be a superuser to terminate superuser process/g
+s/permission denied to cancel query/must be a superuser to cancel superuser query/g
+
+#endif /* PG_VERSION_NUM < PG_VERSION_16 */
diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py
index 9ff30ebf9..907102482 100644
--- a/src/test/regress/citus_tests/common.py
+++ b/src/test/regress/citus_tests/common.py
@@ -92,6 +92,7 @@ PG_MAJOR_VERSION = get_pg_major_version()
OLDEST_SUPPORTED_CITUS_VERSION_MATRIX = {
14: "10.2.0",
15: "11.1.5",
+ 16: "12.1devel",
}
OLDEST_SUPPORTED_CITUS_VERSION = OLDEST_SUPPORTED_CITUS_VERSION_MATRIX[PG_MAJOR_VERSION]
@@ -428,6 +429,10 @@ PORT_UPPER_BOUND = 32768
next_port = PORT_LOWER_BOUND
+def notice_handler(diag: psycopg.errors.Diagnostic):
+ print(f"{diag.severity}: {diag.message_primary}")
+
+
def cleanup_test_leftovers(nodes):
"""
Cleaning up test leftovers needs to be done in a specific order, because
@@ -443,7 +448,7 @@ def cleanup_test_leftovers(nodes):
node.cleanup_publications()
for node in nodes:
- node.cleanup_logical_replication_slots()
+ node.cleanup_replication_slots()
for node in nodes:
node.cleanup_schemas()
@@ -525,10 +530,12 @@ class QueryRunner(ABC):
def conn(self, *, autocommit=True, **kwargs):
"""Open a psycopg connection to this server"""
self.set_default_connection_options(kwargs)
- return psycopg.connect(
+ conn = psycopg.connect(
autocommit=autocommit,
**kwargs,
)
+ conn.add_notice_handler(notice_handler)
+ return conn
def aconn(self, *, autocommit=True, **kwargs):
"""Open an asynchronous psycopg connection to this server"""
@@ -571,6 +578,21 @@ class QueryRunner(ABC):
with self.cur(**kwargs) as cur:
cur.execute(query, params=params)
+ def sql_row(self, query, params=None, allow_empty_result=False, **kwargs):
+ """Run an SQL query that returns a single row and returns this row
+
+ This opens a new connection and closes it once the query is done
+ """
+ with self.cur(**kwargs) as cur:
+ cur.execute(query, params=params)
+ result = cur.fetchall()
+
+ if allow_empty_result and len(result) == 0:
+ return None
+
+ assert len(result) == 1, "sql_row requires the query to return exactly one row"
+ return result[0]
+
def sql_value(self, query, params=None, allow_empty_result=False, **kwargs):
"""Run an SQL query that returns a single cell and return this value
@@ -730,7 +752,7 @@ class Postgres(QueryRunner):
# Used to track objects that we want to clean up at the end of a test
self.subscriptions = set()
self.publications = set()
- self.logical_replication_slots = set()
+ self.replication_slots = set()
self.schemas = set()
self.users = set()
@@ -982,7 +1004,7 @@ class Postgres(QueryRunner):
def create_logical_replication_slot(
self, name, plugin, temporary=False, twophase=False
):
- self.logical_replication_slots.add(name)
+ self.replication_slots.add(name)
self.sql(
"SELECT pg_catalog.pg_create_logical_replication_slot(%s,%s,%s,%s)",
(name, plugin, temporary, twophase),
@@ -1014,12 +1036,21 @@ class Postgres(QueryRunner):
)
)
- def cleanup_logical_replication_slots(self):
- for slot in self.logical_replication_slots:
- self.sql(
- "SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name = %s",
- (slot,),
- )
+ def cleanup_replication_slots(self):
+ for slot in self.replication_slots:
+ start = time.time()
+ while True:
+ try:
+ self.sql(
+ "SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name = %s",
+ (slot,),
+ )
+ except psycopg.errors.ObjectInUse:
+ if time.time() < start + 10:
+ time.sleep(0.5)
+ continue
+ raise
+ break
def cleanup_subscriptions(self):
for subscription in self.subscriptions:
diff --git a/src/test/regress/citus_tests/config.py b/src/test/regress/citus_tests/config.py
index 560806962..9b81863e2 100644
--- a/src/test/regress/citus_tests/config.py
+++ b/src/test/regress/citus_tests/config.py
@@ -43,7 +43,7 @@ CITUS_ARBITRARY_TEST_DIR = "./tmp_citus_test"
MASTER = "master"
# This should be updated when citus version changes
-MASTER_VERSION = "12.0"
+MASTER_VERSION = "12.2"
HOME = expanduser("~")
diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py
index 00d5638d9..2b71f5e1b 100755
--- a/src/test/regress/citus_tests/run_test.py
+++ b/src/test/regress/citus_tests/run_test.py
@@ -110,7 +110,7 @@ DEPS = {
),
"create_role_propagation": TestDeps(None, ["multi_cluster_management"]),
"single_node_enterprise": TestDeps(None),
- "single_node": TestDeps(None),
+ "single_node": TestDeps(None, ["multi_test_helpers"]),
"single_node_truncate": TestDeps(None),
"multi_explain": TestDeps(
"base_schedule", ["multi_insert_select_non_pushable_queries"]
@@ -168,6 +168,7 @@ DEPS = {
],
),
"grant_on_schema_propagation": TestDeps("minimal_schedule"),
+ "propagate_extension_commands": TestDeps("minimal_schedule"),
}
diff --git a/src/test/regress/citus_tests/test/test_extension.py b/src/test/regress/citus_tests/test/test_extension.py
new file mode 100644
index 000000000..e9b90f115
--- /dev/null
+++ b/src/test/regress/citus_tests/test/test_extension.py
@@ -0,0 +1,44 @@
+import psycopg
+import pytest
+
+
+def test_create_drop_citus(coord):
+ with coord.cur() as cur1:
+ with coord.cur() as cur2:
+ # Conn1 drops the extension
+ # and Conn2 cannot use it.
+ cur1.execute("DROP EXTENSION citus")
+
+ with pytest.raises(psycopg.errors.UndefinedFunction):
+ # Conn1 dropped the extension, so the citus_version udf
+ # cannot be found. psycopg.errors.UndefinedFunction
+ # is expected here.
+ cur2.execute("SELECT citus_version();")
+
+ # Conn2 creates the extension,
+ # Conn1 is able to use it immediately.
+ cur2.execute("CREATE EXTENSION citus")
+ cur1.execute("SELECT citus_version();")
+ cur1.execute("DROP EXTENSION citus;")
+
+ with coord.cur() as cur1:
+ with coord.cur() as cur2:
+ # A connection is able to create and use the extension
+ # within a transaction block.
+ cur1.execute("BEGIN;")
+ cur1.execute("CREATE TABLE t1(id int);")
+ cur1.execute("CREATE EXTENSION citus;")
+ cur1.execute("SELECT create_reference_table('t1')")
+ cur1.execute("ABORT;")
+
+ # Conn1 aborted so Conn2 is able to create and
+ # use the extension within a transaction block.
+ cur2.execute("BEGIN;")
+ cur2.execute("CREATE TABLE t1(id int);")
+ cur2.execute("CREATE EXTENSION citus;")
+ cur2.execute("SELECT create_reference_table('t1')")
+ cur2.execute("COMMIT;")
+
+ # Conn2 committed so Conn1 is able to use the
+ # extension immediately.
+ cur1.execute("SELECT citus_version();")
diff --git a/src/test/regress/enterprise_isolation_schedule b/src/test/regress/enterprise_isolation_schedule
index 3e6655f88..689a7db75 100644
--- a/src/test/regress/enterprise_isolation_schedule
+++ b/src/test/regress/enterprise_isolation_schedule
@@ -10,7 +10,6 @@ test: isolation_move_placement_vs_modification
test: isolation_move_placement_vs_modification_fk
test: isolation_tenant_isolation_with_fkey_to_reference
test: isolation_ref2ref_foreign_keys_enterprise
-test: isolation_pg_send_cancellation
test: isolation_shard_move_vs_start_metadata_sync
test: isolation_tenant_isolation
test: isolation_tenant_isolation_nonblocking
diff --git a/src/test/regress/expected/alter_database_propagation.out b/src/test/regress/expected/alter_database_propagation.out
new file mode 100644
index 000000000..0ce217749
--- /dev/null
+++ b/src/test/regress/expected/alter_database_propagation.out
@@ -0,0 +1,150 @@
+set citus.log_remote_commands = true;
+set citus.grep_remote_commands = '%ALTER DATABASE%';
+-- since the ALLOW_CONNECTIONS alter option should be executed in a different database
+-- and since we don't have multi-database support for now,
+-- this statement will error
+alter database regression ALLOW_CONNECTIONS false;
+ERROR: ALLOW_CONNECTIONS is not supported
+alter database regression with CONNECTION LIMIT 100;
+NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT 100;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT 100;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression with IS_TEMPLATE true CONNECTION LIMIT 50;
+NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'true' CONNECTION LIMIT 50;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'true' CONNECTION LIMIT 50;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression with CONNECTION LIMIT -1;
+NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT -1;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT -1;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression with IS_TEMPLATE true;
+NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'true';
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'true';
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression with IS_TEMPLATE false;
+NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'false';
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'false';
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+-- this statement will error since we don't have multi-database support for now
+alter database regression rename to regression2;
+ERROR: current database cannot be renamed
+alter database regression set default_transaction_read_only = true;
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_read_only = 'true'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_read_only = 'true'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+set default_transaction_read_only = false;
+alter database regression set default_transaction_read_only from current;
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_read_only FROM CURRENT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_read_only FROM CURRENT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set default_transaction_read_only to DEFAULT;
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_read_only TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_read_only TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression RESET default_transaction_read_only;
+NOTICE: issuing ALTER DATABASE regression RESET default_transaction_read_only
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression RESET default_transaction_read_only
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression SET TIME ZONE '-7';
+NOTICE: issuing ALTER DATABASE regression SET timezone = '-7'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET timezone = '-7'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set TIME ZONE LOCAL;
+NOTICE: issuing ALTER DATABASE regression SET timezone TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET timezone TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set TIME ZONE DEFAULT;
+NOTICE: issuing ALTER DATABASE regression SET timezone TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET timezone TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression RESET TIME ZONE;
+NOTICE: issuing ALTER DATABASE regression RESET timezone
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression RESET timezone
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression SET TIME ZONE INTERVAL '-08:00' HOUR TO MINUTE;
+NOTICE: issuing ALTER DATABASE regression SET TIME ZONE INTERVAL '@ 8 hours ago'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET TIME ZONE INTERVAL '@ 8 hours ago'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression RESET TIME ZONE;
+NOTICE: issuing ALTER DATABASE regression RESET timezone
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression RESET timezone
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set default_transaction_isolation = 'serializable';
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_isolation = 'serializable'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_isolation = 'serializable'
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+set default_transaction_isolation = 'read committed';
+alter database regression set default_transaction_isolation from current;
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_isolation FROM CURRENT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_isolation FROM CURRENT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set default_transaction_isolation to DEFAULT;
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_isolation TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET default_transaction_isolation TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression RESET default_transaction_isolation;
+NOTICE: issuing ALTER DATABASE regression RESET default_transaction_isolation
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression RESET default_transaction_isolation
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set statement_timeout = 1000;
+NOTICE: issuing ALTER DATABASE regression SET statement_timeout = 1000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET statement_timeout = 1000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+set statement_timeout = 2000;
+alter database regression set statement_timeout from current;
+NOTICE: issuing ALTER DATABASE regression SET statement_timeout FROM CURRENT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET statement_timeout FROM CURRENT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set statement_timeout to DEFAULT;
+NOTICE: issuing ALTER DATABASE regression SET statement_timeout TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET statement_timeout TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression RESET statement_timeout;
+NOTICE: issuing ALTER DATABASE regression RESET statement_timeout
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression RESET statement_timeout
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set lock_timeout = 1201.5;
+NOTICE: issuing ALTER DATABASE regression SET lock_timeout = 1201.5
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET lock_timeout = 1201.5
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+set lock_timeout = 1202.5;
+alter database regression set lock_timeout from current;
+NOTICE: issuing ALTER DATABASE regression SET lock_timeout FROM CURRENT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET lock_timeout FROM CURRENT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression set lock_timeout to DEFAULT;
+NOTICE: issuing ALTER DATABASE regression SET lock_timeout TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression SET lock_timeout TO DEFAULT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+alter database regression RESET lock_timeout;
+NOTICE: issuing ALTER DATABASE regression RESET lock_timeout
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ALTER DATABASE regression RESET lock_timeout
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+set citus.log_remote_commands = false;
diff --git a/src/test/regress/expected/alter_table_add_column.out b/src/test/regress/expected/alter_table_add_column.out
new file mode 100644
index 000000000..61e7319d9
--- /dev/null
+++ b/src/test/regress/expected/alter_table_add_column.out
@@ -0,0 +1,101 @@
+CREATE SCHEMA alter_table_add_column;
+SET search_path TO alter_table_add_column;
+SET citus.next_shard_id TO 1830000;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO NOTICE;
+CREATE TABLE referenced (int_col integer PRIMARY KEY);
+CREATE TABLE referencing (text_col text);
+SELECT create_distributed_table('referenced', null);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_distributed_table('referencing', null);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE SCHEMA alter_table_add_column_other_schema;
+CREATE OR REPLACE FUNCTION alter_table_add_column_other_schema.my_random(numeric)
+ RETURNS numeric AS
+$$
+BEGIN
+ RETURN 7 * $1;
+END;
+$$
+LANGUAGE plpgsql IMMUTABLE;
+CREATE COLLATION caseinsensitive (
+ provider = icu,
+ locale = 'und-u-ks-level2'
+);
+CREATE TYPE "simple_!\'custom_type" AS (a integer, b integer);
+ALTER TABLE referencing ADD COLUMN test_1 integer DEFAULT (alter_table_add_column_other_schema.my_random(7) + random() + 5) NOT NULL CONSTRAINT fkey REFERENCES referenced(int_col) ON UPDATE SET DEFAULT ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE REFERENCES referenced(int_col) ON UPDATE CASCADE ON DELETE SET DEFAULT NOT DEFERRABLE INITIALLY IMMEDIATE;
+ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (test_1 * alter_table_add_column_other_schema.my_random(1)) STORED UNIQUE REFERENCES referenced(int_col) MATCH FULL;
+ALTER TABLE referencing ADD COLUMN test_4 integer PRIMARY KEY WITH (fillfactor=70) NOT NULL REFERENCES referenced(int_col) MATCH SIMPLE ON UPDATE CASCADE ON DELETE SET DEFAULT;
+ALTER TABLE referencing ADD COLUMN test_5 integer CONSTRAINT unique_c UNIQUE WITH (fillfactor=50) NULL;
+ALTER TABLE referencing ADD COLUMN test_6 text COMPRESSION pglz COLLATE caseinsensitive NOT NULL;
+ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type";
+-- we give up deparsing the ALTER TABLE command if it needs to create a check constraint, and we fall back to the legacy behavior
+ALTER TABLE referencing ADD COLUMN test_8 integer CHECK (test_8 > 0);
+ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
+DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names
+HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_8 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name CHECK (check_expression);
+ALTER TABLE referencing ADD COLUMN test_8 integer CONSTRAINT check_test_8 CHECK (test_8 > 0);
+-- try to add test_6 again, but with IF NOT EXISTS
+ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 text;
+NOTICE: column "test_6" of relation "referencing" already exists, skipping
+ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 integer;
+NOTICE: column "test_6" of relation "referencing" already exists, skipping
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_grouped_fkey_constraints FROM get_grouped_fkey_constraints('alter_table_add_column.referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+ is_coordinator | result
+---------------------------------------------------------------------
+ t | [{"deferred": true, "deferable": true, "on_delete": "c", "on_update": "d", "match_type": "s", "constraint_names": ["fkey"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "f", "constraint_names": ["referencing__fkey1"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_3"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey2"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_4"], "referencing_columns_set_null_or_default": null}]
+ f | [{"deferred": true, "deferable": true, "on_delete": "c", "on_update": "d", "match_type": "s", "constraint_names": ["fkey", "fkey_xxxxxxx"], "referenced_tables": ["alter_table_add_column.referenced", "alter_table_add_column.referenced_1830000"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_1830001"], "referencing_columns": ["test_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey", "referencing__fkey_1830001"], "referenced_tables": ["alter_table_add_column.referenced", "alter_table_add_column.referenced_1830000"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_1830001"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "f", "constraint_names": ["referencing__fkey1", "referencing__fkey1_1830001"], "referenced_tables": ["alter_table_add_column.referenced", "alter_table_add_column.referenced_1830000"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_1830001"], "referencing_columns": ["test_3"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey2", "referencing__fkey2_1830001"], "referenced_tables": ["alter_table_add_column.referenced", "alter_table_add_column.referenced_1830000"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing", "alter_table_add_column.referencing_1830001"], "referencing_columns": ["test_4"], "referencing_columns_set_null_or_default": null}]
+ f | [{"deferred": true, "deferable": true, "on_delete": "c", "on_update": "d", "match_type": "s", "constraint_names": ["fkey"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "f", "constraint_names": ["referencing__fkey1"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_3"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "d", "on_update": "c", "match_type": "s", "constraint_names": ["referencing__fkey2"], "referenced_tables": ["alter_table_add_column.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["alter_table_add_column.referencing"], "referencing_columns": ["test_4"], "referencing_columns_set_null_or_default": null}]
+(3 rows)
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_index_defs FROM get_index_defs('alter_table_add_column', 'referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+ is_coordinator | result
+---------------------------------------------------------------------
+ t | [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON alter_table_add_column.referencing USING btree (test_2)"], "indexnames": ["referencing__key"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON alter_table_add_column.referencing USING btree (test_3)"], "indexnames": ["referencing__key1"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing_pkey ON alter_table_add_column.referencing USING btree (test_4) WITH (fillfactor='70')"], "indexnames": ["referencing_pkey"]}, {"indexdefs": ["CREATE UNIQUE INDEX unique_c ON alter_table_add_column.referencing USING btree (test_5) WITH (fillfactor='50')"], "indexnames": ["unique_c"]}]
+ f | [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON alter_table_add_column.referencing USING btree (test_2)", "CREATE UNIQUE INDEX referencing__key_1830001 ON alter_table_add_column.referencing_1830001 USING btree (test_2)"], "indexnames": ["referencing__key", "referencing__key_1830001"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON alter_table_add_column.referencing USING btree (test_3)", "CREATE UNIQUE INDEX referencing__key1_1830001 ON alter_table_add_column.referencing_1830001 USING btree (test_3)"], "indexnames": ["referencing__key1", "referencing__key1_1830001"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing_pkey ON alter_table_add_column.referencing USING btree (test_4) WITH (fillfactor='70')", "CREATE UNIQUE INDEX referencing_pkey_1830001 ON alter_table_add_column.referencing_1830001 USING btree (test_4) WITH (fillfactor='70')"], "indexnames": ["referencing_pkey", "referencing_pkey_1830001"]}, {"indexdefs": ["CREATE UNIQUE INDEX unique_c ON alter_table_add_column.referencing USING btree (test_5) WITH (fillfactor='50')", "CREATE UNIQUE INDEX unique_c_1830001 ON alter_table_add_column.referencing_1830001 USING btree (test_5) WITH (fillfactor='50')"], "indexnames": ["unique_c", "unique_c_1830001"]}]
+ f | [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON alter_table_add_column.referencing USING btree (test_2)"], "indexnames": ["referencing__key"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON alter_table_add_column.referencing USING btree (test_3)"], "indexnames": ["referencing__key1"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing_pkey ON alter_table_add_column.referencing USING btree (test_4) WITH (fillfactor='70')"], "indexnames": ["referencing_pkey"]}, {"indexdefs": ["CREATE UNIQUE INDEX unique_c ON alter_table_add_column.referencing USING btree (test_5) WITH (fillfactor='50')"], "indexnames": ["unique_c"]}]
+(3 rows)
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_column_defaults FROM get_column_defaults('alter_table_add_column', 'referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+ is_coordinator | result
+---------------------------------------------------------------------
+ t | [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}]
+ f | [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}]
+ f | [{"column_name": "test_1", "column_default": "(((alter_table_add_column_other_schema.my_random((7)::numeric))::double precision + random()) + (5)::double precision)", "generation_expression": null}, {"column_name": "test_3", "column_default": null, "generation_expression": "((test_1)::numeric * alter_table_add_column_other_schema.my_random((1)::numeric))"}]
+(3 rows)
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_column_attrs FROM get_column_attrs('alter_table_add_column.referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+ is_coordinator | result
+---------------------------------------------------------------------
+ t | {"relnames": ["alter_table_add_column.referencing"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]}
+ f | {"relnames": ["alter_table_add_column.referencing"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]}
+ f | {"relnames": ["alter_table_add_column.referencing_1830001", "alter_table_add_column.referencing"], "column_attrs": [{"not_null": true, "type_name": "int4", "column_name": "test_1", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_2", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_3", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "int4", "column_name": "test_4", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "int4", "column_name": "test_5", "collation_name": null, "compression_method": ""}, {"not_null": true, "type_name": "text", "column_name": "test_6", "collation_name": "caseinsensitive", "compression_method": "p"}, {"not_null": false, "type_name": "int4", "column_name": "test_8", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "simple_!\\'custom_type", "column_name": "test_\\'!7", "collation_name": null, "compression_method": ""}, {"not_null": false, "type_name": "text", "column_name": "text_col", "collation_name": "default", "compression_method": ""}]}
+(3 rows)
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA alter_table_add_column, alter_table_add_column_other_schema CASCADE;
diff --git a/src/test/regress/expected/alter_table_set_access_method.out b/src/test/regress/expected/alter_table_set_access_method.out
index 8a6f335a7..938c6bc0d 100644
--- a/src/test/regress/expected/alter_table_set_access_method.out
+++ b/src/test/regress/expected/alter_table_set_access_method.out
@@ -1,5 +1,6 @@
--- test for Postgres version
--- should error before PG12
+--
+-- ALTER_TABLE_SET_ACCESS_METHOD
+--
CREATE TABLE alter_am_pg_version_table (a INT);
SELECT alter_table_set_access_method('alter_am_pg_version_table', 'columnar');
NOTICE: creating a new table for public.alter_am_pg_version_table
@@ -770,8 +771,15 @@ RESET client_min_messages;
create table events (event_id bigserial, event_time timestamptz default now(), payload text);
create index on events (event_id);
insert into events (payload) select 'hello-'||s from generate_series(1,10) s;
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
BEGIN;
+ \if :server_version_ge_16
+ SET LOCAL debug_parallel_query = regress;
+ \else
SET LOCAL force_parallel_mode = regress;
+ \endif
SET LOCAL min_parallel_table_scan_size = 1;
SET LOCAL parallel_tuple_cost = 0;
SET LOCAL max_parallel_workers = 4;
diff --git a/src/test/regress/expected/arbitrary_configs_truncate_cascade.out b/src/test/regress/expected/arbitrary_configs_truncate_cascade.out
index adf8a3cfc..4bf8edf71 100644
--- a/src/test/regress/expected/arbitrary_configs_truncate_cascade.out
+++ b/src/test/regress/expected/arbitrary_configs_truncate_cascade.out
@@ -1,9 +1,11 @@
SET search_path TO truncate_cascade_tests_schema;
+-- Hide detail of truncate error because it might either reference
+-- table_with_fk_1 or table_with_fk_2 in the error message.
+\set VERBOSITY TERSE
-- Test truncate error on table with dependencies
TRUNCATE table_with_pk;
ERROR: cannot truncate a table referenced in a foreign key constraint
-DETAIL: Table "table_with_fk_1" references "table_with_pk".
-HINT: Truncate table "table_with_fk_1" at the same time, or use TRUNCATE ... CASCADE.
+\set VERBOSITY DEFAULT
-- Test truncate rollback on table with dependencies
SELECT COUNT(*) FROM table_with_fk_1;
count
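
The DETAIL and HINT of this TRUNCATE error may name either table_with_fk_1 or table_with_fk_2, depending on which referencing table the server reports first, so the test hides them with \set VERBOSITY TERSE and restores the default immediately after. An illustrative sketch of the technique (TERSE keeps only the primary ERROR line):

    \set VERBOSITY TERSE
    TRUNCATE table_with_pk;   -- ERROR: cannot truncate a table referenced in a foreign key constraint
    \set VERBOSITY DEFAULT
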
diff --git a/src/test/regress/expected/auto_undist_citus_local.out b/src/test/regress/expected/auto_undist_citus_local.out
index 0eaec17e5..9e6c6e014 100644
--- a/src/test/regress/expected/auto_undist_citus_local.out
+++ b/src/test/regress/expected/auto_undist_citus_local.out
@@ -1201,9 +1201,9 @@ ALTER TABLE reference_table_1 OWNER TO another_user;
SELECT run_command_on_placements('reference_table_1', 'ALTER TABLE %s OWNER TO another_user');
run_command_on_placements
---------------------------------------------------------------------
- (localhost,57636,1810093,t,"ALTER TABLE")
- (localhost,57637,1810093,t,"ALTER TABLE")
- (localhost,57638,1810093,t,"ALTER TABLE")
+ (localhost,57636,1810092,t,"ALTER TABLE")
+ (localhost,57637,1810092,t,"ALTER TABLE")
+ (localhost,57638,1810092,t,"ALTER TABLE")
(3 rows)
BEGIN;
diff --git a/src/test/regress/expected/citus_local_tables.out b/src/test/regress/expected/citus_local_tables.out
index cfa6410ba..4f3053094 100644
--- a/src/test/regress/expected/citus_local_tables.out
+++ b/src/test/regress/expected/citus_local_tables.out
@@ -494,7 +494,7 @@ CREATE TABLE local_table_4 (
b int references local_table_4(a));
NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (xxxxx, 'citus_local_tables_test_schema', xxxxx, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_tables_test_schema.local_table_4 ADD CONSTRAINT local_table_4_a_fkey FOREIGN KEY (a) REFERENCES citus_local_tables_test_schema.citus_local_table_1(a)')
ALTER TABLE citus_local_table_1 ADD COLUMN b int NOT NULL;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504014, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD COLUMN b int NOT NULL;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1504014, 'citus_local_tables_test_schema', 'ALTER TABLE citus_local_table_1 ADD COLUMN b integer NOT NULL;')
-- show that we added column with NOT NULL
SELECT table_name, column_name, is_nullable
FROM INFORMATION_SCHEMA.COLUMNS
@@ -635,7 +635,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'citus_local_table_4'::regclass;
SELECT column_name_to_column('citus_local_table_4', 'a');
column_name_to_column
---------------------------------------------------------------------
- {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
+ {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
(1 row)
SELECT master_update_shard_statistics(shardid)
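
PostgreSQL 16 added a varnullingrels field to Var nodes as part of the outer-join planning rework, so serialized Vars such as pg_dist_partition.partkey and the result of column_name_to_column() now carry an extra ":varnullingrels (b)" token; that is all the expected-output change above amounts to. A minimal check, assuming PG16 and the table from this test:

    SELECT column_name_to_column('citus_local_table_4', 'a');
    -- ... :varcollid 0 :varnullingrels (b) :varlevelsup 0 ...   (PG16+)
    -- ... :varcollid 0 :varlevelsup 0 ...                       (PG15 and earlier)
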
diff --git a/src/test/regress/expected/citus_local_tables_mx.out b/src/test/regress/expected/citus_local_tables_mx.out
index 363e87e58..8b3cb953f 100644
--- a/src/test/regress/expected/citus_local_tables_mx.out
+++ b/src/test/regress/expected/citus_local_tables_mx.out
@@ -769,8 +769,8 @@ SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition
ORDER BY logicalrelid;
logicalrelid | partmethod | partkey
---------------------------------------------------------------------
- parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
- parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 5 :location -1}
+ parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
+ parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 5 :location -1}
(2 rows)
-- some tests for view propagation on citus local tables
@@ -912,12 +912,12 @@ select run_command_on_workers($$SELECT count(*)=0 from citus_local_tables_mx.v10
(2 rows)
CREATE TABLE loc_tb_2 (a int);
-CREATE VIEW v104 AS SELECT * from loc_tb_2;
+CREATE VIEW v104 AS SELECT * from loc_tb_2 table_name_for_view;
SET client_min_messages TO DEBUG1;
-- verify the CREATE command for the view is generated correctly
ALTER TABLE loc_tb_2 ADD CONSTRAINT fkey_2 FOREIGN KEY (a) references ref_tb(a);
-DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v104 (a) AS SELECT loc_tb_2.a
- FROM citus_local_tables_mx.loc_tb_2; ALTER VIEW citus_local_tables_mx.v104 OWNER TO postgres"
+DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v104 (a) AS SELECT a
+ FROM citus_local_tables_mx.loc_tb_2 table_name_for_view; ALTER VIEW citus_local_tables_mx.v104 OWNER TO postgres"
DEBUG: "view v104" has dependency to "table loc_tb_2" that is not in Citus' metadata
DEBUG: validating foreign key constraint "fkey_2_1330083"
SET client_min_messages TO WARNING;
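
The view in this test now reads from an aliased relation (table_name_for_view), and the DEBUG output above shows the CREATE OR REPLACE VIEW statement that Citus regenerates for propagation picking up that alias and printing the column unqualified. A hedged sketch for inspecting how the server itself deparses such a view (pg_get_viewdef is a stock PostgreSQL function; the names are the ones used in this test):

    CREATE VIEW v104 AS SELECT * FROM loc_tb_2 table_name_for_view;
    SELECT pg_get_viewdef('v104'::regclass, true);
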
diff --git a/src/test/regress/expected/citus_local_tables_queries.out b/src/test/regress/expected/citus_local_tables_queries.out
index f14607155..2b2761644 100644
--- a/src/test/regress/expected/citus_local_tables_queries.out
+++ b/src/test/regress/expected/citus_local_tables_queries.out
@@ -604,11 +604,9 @@ INSERT INTO citus_local_table
SELECT * from citus_local_table_2;
NOTICE: executing the command locally: INSERT INTO citus_local_table_queries.citus_local_table_1509001 AS citus_table_alias (a, b) SELECT citus_local_table_2.a, citus_local_table_2.b FROM citus_local_table_queries.citus_local_table_2_1509002 citus_local_table_2
INSERT INTO citus_local_table
-SELECT * from citus_local_table_2
-ORDER BY 1,2
-LIMIT 10;
-NOTICE: executing the command locally: SELECT a, b FROM citus_local_table_queries.citus_local_table_2_1509002 citus_local_table_2 ORDER BY a, b LIMIT 10
-NOTICE: executing the copy locally for shard xxxxx
+SELECT sum(a), b from citus_local_table_2
+GROUP BY b;
+NOTICE: executing the command locally: INSERT INTO citus_local_table_queries.citus_local_table_1509001 AS citus_table_alias (a, b) SELECT sum(citus_local_table_2.a) AS sum, citus_local_table_2.b FROM citus_local_table_queries.citus_local_table_2_1509002 citus_local_table_2 GROUP BY citus_local_table_2.b
INSERT INTO citus_local_table
SELECT * from postgres_local_table;
NOTICE: executing the copy locally for shard xxxxx
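
The removed and added NOTICE lines show the practical difference between the old and new test query: with ORDER BY ... LIMIT, the SELECT had to be executed first and its result copied into the local shard, whereas the GROUP BY form runs as a single local INSERT ... SELECT. A sketch for observing which strategy Citus picks, assuming citus.log_local_commands (the setting behind the "executing the command locally" notices) is enabled as in these tests:

    SET citus.log_local_commands TO on;
    INSERT INTO citus_local_table
    SELECT sum(a), b FROM citus_local_table_2 GROUP BY b;
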
diff --git a/src/test/regress/expected/citus_local_tables_queries_0.out b/src/test/regress/expected/citus_local_tables_queries_0.out
index 3bac4fbee..4da695c89 100644
--- a/src/test/regress/expected/citus_local_tables_queries_0.out
+++ b/src/test/regress/expected/citus_local_tables_queries_0.out
@@ -604,11 +604,9 @@ INSERT INTO citus_local_table
SELECT * from citus_local_table_2;
NOTICE: executing the command locally: INSERT INTO citus_local_table_queries.citus_local_table_1509001 AS citus_table_alias (a, b) SELECT a, b FROM citus_local_table_queries.citus_local_table_2_1509002 citus_local_table_2
INSERT INTO citus_local_table
-SELECT * from citus_local_table_2
-ORDER BY 1,2
-LIMIT 10;
-NOTICE: executing the command locally: SELECT a, b FROM citus_local_table_queries.citus_local_table_2_1509002 citus_local_table_2 ORDER BY a, b LIMIT 10
-NOTICE: executing the copy locally for shard xxxxx
+SELECT sum(a), b from citus_local_table_2
+GROUP BY b;
+NOTICE: executing the command locally: INSERT INTO citus_local_table_queries.citus_local_table_1509001 AS citus_table_alias (a, b) SELECT sum(a) AS sum, b FROM citus_local_table_queries.citus_local_table_2_1509002 citus_local_table_2 GROUP BY b
INSERT INTO citus_local_table
SELECT * from postgres_local_table;
NOTICE: executing the copy locally for shard xxxxx
diff --git a/src/test/regress/expected/citus_schema_move.out b/src/test/regress/expected/citus_schema_move.out
new file mode 100644
index 000000000..160d2062b
--- /dev/null
+++ b/src/test/regress/expected/citus_schema_move.out
@@ -0,0 +1,224 @@
+CREATE SCHEMA citus_schema_move;
+SET search_path TO citus_schema_move;
+SET citus.next_shard_id TO 2220000;
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+SET client_min_messages TO WARNING;
+SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
+ master_set_node_property
+---------------------------------------------------------------------
+
+(1 row)
+
+-- Due to a race condition that happens in TransferShards() when the same shard id
+-- is used to create the same shard on a different worker node, we need to call
+-- citus_cleanup_orphaned_resources() to clean up any orphaned resources before
+-- running the tests.
+--
+-- See https://github.com/citusdata/citus/pull/7180#issuecomment-1706786615.
+CALL citus_cleanup_orphaned_resources();
+SET client_min_messages TO NOTICE;
+-- test null input, should be no-op
+SELECT citus_schema_move(schema_id=>null, target_node_name=>null, target_node_port=>null, shard_transfer_mode=>null);
+ citus_schema_move
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_schema_move(schema_id=>null, target_node_id=>null, shard_transfer_mode=>null);
+ citus_schema_move
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_schema_move(schema_id=>null, target_node_id=>null, shard_transfer_mode=>null);
+ citus_schema_move
+---------------------------------------------------------------------
+
+(1 row)
+
+SET citus.enable_schema_based_sharding TO ON;
+CREATE SCHEMA s1;
+-- test invalid schema
+SELECT citus_schema_move('no_such_schema', 'dummy_node_name', 1234);
+ERROR: schema "no_such_schema" does not exist
+SELECT citus_schema_move('no_such_schema', 1234);
+ERROR: schema "no_such_schema" does not exist
+-- test regular schema
+SELECT citus_schema_move('citus_schema_move', 'dummy_node_name', 1234);
+ERROR: schema citus_schema_move is not a distributed schema
+SELECT citus_schema_move('citus_schema_move', 1234);
+ERROR: schema citus_schema_move is not a distributed schema
+-- test empty distributed schema
+SELECT citus_schema_move('s1', 'dummy_node_name', 1234);
+ERROR: cannot move distributed schema s1 because it is empty
+SELECT citus_schema_move('s1', 1234);
+ERROR: cannot move distributed schema s1 because it is empty
+CREATE TABLE s1.t1 (a int);
+-- test invalid node name / port / id
+SELECT citus_schema_move('s1', 'dummy_node_name', 1234);
+ERROR: Moving shards to a non-existing node is not supported
+HINT: Add the target node via SELECT citus_add_node('dummy_node_name', 1234);
+SELECT citus_schema_move('s1', 1234);
+ERROR: node with node id 1234 could not be found
+-- errors due to missing primary key / replica identity
+SELECT citus_schema_move('s1', nodename, nodeport) FROM pg_dist_node
+WHERE isactive AND shouldhaveshards AND noderole='primary' AND
+ (nodename, nodeport) NOT IN (
+ SELECT nodename, nodeport FROM citus_shards WHERE table_name = 's1.t1'::regclass
+ );
+ERROR: cannot use logical replication to transfer shards of the relation t1 since it doesn't have a REPLICA IDENTITY or PRIMARY KEY
+DETAIL: UPDATE and DELETE commands on the shard will error out during logical replication unless there is a REPLICA IDENTITY or PRIMARY KEY.
+HINT: If you wish to continue without a replica identity set the shard_transfer_mode to 'force_logical' or 'block_writes'.
+-- errors as we try to move the schema to the same node
+SELECT citus_schema_move('s1', nodename, nodeport, 'block_writes')
+FROM citus_shards
+JOIN pg_dist_node USING (nodename, nodeport)
+WHERE noderole = 'primary' AND table_name = 's1.t1'::regclass;
+ERROR: cannot move shard to the same node
+SELECT citus_schema_move('s1', nodeid, 'block_writes')
+FROM citus_shards
+JOIN pg_dist_node USING (nodename, nodeport)
+WHERE noderole = 'primary' AND table_name = 's1.t1'::regclass;
+ERROR: cannot move shard to the same node
+-- returns id, host name and host port of a non-coordinator node that the given schema can be moved to
+CREATE OR REPLACE FUNCTION get_non_coord_candidate_node_for_schema_move(
+ schema_id regnamespace)
+RETURNS TABLE (nodeid integer, nodename text, nodeport integer)
+SET search_path TO 'pg_catalog, public'
+AS $func$
+BEGIN
+ IF NOT EXISTS (SELECT 1 FROM pg_dist_schema WHERE schemaid = schema_id)
+ THEN
+ RAISE EXCEPTION '% is not a distributed schema', schema_id;
+ END IF;
+
+ CREATE TEMP TABLE nodeid_nodename_nodeport ON COMMIT DROP AS
+ SELECT pdn1.nodeid, pdn1.nodename, pdn1.nodeport
+ FROM pg_dist_node pdn1
+ WHERE isactive AND shouldhaveshards AND noderole='primary' AND groupid != 0 AND
+ (pdn1.nodename, pdn1.nodeport) NOT IN (
+ SELECT cs.nodename, cs.nodeport
+ FROM citus_shards cs
+ JOIN pg_dist_node pdn2
+ ON cs.nodename = pdn2.nodename AND cs.nodeport = pdn2.nodeport
+ WHERE pdn2.noderole='primary' AND starts_with(table_name::text, schema_id::text)
+ );
+
+ IF NOT EXISTS (SELECT 1 FROM nodeid_nodename_nodeport)
+ THEN
+ RAISE EXCEPTION 'could not determine a node to move the schema to';
+ END IF;
+
+ RETURN QUERY SELECT * FROM nodeid_nodename_nodeport LIMIT 1;
+END;
+$func$ LANGUAGE plpgsql;
+CREATE TABLE s1.t2 (a int);
+-- move the schema to a different node
+SELECT nodeid AS s1_new_nodeid, quote_literal(nodename) AS s1_new_nodename, nodeport AS s1_new_nodeport
+FROM get_non_coord_candidate_node_for_schema_move('s1') \gset
+SELECT citus_schema_move('s1', :s1_new_nodename, :s1_new_nodeport, 'block_writes');
+ citus_schema_move
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT (:s1_new_nodename, :s1_new_nodeport) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's1'::text));
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT nodeid AS s1_new_nodeid, quote_literal(nodename) AS s1_new_nodename, nodeport AS s1_new_nodeport
+FROM get_non_coord_candidate_node_for_schema_move('s1') \gset
+SELECT citus_schema_move('s1', :s1_new_nodeid, 'block_writes');
+ citus_schema_move
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT (:s1_new_nodename, :s1_new_nodeport) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's1'::text));
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- move the schema to the coordinator
+SELECT citus_schema_move('s1', 'localhost', :master_port, 'block_writes');
+ citus_schema_move
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT ('localhost', :master_port) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's1'::text));
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- move the schema away from the coordinator
+SELECT nodeid AS s1_new_nodeid, quote_literal(nodename) AS s1_new_nodename, nodeport AS s1_new_nodeport
+FROM get_non_coord_candidate_node_for_schema_move('s1') \gset
+SELECT citus_schema_move('s1', :s1_new_nodename, :s1_new_nodeport, 'block_writes');
+ citus_schema_move
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT (:s1_new_nodename, :s1_new_nodeport) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's1'::text));
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE USER tenantuser superuser;
+SET ROLE tenantuser;
+CREATE SCHEMA s2;
+CREATE TABLE s2.t1 (a int);
+CREATE TABLE s2.t2 (a int);
+CREATE USER regularuser;
+SET ROLE regularuser;
+-- throws an error as the user is not the owner of the schema
+SELECT citus_schema_move('s2', 'dummy_node', 1234);
+ERROR: must be owner of schema s2
+-- assign all tables to regularuser
+RESET ROLE;
+SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO regularuser; $$);
+ result
+---------------------------------------------------------------------
+ REASSIGN OWNED
+ REASSIGN OWNED
+ REASSIGN OWNED
+(3 rows)
+
+GRANT USAGE ON SCHEMA citus_schema_move TO regularuser;
+SET ROLE regularuser;
+SELECT nodeid AS s2_new_nodeid, quote_literal(nodename) AS s2_new_nodename, nodeport AS s2_new_nodeport
+FROM get_non_coord_candidate_node_for_schema_move('s2') \gset
+SELECT citus_schema_move('s2', :s2_new_nodename, :s2_new_nodeport, 'force_logical');
+ citus_schema_move
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT (:s2_new_nodename, :s2_new_nodeport) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's2'::text));
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA s2 CASCADE;
+SET client_min_messages TO NOTICE;
+RESET ROLE;
+REVOKE USAGE ON SCHEMA citus_schema_move FROM regularuser;
+DROP ROLE regularuser, tenantuser;
+RESET citus.enable_schema_based_sharding;
+SET client_min_messages TO WARNING;
+DROP SCHEMA citus_schema_move, s1 CASCADE;
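
The new expected output above exercises citus_schema_move(), which moves every shard of a schema-based-sharding (distributed) schema to a target node addressed either by node name and port or by node id. A minimal usage sketch using the named parameters shown in this test (node names, ports, and ids are placeholders):

    SELECT citus_schema_move('s1', target_node_name => 'localhost', target_node_port => 9701,
                             shard_transfer_mode => 'block_writes');
    SELECT citus_schema_move('s1', target_node_id => 2, shard_transfer_mode => 'force_logical');
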
diff --git a/src/test/regress/expected/columnar_chunk_filtering.out b/src/test/regress/expected/columnar_chunk_filtering.out
index 0d0534ccc..3acdd957d 100644
--- a/src/test/regress/expected/columnar_chunk_filtering.out
+++ b/src/test/regress/expected/columnar_chunk_filtering.out
@@ -1,6 +1,10 @@
--
-- Test chunk filtering in columnar using min/max values in stripe skip lists.
--
+-- It has an alternative test output file
+-- because PG16 changed the order of some Filters in EXPLAIN
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d
--
-- filtered_row_count returns number of rows filtered by the WHERE clause.
-- If chunks get filtered by columnar, less rows are passed to WHERE
@@ -370,10 +374,10 @@ SELECT * FROM r1, coltest WHERE
Filter: ((n1 % 10) = 0)
Rows Removed by Filter: 1
-> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=4)
- Filter: ((x1 > 15000) AND (r1.id1 = id) AND ((x1)::text > '000000'::text))
+ Filter: ((x1 > 15000) AND (id = r1.id1) AND ((x1)::text > '000000'::text))
Rows Removed by Filter: 999
Columnar Projected Columns: id, x1, x2, x3
- Columnar Chunk Group Filters: ((x1 > 15000) AND (r1.id1 = id))
+ Columnar Chunk Group Filters: ((x1 > 15000) AND (id = r1.id1))
Columnar Chunk Groups Removed by Filter: 19
(10 rows)
@@ -413,10 +417,10 @@ SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
-> Seq Scan on r2 (actual rows=5 loops=5)
-> Seq Scan on r3 (actual rows=5 loops=5)
-> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5)
- Filter: (r1.id1 = id)
+ Filter: (id = r1.id1)
Rows Removed by Filter: 999
Columnar Projected Columns: id, x1, x2, x3
- Columnar Chunk Group Filters: (r1.id1 = id)
+ Columnar Chunk Group Filters: (id = r1.id1)
Columnar Chunk Groups Removed by Filter: 19
-> Seq Scan on r4 (actual rows=1 loops=5)
-> Seq Scan on r5 (actual rows=1 loops=1)
@@ -588,10 +592,10 @@ DETAIL: parameterized by rels {r3}; 2 clauses pushed down
-> Nested Loop (actual rows=3 loops=1)
-> Seq Scan on r1 (actual rows=5 loops=1)
-> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5)
- Filter: ((r1.n1 > x1) AND (r1.id1 = id))
+ Filter: ((r1.n1 > x1) AND (id = r1.id1))
Rows Removed by Filter: 799
Columnar Projected Columns: id, x1, x2, x3
- Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id))
+ Columnar Chunk Group Filters: ((r1.n1 > x1) AND (id = r1.id1))
Columnar Chunk Groups Removed by Filter: 19
-> Seq Scan on r2 (actual rows=5 loops=3)
-> Seq Scan on r3 (actual rows=5 loops=3)
@@ -618,10 +622,10 @@ SELECT * FROM r1, coltest_part WHERE
-> Seq Scan on r1 (actual rows=5 loops=1)
-> Append (actual rows=1 loops=5)
-> Custom Scan (ColumnarScan) on coltest_part0 coltest_part_1 (actual rows=1 loops=3)
- Filter: ((r1.n1 > x1) AND (r1.id1 = id))
+ Filter: ((r1.n1 > x1) AND (id = r1.id1))
Rows Removed by Filter: 999
Columnar Projected Columns: id, x1, x2, x3
- Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id))
+ Columnar Chunk Group Filters: ((r1.n1 > x1) AND (id = r1.id1))
Columnar Chunk Groups Removed by Filter: 9
-> Seq Scan on coltest_part1 coltest_part_2 (actual rows=0 loops=2)
Filter: ((r1.n1 > x1) AND (r1.id1 = id))
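
The only changes in this file are commuted join quals in EXPLAIN, e.g. (r1.id1 = id) becoming (id = r1.id1), which result from the PG16 outer-join rework referenced in the header comment. Because older servers still print the original form, the pre-PG16 plans are preserved in the alternative expected file columnar_chunk_filtering_0.out introduced next; pg_regress accepts whichever expected file matches. An illustrative fragment only, with the join forced to a parameterized nested loop as in this test:

    EXPLAIN (COSTS OFF) SELECT * FROM r1, coltest WHERE r1.id1 = coltest.id;
    --   Filter: (id = r1.id1)     -- PG16
    --   Filter: (r1.id1 = id)     -- PG15 and earlier
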
diff --git a/src/test/regress/expected/columnar_chunk_filtering_0.out b/src/test/regress/expected/columnar_chunk_filtering_0.out
new file mode 100644
index 000000000..746f3406f
--- /dev/null
+++ b/src/test/regress/expected/columnar_chunk_filtering_0.out
@@ -0,0 +1,1138 @@
+--
+-- Test chunk filtering in columnar using min/max values in stripe skip lists.
+--
+-- It has an alternative test output file
+-- because PG16 changed the order of some Filters in EXPLAIN
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d
+--
+-- filtered_row_count returns number of rows filtered by the WHERE clause.
+-- If chunks get filtered by columnar, fewer rows are passed to WHERE
+-- clause, so this function should return a lower number.
+--
+CREATE OR REPLACE FUNCTION filtered_row_count (query text) RETURNS bigint AS
+$$
+ DECLARE
+ result bigint;
+ rec text;
+ BEGIN
+ result := 0;
+
+ FOR rec IN EXECUTE 'EXPLAIN ANALYZE ' || query LOOP
+ IF rec ~ '^\s+Rows Removed by Filter' then
+ result := regexp_replace(rec, '[^0-9]*', '', 'g');
+ END IF;
+ END LOOP;
+
+ RETURN result;
+ END;
+$$ LANGUAGE PLPGSQL;
+set columnar.qual_pushdown_correlation = 0.0;
+-- Create and load data
+-- chunk_group_row_limit '1000', stripe_row_limit '2000'
+set columnar.stripe_row_limit = 2000;
+set columnar.chunk_group_row_limit = 1000;
+CREATE TABLE test_chunk_filtering (a int)
+ USING columnar;
+INSERT INTO test_chunk_filtering SELECT generate_series(1,10000);
+-- Verify that filtered_row_count is less than 1000 for the following queries
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering');
+ filtered_row_count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 200');
+ filtered_row_count
+---------------------------------------------------------------------
+ 801
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a > 200');
+ filtered_row_count
+---------------------------------------------------------------------
+ 200
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 9900');
+ filtered_row_count
+---------------------------------------------------------------------
+ 101
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a > 9900');
+ filtered_row_count
+---------------------------------------------------------------------
+ 900
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 0');
+ filtered_row_count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+-- Verify that filtered_row_count is less than 2000 for the following queries
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 1 AND 10');
+ filtered_row_count
+---------------------------------------------------------------------
+ 990
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 990 AND 2010');
+ filtered_row_count
+---------------------------------------------------------------------
+ 1979
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN -10 AND 0');
+ filtered_row_count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+-- Load data for second time and verify that filtered_row_count is exactly twice as before
+INSERT INTO test_chunk_filtering SELECT generate_series(1,10000);
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 200');
+ filtered_row_count
+---------------------------------------------------------------------
+ 1602
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a < 0');
+ filtered_row_count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+SELECT filtered_row_count('SELECT count(*) FROM test_chunk_filtering WHERE a BETWEEN 990 AND 2010');
+ filtered_row_count
+---------------------------------------------------------------------
+ 3958
+(1 row)
+
+set columnar.stripe_row_limit to default;
+set columnar.chunk_group_row_limit to default;
+-- Verify that we are fine with collations which use a different alphabet order
+CREATE TABLE collation_chunk_filtering_test(A text collate "da_DK")
+ USING columnar;
+COPY collation_chunk_filtering_test FROM STDIN;
+SELECT * FROM collation_chunk_filtering_test WHERE A > 'B';
+ a
+---------------------------------------------------------------------
+ Å
+(1 row)
+
+CREATE TABLE simple_chunk_filtering(i int) USING COLUMNAR;
+INSERT INTO simple_chunk_filtering SELECT generate_series(0,234567);
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT * FROM simple_chunk_filtering WHERE i > 123456;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=111111 loops=1)
+ Filter: (i > 123456)
+ Rows Removed by Filter: 3457
+ Columnar Projected Columns: i
+ Columnar Chunk Group Filters: (i > 123456)
+ Columnar Chunk Groups Removed by Filter: 12
+(6 rows)
+
+SET columnar.enable_qual_pushdown = false;
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT * FROM simple_chunk_filtering WHERE i > 123456;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=111111 loops=1)
+ Filter: (i > 123456)
+ Rows Removed by Filter: 123457
+ Columnar Projected Columns: i
+(4 rows)
+
+SET columnar.enable_qual_pushdown TO DEFAULT;
+-- https://github.com/citusdata/citus/issues/4555
+TRUNCATE simple_chunk_filtering;
+INSERT INTO simple_chunk_filtering SELECT generate_series(0,200000);
+COPY (SELECT * FROM simple_chunk_filtering WHERE i > 180000) TO '/dev/null';
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT * FROM simple_chunk_filtering WHERE i > 180000;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on simple_chunk_filtering (actual rows=20000 loops=1)
+ Filter: (i > 180000)
+ Rows Removed by Filter: 1
+ Columnar Projected Columns: i
+ Columnar Chunk Group Filters: (i > 180000)
+ Columnar Chunk Groups Removed by Filter: 18
+(6 rows)
+
+DROP TABLE simple_chunk_filtering;
+CREATE TABLE multi_column_chunk_filtering(a int, b int) USING columnar;
+INSERT INTO multi_column_chunk_filtering SELECT i,i+1 FROM generate_series(0,234567) i;
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=184567 loops=1)
+ Filter: (a > 50000)
+ Rows Removed by Filter: 1
+ Columnar Projected Columns: a
+ Columnar Chunk Group Filters: (a > 50000)
+ Columnar Chunk Groups Removed by Filter: 5
+(7 rows)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT count(*) FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=184567 loops=1)
+ Filter: ((a > 50000) AND (b > 50000))
+ Rows Removed by Filter: 1
+ Columnar Projected Columns: a, b
+ Columnar Chunk Group Filters: ((a > 50000) AND (b > 50000))
+ Columnar Chunk Groups Removed by Filter: 5
+(7 rows)
+
+-- make next tests faster
+TRUNCATE multi_column_chunk_filtering;
+INSERT INTO multi_column_chunk_filtering SELECT generate_series(0,5);
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT b FROM multi_column_chunk_filtering WHERE a > 50000 AND b > 50000;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1)
+ Filter: ((a > 50000) AND (b > 50000))
+ Columnar Projected Columns: a, b
+ Columnar Chunk Group Filters: ((a > 50000) AND (b > 50000))
+ Columnar Chunk Groups Removed by Filter: 1
+(5 rows)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT b, a FROM multi_column_chunk_filtering WHERE b > 50000;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1)
+ Filter: (b > 50000)
+ Rows Removed by Filter: 6
+ Columnar Projected Columns: a, b
+ Columnar Chunk Group Filters: (b > 50000)
+ Columnar Chunk Groups Removed by Filter: 0
+(6 rows)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT FROM multi_column_chunk_filtering WHERE a > 50000;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=0 loops=1)
+ Filter: (a > 50000)
+ Columnar Projected Columns: a
+ Columnar Chunk Group Filters: (a > 50000)
+ Columnar Chunk Groups Removed by Filter: 1
+(5 rows)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT FROM multi_column_chunk_filtering;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=1)
+ Columnar Projected Columns:
+(2 rows)
+
+BEGIN;
+ ALTER TABLE multi_column_chunk_filtering DROP COLUMN a;
+ ALTER TABLE multi_column_chunk_filtering DROP COLUMN b;
+ EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT * FROM multi_column_chunk_filtering;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=1)
+ Columnar Projected Columns:
+(2 rows)
+
+ROLLBACK;
+CREATE TABLE another_columnar_table(x int, y int) USING columnar;
+INSERT INTO another_columnar_table SELECT generate_series(0,5);
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT a, y FROM multi_column_chunk_filtering, another_columnar_table WHERE x > 1;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Nested Loop (actual rows=24 loops=1)
+ -> Custom Scan (ColumnarScan) on another_columnar_table (actual rows=4 loops=1)
+ Filter: (x > 1)
+ Rows Removed by Filter: 2
+ Columnar Projected Columns: x, y
+ Columnar Chunk Group Filters: (x > 1)
+ Columnar Chunk Groups Removed by Filter: 0
+ -> Custom Scan (ColumnarScan) on multi_column_chunk_filtering (actual rows=6 loops=4)
+ Columnar Projected Columns: a
+(9 rows)
+
+EXPLAIN (costs off, timing off, summary off)
+ SELECT y, * FROM another_columnar_table;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on another_columnar_table
+ Columnar Projected Columns: x, y
+(2 rows)
+
+EXPLAIN (costs off, timing off, summary off)
+ SELECT *, x FROM another_columnar_table;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on another_columnar_table
+ Columnar Projected Columns: x, y
+(2 rows)
+
+EXPLAIN (costs off, timing off, summary off)
+ SELECT y, another_columnar_table FROM another_columnar_table;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on another_columnar_table
+ Columnar Projected Columns: x, y
+(2 rows)
+
+EXPLAIN (costs off, timing off, summary off)
+ SELECT another_columnar_table, x FROM another_columnar_table;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on another_columnar_table
+ Columnar Projected Columns: x, y
+(2 rows)
+
+DROP TABLE multi_column_chunk_filtering, another_columnar_table;
+--
+-- https://github.com/citusdata/citus/issues/4780
+--
+create table part_table (id int) partition by range (id);
+create table part_1_row partition of part_table for values from (150000) to (160000);
+create table part_2_columnar partition of part_table for values from (0) to (150000) using columnar;
+insert into part_table select generate_series(1,159999);
+select filtered_row_count('select count(*) from part_table where id > 75000');
+ filtered_row_count
+---------------------------------------------------------------------
+ 5000
+(1 row)
+
+drop table part_table;
+-- test join parameterization
+set columnar.stripe_row_limit = 2000;
+set columnar.chunk_group_row_limit = 1000;
+create table r1(id1 int, n1 int); -- row
+create table r2(id2 int, n2 int); -- row
+create table r3(id3 int, n3 int); -- row
+create table r4(id4 int, n4 int); -- row
+create table r5(id5 int, n5 int); -- row
+create table r6(id6 int, n6 int); -- row
+create table r7(id7 int, n7 int); -- row
+create table coltest(id int, x1 int, x2 int, x3 int) using columnar;
+create table coltest_part(id int, x1 int, x2 int, x3 int)
+ partition by range (id);
+create table coltest_part0
+ partition of coltest_part for values from (0) to (10000)
+ using columnar;
+create table coltest_part1
+ partition of coltest_part for values from (10000) to (20000); -- row
+set columnar.stripe_row_limit to default;
+set columnar.chunk_group_row_limit to default;
+insert into r1 values(1234, 12350);
+insert into r1 values(4567, 45000);
+insert into r1 values(9101, 176000);
+insert into r1 values(14202, 7);
+insert into r1 values(18942, 189430);
+insert into r2 values(1234, 123502);
+insert into r2 values(4567, 450002);
+insert into r2 values(9101, 1760002);
+insert into r2 values(14202, 72);
+insert into r2 values(18942, 1894302);
+insert into r3 values(1234, 1235075);
+insert into r3 values(4567, 4500075);
+insert into r3 values(9101, 17600075);
+insert into r3 values(14202, 775);
+insert into r3 values(18942, 18943075);
+insert into r4 values(1234, -1);
+insert into r5 values(1234, -1);
+insert into r6 values(1234, -1);
+insert into r7 values(1234, -1);
+insert into coltest
+ select g, g*10, g*100, g*1000 from generate_series(0, 19999) g;
+insert into coltest_part
+ select g, g*10, g*100, g*1000 from generate_series(0, 19999) g;
+ANALYZE r1, r2, r3, coltest, coltest_part;
+-- force nested loop
+set enable_mergejoin=false;
+set enable_hashjoin=false;
+set enable_material=false;
+-- test different kinds of expressions
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT * FROM r1, coltest WHERE
+ id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Nested Loop (actual rows=3 loops=1)
+ -> Seq Scan on r1 (actual rows=4 loops=1)
+ Filter: ((n1 % 10) = 0)
+ Rows Removed by Filter: 1
+ -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=4)
+ Filter: ((x1 > 15000) AND (r1.id1 = id) AND ((x1)::text > '000000'::text))
+ Rows Removed by Filter: 999
+ Columnar Projected Columns: id, x1, x2, x3
+ Columnar Chunk Group Filters: ((x1 > 15000) AND (r1.id1 = id))
+ Columnar Chunk Groups Removed by Filter: 19
+(10 rows)
+
+SELECT * FROM r1, coltest WHERE
+ id1 = id AND x1 > 15000 AND x1::text > '000000' AND n1 % 10 = 0;
+ id1 | n1 | id | x1 | x2 | x3
+---------------------------------------------------------------------
+ 4567 | 45000 | 4567 | 45670 | 456700 | 4567000
+ 9101 | 176000 | 9101 | 91010 | 910100 | 9101000
+ 18942 | 189430 | 18942 | 189420 | 1894200 | 18942000
+(3 rows)
+
+-- test equivalence classes
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
+ id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND
+ id4 = id5 AND id5 = id6 AND id6 = id7;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Nested Loop (actual rows=1 loops=1)
+ Join Filter: (coltest.id = r7.id7)
+ -> Nested Loop (actual rows=1 loops=1)
+ Join Filter: (coltest.id = r6.id6)
+ -> Nested Loop (actual rows=1 loops=1)
+ Join Filter: (coltest.id = r5.id5)
+ -> Nested Loop (actual rows=1 loops=1)
+ Join Filter: (coltest.id = r4.id4)
+ Rows Removed by Join Filter: 4
+ -> Nested Loop (actual rows=5 loops=1)
+ -> Nested Loop (actual rows=5 loops=1)
+ Join Filter: (r1.id1 = r3.id3)
+ Rows Removed by Join Filter: 20
+ -> Nested Loop (actual rows=5 loops=1)
+ Join Filter: (r1.id1 = r2.id2)
+ Rows Removed by Join Filter: 20
+ -> Seq Scan on r1 (actual rows=5 loops=1)
+ -> Seq Scan on r2 (actual rows=5 loops=5)
+ -> Seq Scan on r3 (actual rows=5 loops=5)
+ -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5)
+ Filter: (r1.id1 = id)
+ Rows Removed by Filter: 999
+ Columnar Projected Columns: id, x1, x2, x3
+ Columnar Chunk Group Filters: (r1.id1 = id)
+ Columnar Chunk Groups Removed by Filter: 19
+ -> Seq Scan on r4 (actual rows=1 loops=5)
+ -> Seq Scan on r5 (actual rows=1 loops=1)
+ -> Seq Scan on r6 (actual rows=1 loops=1)
+ -> Seq Scan on r7 (actual rows=1 loops=1)
+(29 rows)
+
+SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
+ id = id1 AND id1 = id2 AND id2 = id3 AND id3 = id4 AND
+ id4 = id5 AND id5 = id6 AND id6 = id7;
+ id1 | n1 | id2 | n2 | id3 | n3 | id4 | n4 | id5 | n5 | id6 | n6 | id7 | n7 | id | x1 | x2 | x3
+---------------------------------------------------------------------
+ 1234 | 12350 | 1234 | 123502 | 1234 | 1235075 | 1234 | -1 | 1234 | -1 | 1234 | -1 | 1234 | -1 | 1234 | 12340 | 123400 | 1234000
+(1 row)
+
+-- test path generation with different thresholds
+set columnar.planner_debug_level = 'notice';
+set columnar.max_custom_scan_paths to 10;
+EXPLAIN (costs off, timing off, summary off)
+ SELECT * FROM coltest c1, coltest c2, coltest c3, coltest c4 WHERE
+ c1.id = c2.id and c1.id = c3.id and c1.id = c4.id;
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: parameterized by rels {c2}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: parameterized by rels {c2, c3}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: parameterized by rels {c2, c3, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: parameterized by rels {c2, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: parameterized by rels {c3}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: parameterized by rels {c3, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: parameterized by rels {c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: parameterized by rels {c1}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: parameterized by rels {c1, c3}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: parameterized by rels {c1, c3, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: parameterized by rels {c1, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: parameterized by rels {c3}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: parameterized by rels {c3, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: parameterized by rels {c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: parameterized by rels {c1}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: parameterized by rels {c1, c2}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: parameterized by rels {c1, c2, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: parameterized by rels {c1, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: parameterized by rels {c2}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: parameterized by rels {c2, c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: parameterized by rels {c4}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: parameterized by rels {c1}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: parameterized by rels {c1, c2}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: parameterized by rels {c1, c2, c3}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: parameterized by rels {c1, c3}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: parameterized by rels {c2}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: parameterized by rels {c2, c3}; 1 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: parameterized by rels {c3}; 1 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Nested Loop
+ -> Nested Loop
+ -> Nested Loop
+ -> Custom Scan (ColumnarScan) on coltest c1
+ Columnar Projected Columns: id, x1, x2, x3
+ -> Custom Scan (ColumnarScan) on coltest c2
+ Filter: (c1.id = id)
+ Columnar Projected Columns: id, x1, x2, x3
+ Columnar Chunk Group Filters: (c1.id = id)
+ -> Custom Scan (ColumnarScan) on coltest c3
+ Filter: (c1.id = id)
+ Columnar Projected Columns: id, x1, x2, x3
+ Columnar Chunk Group Filters: (c1.id = id)
+ -> Custom Scan (ColumnarScan) on coltest c4
+ Filter: (c1.id = id)
+ Columnar Projected Columns: id, x1, x2, x3
+ Columnar Chunk Group Filters: (c1.id = id)
+(17 rows)
+
+set columnar.max_custom_scan_paths to 2;
+EXPLAIN (costs off, timing off, summary off)
+ SELECT * FROM coltest c1, coltest c2, coltest c3, coltest c4 WHERE
+ c1.id = c2.id and c1.id = c3.id and c1.id = c4.id;
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c2
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c3
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for c4
+DETAIL: unparameterized; 0 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Nested Loop
+ Join Filter: (c1.id = c4.id)
+ -> Nested Loop
+ Join Filter: (c1.id = c3.id)
+ -> Nested Loop
+ Join Filter: (c1.id = c2.id)
+ -> Custom Scan (ColumnarScan) on coltest c1
+ Columnar Projected Columns: id, x1, x2, x3
+ -> Custom Scan (ColumnarScan) on coltest c2
+ Columnar Projected Columns: id, x1, x2, x3
+ -> Custom Scan (ColumnarScan) on coltest c3
+ Columnar Projected Columns: id, x1, x2, x3
+ -> Custom Scan (ColumnarScan) on coltest c4
+ Columnar Projected Columns: id, x1, x2, x3
+(14 rows)
+
+set columnar.max_custom_scan_paths to default;
+set columnar.planner_debug_level to default;
+-- test more complex parameterization
+set columnar.planner_debug_level = 'notice';
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT * FROM r1, r2, r3, coltest WHERE
+ id1 = id2 AND id2 = id3 AND id3 = id AND
+ n1 > x1 AND n2 > x2 AND n3 > x3;
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: parameterized by rels {r1}; 2 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: parameterized by rels {r1, r2}; 3 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: parameterized by rels {r1, r2, r3}; 4 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: parameterized by rels {r1, r3}; 3 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: parameterized by rels {r2}; 2 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: parameterized by rels {r2, r3}; 3 clauses pushed down
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: parameterized by rels {r3}; 2 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Nested Loop (actual rows=3 loops=1)
+ Join Filter: ((r3.n3 > coltest.x3) AND (r1.id1 = r3.id3))
+ Rows Removed by Join Filter: 12
+ -> Nested Loop (actual rows=3 loops=1)
+ Join Filter: ((r2.n2 > coltest.x2) AND (r1.id1 = r2.id2))
+ Rows Removed by Join Filter: 12
+ -> Nested Loop (actual rows=3 loops=1)
+ -> Seq Scan on r1 (actual rows=5 loops=1)
+ -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5)
+ Filter: ((r1.n1 > x1) AND (r1.id1 = id))
+ Rows Removed by Filter: 799
+ Columnar Projected Columns: id, x1, x2, x3
+ Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id))
+ Columnar Chunk Groups Removed by Filter: 19
+ -> Seq Scan on r2 (actual rows=5 loops=3)
+ -> Seq Scan on r3 (actual rows=5 loops=3)
+(16 rows)
+
+set columnar.planner_debug_level to default;
+SELECT * FROM r1, r2, r3, coltest WHERE
+ id1 = id2 AND id2 = id3 AND id3 = id AND
+ n1 > x1 AND n2 > x2 AND n3 > x3;
+ id1 | n1 | id2 | n2 | id3 | n3 | id | x1 | x2 | x3
+---------------------------------------------------------------------
+ 1234 | 12350 | 1234 | 123502 | 1234 | 1235075 | 1234 | 12340 | 123400 | 1234000
+ 9101 | 176000 | 9101 | 1760002 | 9101 | 17600075 | 9101 | 91010 | 910100 | 9101000
+ 18942 | 189430 | 18942 | 1894302 | 18942 | 18943075 | 18942 | 189420 | 1894200 | 18942000
+(3 rows)
+
+-- test partitioning parameterization
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT * FROM r1, coltest_part WHERE
+ id1 = id AND n1 > x1;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Nested Loop (actual rows=3 loops=1)
+ -> Seq Scan on r1 (actual rows=5 loops=1)
+ -> Append (actual rows=1 loops=5)
+ -> Custom Scan (ColumnarScan) on coltest_part0 coltest_part_1 (actual rows=1 loops=3)
+ Filter: ((r1.n1 > x1) AND (r1.id1 = id))
+ Rows Removed by Filter: 999
+ Columnar Projected Columns: id, x1, x2, x3
+ Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id))
+ Columnar Chunk Groups Removed by Filter: 9
+ -> Seq Scan on coltest_part1 coltest_part_2 (actual rows=0 loops=2)
+ Filter: ((r1.n1 > x1) AND (r1.id1 = id))
+ Rows Removed by Filter: 10000
+(12 rows)
+
+SELECT * FROM r1, coltest_part WHERE
+ id1 = id AND n1 > x1;
+ id1 | n1 | id | x1 | x2 | x3
+---------------------------------------------------------------------
+ 1234 | 12350 | 1234 | 12340 | 123400 | 1234000
+ 9101 | 176000 | 9101 | 91010 | 910100 | 9101000
+ 18942 | 189430 | 18942 | 189420 | 1894200 | 18942000
+(3 rows)
+
+set enable_mergejoin to default;
+set enable_hashjoin to default;
+set enable_material to default;
+set columnar.planner_debug_level = 'notice';
+alter table coltest add column x5 int default (random()*20000)::int;
+analyze coltest;
+-- test that expressions on whole-row references are not pushed down
+select * from coltest where coltest = (1,1,1,1);
+NOTICE: columnar planner: cannot push down clause: var is whole-row reference or system column
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: unparameterized; 0 clauses pushed down
+ id | x1 | x2 | x3 | x5
+---------------------------------------------------------------------
+(0 rows)
+
+-- test that expressions on uncorrelated attributes are not pushed down
+set columnar.qual_pushdown_correlation to default;
+select * from coltest where x5 = 23484;
+NOTICE: columnar planner: cannot push down clause: absolute correlation (X.YZ) of var attribute 5 is smaller than the value configured in "columnar.qual_pushdown_correlation_threshold" (0.900)
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: unparameterized; 0 clauses pushed down
+ id | x1 | x2 | x3 | x5
+---------------------------------------------------------------------
+(0 rows)
+
+-- test that expressions on volatile functions are not pushed down
+create function vol() returns int language plpgsql as $$
+BEGIN
+ RETURN 1;
+END;
+$$;
+select * from coltest where x3 = vol();
+NOTICE: columnar planner: cannot push down clause: expr contains volatile functions
+NOTICE: columnar planner: adding CustomScan path for coltest
+DETAIL: unparameterized; 0 clauses pushed down
+ id | x1 | x2 | x3 | x5
+---------------------------------------------------------------------
+(0 rows)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+ SELECT * FROM coltest c1 WHERE ceil(x1) > 4222;
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: adding CustomScan path for c1
+DETAIL: unparameterized; 0 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on coltest c1 (actual rows=19577 loops=1)
+ Filter: (ceil((x1)::double precision) > '4222'::double precision)
+ Rows Removed by Filter: 423
+ Columnar Projected Columns: id, x1, x2, x3, x5
+(4 rows)
+
+set columnar.planner_debug_level to default;
+--
+-- https://github.com/citusdata/citus/issues/4488
+--
+create table columnar_prepared_stmt (x int, y int) using columnar;
+insert into columnar_prepared_stmt select s, s from generate_series(1,5000000) s;
+prepare foo (int) as select x from columnar_prepared_stmt where x = $1;
+execute foo(3);
+ x
+---------------------------------------------------------------------
+ 3
+(1 row)
+
+execute foo(3);
+ x
+---------------------------------------------------------------------
+ 3
+(1 row)
+
+execute foo(3);
+ x
+---------------------------------------------------------------------
+ 3
+(1 row)
+
+execute foo(3);
+ x
+---------------------------------------------------------------------
+ 3
+(1 row)
+
+select filtered_row_count('execute foo(3)');
+ filtered_row_count
+---------------------------------------------------------------------
+ 9999
+(1 row)
+
+select filtered_row_count('execute foo(3)');
+ filtered_row_count
+---------------------------------------------------------------------
+ 9999
+(1 row)
+
+select filtered_row_count('execute foo(3)');
+ filtered_row_count
+---------------------------------------------------------------------
+ 9999
+(1 row)
+
+select filtered_row_count('execute foo(3)');
+ filtered_row_count
+---------------------------------------------------------------------
+ 9999
+(1 row)
+
+drop table columnar_prepared_stmt;
+--
+-- https://github.com/citusdata/citus/issues/5258
+--
+set default_table_access_method to columnar;
+CREATE TABLE atest1 ( a int, b text );
+CREATE TABLE atest2 (col1 varchar(10), col2 boolean);
+INSERT INTO atest1 VALUES (1, 'one');
+SELECT * FROM atest1; -- ok
+ a | b
+---------------------------------------------------------------------
+ 1 | one
+(1 row)
+
+SELECT * FROM atest2; -- ok
+ col1 | col2
+---------------------------------------------------------------------
+(0 rows)
+
+INSERT INTO atest1 VALUES (2, 'two'); -- ok
+INSERT INTO atest1 SELECT 1, b FROM atest1; -- ok
+SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) );
+ col1 | col2
+---------------------------------------------------------------------
+(0 rows)
+
+CREATE TABLE t1 (name TEXT, n INTEGER);
+CREATE TABLE t2 (name TEXT, n INTEGER);
+CREATE TABLE t3 (name TEXT, n INTEGER);
+INSERT INTO t1 VALUES ( 'bb', 11 );
+INSERT INTO t2 VALUES ( 'bb', 12 );
+INSERT INTO t2 VALUES ( 'cc', 22 );
+INSERT INTO t2 VALUES ( 'ee', 42 );
+INSERT INTO t3 VALUES ( 'bb', 13 );
+INSERT INTO t3 VALUES ( 'cc', 23 );
+INSERT INTO t3 VALUES ( 'dd', 33 );
+SELECT * FROM
+(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1
+NATURAL INNER JOIN
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL INNER JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+ name | s1_n | s1_1 | s2_n | s2_2 | s3_n | s3_2
+---------------------------------------------------------------------
+ bb | 11 | 1 | 12 | 2 | 13 | 3
+(1 row)
+
+CREATE TABLE numrange_test (nr NUMRANGE);
+INSERT INTO numrange_test VALUES('[,)');
+INSERT INTO numrange_test VALUES('[3,]');
+INSERT INTO numrange_test VALUES('[, 5)');
+INSERT INTO numrange_test VALUES(numrange(1.1, 2.2));
+INSERT INTO numrange_test VALUES('empty');
+INSERT INTO numrange_test VALUES(numrange(1.7, 1.7, '[]'));
+create table numrange_test2(nr numrange);
+INSERT INTO numrange_test2 VALUES('[, 5)');
+INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2));
+INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2));
+INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2,'()'));
+INSERT INTO numrange_test2 VALUES('empty');
+set enable_nestloop=t;
+set enable_hashjoin=f;
+set enable_mergejoin=f;
+select * from numrange_test natural join numrange_test2 order by nr;
+ nr
+---------------------------------------------------------------------
+ empty
+ (,5)
+ [1.1,2.2)
+ [1.1,2.2)
+(4 rows)
+
+DROP TABLE atest1, atest2, t1, t2, t3, numrange_test, numrange_test2;
+set default_table_access_method to default;
+set columnar.planner_debug_level to notice;
+BEGIN;
+ SET LOCAL columnar.stripe_row_limit = 2000;
+ SET LOCAL columnar.chunk_group_row_limit = 1000;
+ create table pushdown_test (a int, b int) using columnar;
+ insert into pushdown_test values (generate_series(1, 200000));
+COMMIT;
+SET columnar.max_custom_scan_paths TO 50;
+SET columnar.qual_pushdown_correlation_threshold TO 0.0;
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=2 loops=1)
+ Filter: ((a = 204356) OR (a = 104356) OR (a = 76556))
+ Rows Removed by Filter: 1998
+ Columnar Projected Columns: a
+ Columnar Chunk Group Filters: ((a = 204356) OR (a = 104356) OR (a = 76556))
+ Columnar Chunk Groups Removed by Filter: 198
+(7 rows)
+
+SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a = 104356 or a = 76556;
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ sum
+---------------------------------------------------------------------
+ 180912
+(1 row)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1)
+ Filter: ((a = 194356) OR (a = 104356) OR (a = 76556))
+ Rows Removed by Filter: 2997
+ Columnar Projected Columns: a
+ Columnar Chunk Group Filters: ((a = 194356) OR (a = 104356) OR (a = 76556))
+ Columnar Chunk Groups Removed by Filter: 197
+(7 rows)
+
+SELECT sum(a) FROM pushdown_test WHERE a = 194356 or a = 104356 or a = 76556;
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ sum
+---------------------------------------------------------------------
+ 375268
+(1 row)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT sum(a) FROM pushdown_test WHERE a = 204356 or a > a*-1 + b;
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 0 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=0 loops=1)
+ Filter: ((a = 204356) OR (a > ((a * '-1'::integer) + b)))
+ Rows Removed by Filter: 200000
+ Columnar Projected Columns: a, b
+(5 rows)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=38998 loops=1)
+ Filter: (((a > 1000) AND (a < 10000)) OR ((a > 20000) AND (a < 50000)))
+ Rows Removed by Filter: 2
+ Columnar Projected Columns: a
+ Columnar Chunk Group Filters: (((a > 1000) AND (a < 10000)) OR ((a > 20000) AND (a < 50000)))
+ Columnar Chunk Groups Removed by Filter: 161
+(7 rows)
+
+SELECT sum(a) FROM pushdown_test where (a > 1000 and a < 10000) or (a > 20000 and a < 50000);
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ sum
+---------------------------------------------------------------------
+ 1099459500
+(1 row)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: none of the arguments were pushdownable, due to the reason(s) given above
+NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 0 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=200000 loops=1)
+ Filter: ((((a)::double precision > random()) AND (a < (2 * a))) OR (a > 100))
+ Columnar Projected Columns: a
+(4 rows)
+
+SELECT sum(a) FROM pushdown_test where (a > random() and a < 2*a) or (a > 100);
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: none of the arguments were pushdownable, due to the reason(s) given above
+NOTICE: columnar planner: cannot push down clause: all arguments of an OR expression must be pushdownable but one of them was not, due to the reason given above
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 0 clauses pushed down
+ sum
+---------------------------------------------------------------------
+ 20000100000
+(1 row)
+
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3010 loops=1)
+ Filter: ((((a)::double precision > random()) AND (a <= 2000)) OR (a > 198990))
+ Rows Removed by Filter: 990
+ Columnar Projected Columns: a
+ Columnar Chunk Group Filters: ((a <= 2000) OR (a > 198990))
+ Columnar Chunk Groups Removed by Filter: 196
+(7 rows)
+
+SELECT sum(a) FROM pushdown_test where (a > random() and a <= 2000) or (a > 200000-1010);
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ sum
+---------------------------------------------------------------------
+ 203491455
+(1 row)
+
+SET hash_mem_multiplier = 1.0;
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT sum(a) FROM pushdown_test where
+(
+ a > random()
+ and
+ (
+ (a < 200 and a not in (select a from pushdown_test)) or
+ (a > 1000 and a < 2000)
+ )
+)
+or
+(a > 200000-2010);
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: must not contain a subplan
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=3009 loops=1)
+ Filter: ((((a)::double precision > random()) AND (((a < 200) AND (NOT (SubPlan 1))) OR ((a > 1000) AND (a < 2000)))) OR (a > 197990))
+ Rows Removed by Filter: 1991
+ Columnar Projected Columns: a
+ Columnar Chunk Group Filters: (((a < 200) OR ((a > 1000) AND (a < 2000))) OR (a > 197990))
+ Columnar Chunk Groups Removed by Filter: 195
+ SubPlan 1
+ -> Materialize (actual rows=100 loops=199)
+ -> Custom Scan (ColumnarScan) on pushdown_test pushdown_test_1 (actual rows=199 loops=1)
+ Columnar Projected Columns: a
+(11 rows)
+
+RESET hash_mem_multiplier;
+SELECT sum(a) FROM pushdown_test where
+(
+ a > random()
+ and
+ (
+ (a < 200 and a not in (select a from pushdown_test)) or
+ (a > 1000 and a < 2000)
+ )
+)
+or
+(a > 200000-2010);
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 0 clauses pushed down
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: must not contain a subplan
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ sum
+---------------------------------------------------------------------
+ 401479455
+(1 row)
+
+create function stable_1(arg int) returns int language plpgsql STRICT IMMUTABLE as
+$$ BEGIN RETURN 1+arg; END; $$;
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (ColumnarScan) on pushdown_test (actual rows=0 loops=1)
+ Filter: ((a < 6001) AND ((a)::double precision = random()) AND (a < stable_1(a)))
+ Rows Removed by Filter: 6000
+ Columnar Projected Columns: a
+ Columnar Chunk Group Filters: (a < 6001)
+ Columnar Chunk Groups Removed by Filter: 194
+(7 rows)
+
+SELECT sum(a) FROM pushdown_test where (a = random() and a < stable_1(a) and a < stable_1(6000));
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: cannot push down clause: must match 'Var Expr' or 'Expr Var'
+HINT: Var must only reference this rel, and Expr must not reference this rel
+NOTICE: columnar planner: adding CustomScan path for pushdown_test
+DETAIL: unparameterized; 1 clauses pushed down
+ sum
+---------------------------------------------------------------------
+
+(1 row)
+
+RESET columnar.max_custom_scan_paths;
+RESET columnar.qual_pushdown_correlation_threshold;
+RESET columnar.planner_debug_level;
+DROP TABLE pushdown_test;
+-- https://github.com/citusdata/citus/issues/5803
+CREATE TABLE pushdown_test(id int, country text) using columnar;
+BEGIN;
+ INSERT INTO pushdown_test VALUES(1, 'AL');
+ INSERT INTO pushdown_test VALUES(2, 'AU');
+END;
+BEGIN;
+ INSERT INTO pushdown_test VALUES(3, 'BR');
+ INSERT INTO pushdown_test VALUES(4, 'BT');
+END;
+BEGIN;
+ INSERT INTO pushdown_test VALUES(5, 'PK');
+ INSERT INTO pushdown_test VALUES(6, 'PA');
+END;
+BEGIN;
+ INSERT INTO pushdown_test VALUES(7, 'USA');
+ INSERT INTO pushdown_test VALUES(8, 'ZW');
+END;
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW');
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1)
+ Filter: (country = ANY ('{USA,BR,ZW}'::text[]))
+ Rows Removed by Filter: 1
+ Columnar Projected Columns: id, country
+ Columnar Chunk Group Filters: (country = ANY ('{USA,BR,ZW}'::text[]))
+ Columnar Chunk Groups Removed by Filter: 2
+(6 rows)
+
+SELECT id FROM pushdown_test WHERE country IN ('USA', 'BR', 'ZW');
+ id
+---------------------------------------------------------------------
+ 3
+ 7
+ 8
+(3 rows)
+
+-- test for volatile functions with IN
+CREATE FUNCTION volatileFunction() returns TEXT language plpgsql AS
+$$
+BEGIN
+ return 'AL';
+END;
+$$;
+EXPLAIN (analyze on, costs off, timing off, summary off)
+SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (ColumnarScan) on pushdown_test (actual rows=3 loops=1)
+ Filter: (country = ANY (ARRAY['USA'::text, 'ZW'::text, volatilefunction()]))
+ Rows Removed by Filter: 5
+ Columnar Projected Columns: id, country
+(4 rows)
+
+SELECT * FROM pushdown_test WHERE country IN ('USA', 'ZW', volatileFunction());
+ id | country
+---------------------------------------------------------------------
+ 1 | AL
+ 7 | USA
+ 8 | ZW
+(3 rows)
+
+DROP TABLE pushdown_test;
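The chunk-group counts reported in the plans above follow directly from the GUCs used when pushdown_test was populated: with columnar.chunk_group_row_limit = 1000, the 200,000 inserted rows land in 200 chunk groups, so a filter whose in-range values fall into two groups reports 198 groups removed (a = 204356 matches nothing because only 1..200000 was inserted), and one touching three groups reports 197. A minimal sketch of the same arithmetic on a throwaway table; the name chunk_demo is illustrative only, assumes a session with the columnar access method available, and is not part of this patch:

    -- illustrative only; mirrors the settings used for pushdown_test above
    SET columnar.stripe_row_limit = 2000;
    SET columnar.chunk_group_row_limit = 1000;
    CREATE TABLE chunk_demo (a int) USING columnar;
    INSERT INTO chunk_demo SELECT generate_series(1, 200000);
    -- 200000 rows / 1000 rows per chunk group = 200 chunk groups;
    -- the two matching values sit in two groups, so 198 groups are skipped
    EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF)
    SELECT count(*) FROM chunk_demo WHERE a = 104356 OR a = 76556;
    DROP TABLE chunk_demo;
    RESET columnar.stripe_row_limit;
    RESET columnar.chunk_group_row_limit;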
diff --git a/src/test/regress/expected/columnar_create.out b/src/test/regress/expected/columnar_create.out
index d679b7790..73b891177 100644
--- a/src/test/regress/expected/columnar_create.out
+++ b/src/test/regress/expected/columnar_create.out
@@ -54,7 +54,7 @@ CREATE MATERIALIZED VIEW columnar_table_1_mv USING columnar
AS SELECT * FROM columnar_table_1;
SELECT columnar.get_storage_id(oid) AS columnar_table_1_mv_storage_id
FROM pg_class WHERE relname='columnar_table_1_mv' \gset
--- test columnar_relation_set_new_filenode
+-- test columnar_relation_set_new_filelocator
REFRESH MATERIALIZED VIEW columnar_table_1_mv;
SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_mv_storage_id);
columnar_metadata_has_storage_id
diff --git a/src/test/regress/expected/columnar_fallback_scan.out b/src/test/regress/expected/columnar_fallback_scan.out
index c31db0c43..b65e4118c 100644
--- a/src/test/regress/expected/columnar_fallback_scan.out
+++ b/src/test/regress/expected/columnar_fallback_scan.out
@@ -21,7 +21,14 @@ select count(*), min(i), max(i), avg(i) from fallback_scan;
-- Negative test: try to force a parallel plan with at least two
-- workers, but columnar should reject it and use a non-parallel scan.
--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+set debug_parallel_query = regress;
+\else
set force_parallel_mode = regress;
+\endif
set min_parallel_table_scan_size = 1;
set parallel_tuple_cost = 0;
set max_parallel_workers = 4;
@@ -39,7 +46,11 @@ select count(*), min(i), max(i), avg(i) from fallback_scan;
150000 | 1 | 150000 | 75000.500000000000
(1 row)
-set force_parallel_mode to default;
+\if :server_version_ge_16
+set debug_parallel_query = default;
+\else
+set force_parallel_mode = default;
+\endif
set min_parallel_table_scan_size to default;
set parallel_tuple_cost to default;
set max_parallel_workers to default;
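The \if blocks added above exist because PostgreSQL 16 renamed force_parallel_mode to debug_parallel_query, so a single expected file has to pick the right GUC at run time. The gating pattern, shown standalone as a sketch (it simply restates what the hunk adds): SHOW server_version \gset captures the version into a psql variable, and substring(:'server_version', '\d+') extracts the leading major-version digits for the comparison.

    SHOW server_version \gset
    SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset
    \if :server_version_ge_16
    SET debug_parallel_query = regress;
    \else
    SET force_parallel_mode = regress;
    \endif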
diff --git a/src/test/regress/expected/columnar_indexes.out b/src/test/regress/expected/columnar_indexes.out
index 2a7c09634..cd05108b2 100644
--- a/src/test/regress/expected/columnar_indexes.out
+++ b/src/test/regress/expected/columnar_indexes.out
@@ -1,4 +1,6 @@
--
+-- COLUMNAR_INDEXES
+--
-- Testing indexes on columnar tables.
--
CREATE SCHEMA columnar_indexes;
@@ -598,6 +600,9 @@ create table events (event_id bigserial, event_time timestamptz default now(), p
BEGIN;
-- this wouldn't flush any data
insert into events (payload) select 'hello-'||s from generate_series(1, 10) s;
+ SHOW server_version \gset
+ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+ \gset
-- Since table is large enough, normally postgres would prefer using
-- parallel workers when building the index.
--
@@ -609,7 +614,11 @@ BEGIN;
-- by postgres and throws an error. For this reason, here we don't expect
-- following command to fail since we prevent using parallel workers for
-- columnar tables.
+ \if :server_version_ge_16
+ SET LOCAL debug_parallel_query = regress;
+ \else
SET LOCAL force_parallel_mode = regress;
+ \endif
SET LOCAL min_parallel_table_scan_size = 1;
SET LOCAL parallel_tuple_cost = 0;
SET LOCAL max_parallel_workers = 4;
diff --git a/src/test/regress/expected/columnar_memory.out b/src/test/regress/expected/columnar_memory.out
index 865472da1..229502437 100644
--- a/src/test/regress/expected/columnar_memory.out
+++ b/src/test/regress/expected/columnar_memory.out
@@ -77,10 +77,10 @@ FROM columnar_test_helpers.columnar_store_memory_stats();
top_growth | 1
-- before this change, max mem usage while executing inserts was 28MB and
--- with this change it's less than 8MB.
+-- with this change it's less than 9MB.
SELECT
- (SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
- (SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
+ (SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
+ (SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
-[ RECORD 1 ]--+--
large_batch_ok | t
first_batch_ok | t
diff --git a/src/test/regress/expected/columnar_partitioning.out b/src/test/regress/expected/columnar_partitioning.out
index e84953a0b..cd530b3f9 100644
--- a/src/test/regress/expected/columnar_partitioning.out
+++ b/src/test/regress/expected/columnar_partitioning.out
@@ -1,3 +1,6 @@
+--
+-- COLUMNAR_PARTITIONING
+--
CREATE TABLE parent(ts timestamptz, i int, n numeric, s text)
PARTITION BY RANGE (ts);
-- row partitions
@@ -17,8 +20,15 @@ INSERT INTO parent SELECT '2020-03-15', 30, 300, 'three thousand'
FROM generate_series(1,100000);
INSERT INTO parent SELECT '2020-04-15', 30, 300, 'three thousand'
FROM generate_series(1,100000);
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
-- run parallel plans
+\if :server_version_ge_16
+SET debug_parallel_query = regress;
+\else
SET force_parallel_mode = regress;
+\endif
SET min_parallel_table_scan_size = 1;
SET parallel_tuple_cost = 0;
SET max_parallel_workers = 4;
@@ -121,7 +131,11 @@ SELECT count(*), sum(i), min(i), max(i) FROM parent;
(1 row)
SET columnar.enable_custom_scan TO DEFAULT;
+\if :server_version_ge_16
+SET debug_parallel_query TO DEFAULT;
+\else
SET force_parallel_mode TO DEFAULT;
+\endif
SET min_parallel_table_scan_size TO DEFAULT;
SET parallel_tuple_cost TO DEFAULT;
SET max_parallel_workers TO DEFAULT;
diff --git a/src/test/regress/expected/columnar_paths.out b/src/test/regress/expected/columnar_paths.out
index 4b32361fa..07b91a42e 100644
--- a/src/test/regress/expected/columnar_paths.out
+++ b/src/test/regress/expected/columnar_paths.out
@@ -326,7 +326,7 @@ WHERE w2.a = 123;
EXPLAIN (COSTS OFF) SELECT sub_1.b, sub_2.a, sub_3.avg
FROM
- (SELECT b FROM full_correlated WHERE (a > 2) GROUP BY b HAVING count(DISTINCT a) > 0 ORDER BY 1 DESC LIMIT 5) AS sub_1,
+ (SELECT b FROM full_correlated WHERE (a > 2) GROUP BY b ORDER BY 1 DESC LIMIT 5) AS sub_1,
(SELECT a FROM full_correlated WHERE (a > 10) GROUP BY a HAVING count(DISTINCT a) >= 1 ORDER BY 1 DESC LIMIT 3) AS sub_2,
(SELECT avg(a) AS AVG FROM full_correlated WHERE (a > 2) GROUP BY a HAVING sum(a) > 10 ORDER BY (sum(d) - avg(a) - COALESCE(array_upper(ARRAY[max(a)],1) * 5, 0)) DESC LIMIT 3) AS sub_3
WHERE sub_2.a < sub_1.b::integer
@@ -341,11 +341,10 @@ LIMIT 100;
-> Nested Loop
Join Filter: (full_correlated_1.a < (full_correlated.b)::integer)
-> Limit
- -> GroupAggregate
- Group Key: full_correlated.b
- Filter: (count(DISTINCT full_correlated.a) > 0)
- -> Sort
- Sort Key: full_correlated.b DESC
+ -> Sort
+ Sort Key: full_correlated.b DESC
+ -> HashAggregate
+ Group Key: full_correlated.b
-> Custom Scan (ColumnarScan) on full_correlated
Filter: (a > 2)
Columnar Projected Columns: a, b
@@ -366,7 +365,7 @@ LIMIT 100;
Filter: (sum(full_correlated_2.a) > 10)
-> Index Scan using full_correlated_btree on full_correlated full_correlated_2
Index Cond: (a > 2)
-(32 rows)
+(31 rows)
DROP INDEX full_correlated_btree;
CREATE INDEX full_correlated_hash ON full_correlated USING hash(a);
diff --git a/src/test/regress/expected/columnar_test_helpers.out b/src/test/regress/expected/columnar_test_helpers.out
index d85bbd54f..9a9e21057 100644
--- a/src/test/regress/expected/columnar_test_helpers.out
+++ b/src/test/regress/expected/columnar_test_helpers.out
@@ -1,3 +1,6 @@
+SET client_min_messages TO WARNING;
+DROP SCHEMA IF EXISTS columnar_test_helpers CASCADE;
+RESET client_min_messages;
CREATE SCHEMA columnar_test_helpers;
SET search_path TO columnar_test_helpers;
CREATE OR REPLACE FUNCTION columnar_storage_info(
diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out
index 46d5bf6a9..047827dd8 100644
--- a/src/test/regress/expected/coordinator_shouldhaveshards.out
+++ b/src/test/regress/expected/coordinator_shouldhaveshards.out
@@ -266,28 +266,28 @@ SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;
BEGIN;
SET citus.enable_unique_job_ids TO off;
SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_25_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_25_1','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503000 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_25_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_25_4','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503003 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_26_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_26_1','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503000 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_26_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_26_4','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503003 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_1_0']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_2_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_3_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_4_0']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_1_0']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_2_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_3_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_4_0']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_1_3']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_2_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_3_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_4_3']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_1_3']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_2_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_3_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_4_3']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_25_1_0,repartition_25_2_0,repartition_25_3_0,repartition_25_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_26_1_0,repartition_26_2_0,repartition_26_3_0,repartition_26_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_25_1_3,repartition_25_2_3,repartition_25_3_3,repartition_25_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_26_1_3,repartition_26_2_3,repartition_26_3_3,repartition_26_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_26_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_26_1','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503000 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_26_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_26_4','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503003 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_27_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_27_1','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503000 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_27_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_27_4','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503003 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_1_2']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_2_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_3_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_4_2']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_1_2']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_2_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_3_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_4_2']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_1_5']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_2_5']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_3_5']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_4_5']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_1_5']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_2_5']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_3_5']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_4_5']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_26_1_2,repartition_26_2_2,repartition_26_3_2,repartition_26_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_27_1_2,repartition_27_2_2,repartition_27_3_2,repartition_27_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_26_1_5,repartition_26_2_5,repartition_26_3_5,repartition_26_4_5}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_27_1_5,repartition_27_2_5,repartition_27_3_5,repartition_27_4_5}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
count
---------------------------------------------------------------------
100
@@ -305,28 +305,28 @@ NOTICE: executing the command locally: SELECT y FROM coordinator_shouldhaveshar
(1 row)
SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_29_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_29_1','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503000 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_29_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_29_4','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503003 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_30_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_30_1','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503000 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_30_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_30_4','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503003 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_1_2']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_2_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_3_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_4_2']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_1_2']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_2_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_3_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_4_2']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_1_5']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_2_5']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_3_5']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_4_5']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_1_5']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_2_5']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_3_5']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_4_5']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_29_1_2,repartition_29_2_2,repartition_29_3_2,repartition_29_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_30_1_2,repartition_30_2_2,repartition_30_3_2,repartition_30_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_29_1_5,repartition_29_2_5,repartition_29_3_5,repartition_29_4_5}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_30_1_5,repartition_30_2_5,repartition_30_3_5,repartition_30_4_5}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_30_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_30_1','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503000 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_30_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_30_4','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503003 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_31_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_31_1','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503000 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_31_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_31_4','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503003 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_1_1']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_2_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_3_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_4_1']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_1_1']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_2_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_3_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_4_1']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_1_4']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_2_4']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_3_4']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_4_4']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_1_4']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_2_4']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_3_4']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_4_4']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_30_1_1,repartition_30_2_1,repartition_30_3_1,repartition_30_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_31_1_1,repartition_31_2_1,repartition_31_3_1,repartition_31_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_30_1_4,repartition_30_2_4,repartition_30_3_4,repartition_30_4_4}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_31_1_4,repartition_31_2_4,repartition_31_3_4,repartition_31_4_4}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
count
---------------------------------------------------------------------
100
@@ -592,7 +592,7 @@ NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1
(1 row)
ROLLBACK;
-CREATE table ref_table(x int PRIMARY KEY, y int);
+CREATE table ref_table(x int, y int);
-- this will be replicated to the coordinator because of add_coordinator test
SELECT create_reference_table('ref_table');
create_reference_table
@@ -620,18 +620,19 @@ ROLLBACK;
-- at the same time
INSERT INTO ref_table SELECT *, * FROM generate_series(1, 100);
NOTICE: executing the copy locally for shard xxxxx
+CREATE UNIQUE INDEX test_x_unique ON test(x);
WITH cte_1 AS (
-INSERT INTO ref_table SELECT * FROM ref_table LIMIT 10000 ON CONFLICT (x) DO UPDATE SET y = EXCLUDED.y + 1 RETURNING *)
+INSERT INTO test SELECT sum(x), y FROM test GROUP BY y ON CONFLICT (x) DO UPDATE SET y = EXCLUDED.y + 1 RETURNING *)
SELECT count(*) FROM cte_1;
-NOTICE: executing the command locally: SELECT x, y FROM coordinator_shouldhaveshards.ref_table_1503039 ref_table LIMIT 10000
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_table_1503039 AS citus_table_alias (x, y) SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('insert_select_XXX_1503039'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ON CONFLICT(x) DO UPDATE SET y = (excluded.y OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.x, citus_table_alias.y
+NOTICE: executing the command locally: SELECT sum(x) AS x, y FROM coordinator_shouldhaveshards.test_1503000 test WHERE true GROUP BY y
+NOTICE: executing the command locally: SELECT sum(x) AS x, y FROM coordinator_shouldhaveshards.test_1503003 test WHERE true GROUP BY y
NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) cte_1
count
---------------------------------------------------------------------
- 100
+ 0
(1 row)
+DROP INDEX test_x_unique;
-- issue #4237: preventing empty placement creation on coordinator
CREATE TABLE test_append_table(a int);
SELECT create_distributed_table('test_append_table', 'a', 'append');
diff --git a/src/test/regress/expected/coordinator_shouldhaveshards_0.out b/src/test/regress/expected/coordinator_shouldhaveshards_0.out
index 4c9dc0d18..00ccedb15 100644
--- a/src/test/regress/expected/coordinator_shouldhaveshards_0.out
+++ b/src/test/regress/expected/coordinator_shouldhaveshards_0.out
@@ -266,28 +266,28 @@ SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;
BEGIN;
SET citus.enable_unique_job_ids TO off;
SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_25_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_25_1','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503000 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_25_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_25_4','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503003 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_26_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_26_1','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503000 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_26_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_26_4','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503003 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_1_0']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_2_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_3_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_4_0']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_1_0']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_2_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_3_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_4_0']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_1_3']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_2_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_3_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_25_4_3']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_1_3']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_2_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_3_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_4_3']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_25_1_0,repartition_25_2_0,repartition_25_3_0,repartition_25_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_26_1_0,repartition_26_2_0,repartition_26_3_0,repartition_26_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_25_1_3,repartition_25_2_3,repartition_25_3_3,repartition_25_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_26_1_3,repartition_26_2_3,repartition_26_3_3,repartition_26_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_26_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_26_1','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503000 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_26_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_26_4','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503003 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_27_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_27_1','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503000 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_27_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_27_4','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503003 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_1_2']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_2_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_3_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_4_2']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_1_2']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_2_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_3_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_4_2']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_1_5']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_2_5']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_3_5']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_26_4_5']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_1_5']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_2_5']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_3_5']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_27_4_5']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_26_1_2,repartition_26_2_2,repartition_26_3_2,repartition_26_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_27_1_2,repartition_27_2_2,repartition_27_3_2,repartition_27_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_26_1_5,repartition_26_2_5,repartition_26_3_5,repartition_26_4_5}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_27_1_5,repartition_27_2_5,repartition_27_3_5,repartition_27_4_5}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
count
---------------------------------------------------------------------
100
@@ -305,28 +305,28 @@ NOTICE: executing the command locally: SELECT y FROM coordinator_shouldhaveshar
(1 row)
SELECT count(*) FROM test t1, test t2 WHERE t1.x = t2.y;
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_29_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_29_1','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503000 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_29_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_29_4','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503003 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_30_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_30_1','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503000 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_30_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_30_4','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503003 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_1_2']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_2_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_3_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_4_2']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_1_2']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_2_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_3_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_4_2']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_1_5']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_2_5']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_3_5']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_29_4_5']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_1_5']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_2_5']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_3_5']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_4_5']::text[],'localhost',57636) bytes
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_29_1_2,repartition_29_2_2,repartition_29_3_2,repartition_29_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_30_1_2,repartition_30_2_2,repartition_30_3_2,repartition_30_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_29_1_5,repartition_29_2_5,repartition_29_3_5,repartition_29_4_5}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_30_1_5,repartition_30_2_5,repartition_30_3_5,repartition_30_4_5}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_30_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_30_1','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503000 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_30_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_30_4','SELECT x AS column1 FROM coordinator_shouldhaveshards.test_1503003 t1 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_31_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_31_1','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503000 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_31_4' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_31_4','SELECT y AS column1 FROM coordinator_shouldhaveshards.test_1503003 t2 WHERE true',0,'hash','{-2147483648,-1431655766,-715827884,-2,715827880,1431655762}'::text[],'{-1431655767,-715827885,-3,715827879,1431655761,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_1_1']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_2_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_3_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_4_1']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_1_1']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_2_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_3_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_4_1']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_1_4']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_2_4']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_3_4']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_30_4_4']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_1_4']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_2_4']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_3_4']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_31_4_4']::text[],'localhost',57636) bytes
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_30_1_1,repartition_30_2_1,repartition_30_3_1,repartition_30_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_31_1_1,repartition_31_2_1,repartition_31_3_1,repartition_31_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_30_1_4,repartition_30_2_4,repartition_30_3_4,repartition_30_4_4}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 integer) JOIN read_intermediate_results('{repartition_31_1_4,repartition_31_2_4,repartition_31_3_4,repartition_31_4_4}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 integer) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
count
---------------------------------------------------------------------
100
@@ -592,7 +592,7 @@ NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1
(1 row)
ROLLBACK;
-CREATE table ref_table(x int PRIMARY KEY, y int);
+CREATE table ref_table(x int, y int);
-- this will be replicated to the coordinator because of add_coordinator test
SELECT create_reference_table('ref_table');
create_reference_table
@@ -620,18 +620,19 @@ ROLLBACK;
-- at the same time
INSERT INTO ref_table SELECT *, * FROM generate_series(1, 100);
NOTICE: executing the copy locally for shard xxxxx
+CREATE UNIQUE INDEX test_x_unique ON test(x);
WITH cte_1 AS (
-INSERT INTO ref_table SELECT * FROM ref_table LIMIT 10000 ON CONFLICT (x) DO UPDATE SET y = EXCLUDED.y + 1 RETURNING *)
+INSERT INTO test SELECT sum(x), y FROM test GROUP BY y ON CONFLICT (x) DO UPDATE SET y = EXCLUDED.y + 1 RETURNING *)
SELECT count(*) FROM cte_1;
-NOTICE: executing the command locally: SELECT x, y FROM coordinator_shouldhaveshards.ref_table_1503039 ref_table LIMIT 10000
-NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO coordinator_shouldhaveshards.ref_table_1503039 AS citus_table_alias (x, y) SELECT x, y FROM read_intermediate_result('insert_select_XXX_1503039'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer) ON CONFLICT(x) DO UPDATE SET y = (excluded.y OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.x, citus_table_alias.y
+NOTICE: executing the command locally: SELECT sum(x) AS x, y FROM coordinator_shouldhaveshards.test_1503000 test WHERE true GROUP BY y
+NOTICE: executing the command locally: SELECT sum(x) AS x, y FROM coordinator_shouldhaveshards.test_1503003 test WHERE true GROUP BY y
NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) cte_1
count
---------------------------------------------------------------------
- 100
+ 0
(1 row)
+DROP INDEX test_x_unique;
-- issue #4237: preventing empty placement creation on coordinator
CREATE TABLE test_append_table(a int);
SELECT create_distributed_table('test_append_table', 'a', 'append');
diff --git a/src/test/regress/expected/create_ref_dist_from_citus_local.out b/src/test/regress/expected/create_ref_dist_from_citus_local.out
index 7f6821b1f..dc67400e0 100644
--- a/src/test/regress/expected/create_ref_dist_from_citus_local.out
+++ b/src/test/regress/expected/create_ref_dist_from_citus_local.out
@@ -366,5 +366,701 @@ BEGIN;
(1 row)
ROLLBACK;
+\set VERBOSITY DEFAULT
+-- Test the UDFs that we use to convert Citus local tables to single-shard tables and
+-- reference tables.
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true);
+ERROR: This is an internal Citus function can only be used in a distributed transaction
+SELECT pg_catalog.citus_internal_delete_placement_metadata(1);
+ERROR: This is an internal Citus function can only be used in a distributed transaction
+CREATE ROLE test_user_create_ref_dist WITH LOGIN;
+GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist;
+ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_user_create_ref_dist';
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.1);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
+
+SET ROLE test_user_create_ref_dist;
+SET citus.next_shard_id TO 1850000;
+SET citus.next_placement_id TO 8510000;
+SET citus.shard_replication_factor TO 1;
+SET search_path TO create_ref_dist_from_citus_local;
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true);
+ERROR: relation_id cannot be NULL
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true);
+ERROR: replication_model cannot be NULL
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true);
+ERROR: colocation_id cannot be NULL
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null);
+ERROR: auto_converted cannot be NULL
+SELECT pg_catalog.citus_internal_delete_placement_metadata(null);
+ERROR: placement_id cannot be NULL
+CREATE TABLE udf_test (col_1 int);
+SELECT citus_add_local_table_to_metadata('udf_test');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+BEGIN;
+ SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true);
+ citus_internal_update_none_dist_table_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+ SELECT COUNT(*)=1 FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.udf_test'::regclass AND repmodel = 'k' AND colocationid = 99999 AND autoconverted = true;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+ SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement
+ WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset
+ SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid);
+ citus_internal_delete_placement_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+ SELECT COUNT(*)=0 FROM pg_dist_placement WHERE placementid = :udf_test_placementid;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+ROLLBACK;
+RESET ROLE;
+DROP TABLE udf_test;
+REVOKE ALL ON SCHEMA create_ref_dist_from_citus_local FROM test_user_create_ref_dist;
+DROP USER test_user_create_ref_dist;
+ALTER SYSTEM RESET citus.enable_manual_metadata_changes_for_user;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.1);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
+
+-- Test lazy conversion from Citus local to single-shard tables and reference tables.
+SET citus.next_shard_id TO 1860000;
+SET citus.next_placement_id TO 8520000;
+SET citus.shard_replication_factor TO 1;
+SET search_path TO create_ref_dist_from_citus_local;
+SET client_min_messages to ERROR;
+INSERT INTO reference_table_1 VALUES (1, 1), (2, 2), (201, 201), (202, 202);
+CREATE TABLE citus_local_table_7 (col_1 int UNIQUE);
+INSERT INTO citus_local_table_7 VALUES (1), (2), (201), (202);
+SELECT citus_add_local_table_to_metadata('citus_local_table_7');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE TABLE fkey_test (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+INSERT INTO fkey_test VALUES (1, '1', 1), (2, '2', 2), (201, '201', 201), (202, '202', 202);
+SELECT citus_add_local_table_to_metadata('fkey_test');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- check unsupported foreign key constraints
+ALTER TABLE reference_table_1 ADD CONSTRAINT ref_1_col_1_fkey_test_int_col_1 FOREIGN KEY (col_1) REFERENCES fkey_test(int_col_1);
+SELECT create_distributed_table('fkey_test', null, colocate_with=>'none');
+ERROR: relation fkey_test is referenced by a foreign key from reference_table_1
+DETAIL: foreign keys from a reference table to a distributed table are not supported.
+ALTER TABLE reference_table_1 DROP CONSTRAINT ref_1_col_1_fkey_test_int_col_1;
+ALTER TABLE citus_local_table_7 ADD CONSTRAINT citus_local_1_col_1_fkey_test_int_col_1 FOREIGN KEY (col_1) REFERENCES fkey_test(int_col_1);
+SELECT create_distributed_table('fkey_test', null, colocate_with=>'none');
+ERROR: relation fkey_test is referenced by a foreign key from citus_local_table_7
+DETAIL: foreign keys from a citus local table to a distributed table are not supported.
+ALTER TABLE citus_local_table_7 DROP CONSTRAINT citus_local_1_col_1_fkey_test_int_col_1;
+ALTER TABLE fkey_test ADD CONSTRAINT fkey_test_int_col_1_citus_local_1_col_1 FOREIGN KEY (int_col_1) REFERENCES citus_local_table_7(col_1);
+SELECT create_distributed_table('fkey_test', null, colocate_with=>'none');
+ERROR: cannot create foreign key constraint since relations are not colocated or not referencing a reference table
+DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
+ALTER TABLE fkey_test DROP CONSTRAINT fkey_test_int_col_1_citus_local_1_col_1;
+CREATE TABLE tbl_1 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+CREATE INDEX tbl_1_int_col_2_idx ON tbl_1 (int_col_2);
+INSERT INTO tbl_1 VALUES (1, '1', 1), (2, '2', 2), (201, '201', 201), (202, '202', 202);
+ALTER TABLE tbl_1 ADD CONSTRAINT tbl_1_int_col_1_ref_1_col_1 FOREIGN KEY (int_col_1) REFERENCES reference_table_1(col_1);
+ALTER TABLE tbl_1 ADD CONSTRAINT tbl_1_int_col_2_ref_1_col_1 FOREIGN KEY (int_col_2) REFERENCES reference_table_1(col_1);
+ALTER TABLE tbl_1 ADD CONSTRAINT tbl_1_int_col_2_tbl_1_int_col_1 FOREIGN KEY (int_col_2) REFERENCES tbl_1(int_col_1);
+SELECT citus_add_local_table_to_metadata('tbl_1');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_1') AS tbl_1_old_shard_id \gset
+SELECT create_distributed_table('tbl_1', null, colocate_with=>'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- check data
+SELECT * FROM tbl_1 ORDER BY int_col_1;
+ int_col_1 | text_col_1 | int_col_2
+---------------------------------------------------------------------
+ 1 | 1 | 1
+ 2 | 2 | 2
+ 201 | 201 | 201
+ 202 | 202 | 202
+(4 rows)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_1');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_1', :tbl_1_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_1', 3);
+ verify_index_count_on_shard_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT cardinality(fkey_names) = 3 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_1');
+ verify_fkey_count_on_shards
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- test partitioning
+CREATE TABLE tbl_2 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text,
+ int_col_2 int
+) PARTITION BY RANGE (int_col_1);
+CREATE TABLE tbl_2_child_1 PARTITION OF tbl_2 FOR VALUES FROM (0) TO (100);
+CREATE TABLE tbl_2_child_2 PARTITION OF tbl_2 FOR VALUES FROM (200) TO (300);
+INSERT INTO tbl_2 VALUES (1, '1', 1), (2, '2', 2), (201, '201', 201), (202, '202', 202);
+SELECT citus_add_local_table_to_metadata('tbl_2');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+ALTER TABLE tbl_2 ADD CONSTRAINT tbl_2_int_col_1_ref_1_col_1 FOREIGN KEY (int_col_1) REFERENCES reference_table_1(col_1);
+ALTER TABLE tbl_2 ADD CONSTRAINT tbl_2_int_col_2_ref_1_col_1 FOREIGN KEY (int_col_2) REFERENCES reference_table_1(col_1);
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_2') AS tbl_2_old_shard_id \gset
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_2_child_1') AS tbl_2_child_1_old_shard_id \gset
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_2_child_2') AS tbl_2_child_2_old_shard_id \gset
+SELECT create_distributed_table('tbl_2', null, colocate_with=>'tbl_1');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2', :tbl_2_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_2', 1);
+ verify_index_count_on_shard_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT cardinality(fkey_names) = 2 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_2');
+ verify_fkey_count_on_shards
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_partition_count_on_placements('create_ref_dist_from_citus_local.tbl_2', 2);
+ verify_partition_count_on_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify the same for children
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2_child_1');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2_child_1', :tbl_2_child_1_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_2_child_1', 1);
+ verify_index_count_on_shard_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT cardinality(fkey_names) = 2 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_2_child_1');
+ verify_fkey_count_on_shards
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2_child_2');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2_child_2', :tbl_2_child_2_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_2_child_2', 1);
+ verify_index_count_on_shard_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT cardinality(fkey_names) = 2 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_2_child_2');
+ verify_fkey_count_on_shards
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify that placements of all 4 tables are on the same node
+SELECT COUNT(DISTINCT(groupid)) = 1 FROM pg_dist_placement WHERE shardid IN (
+ :tbl_1_old_shard_id, :tbl_2_old_shard_id, :tbl_2_child_1_old_shard_id, :tbl_2_child_2_old_shard_id
+);
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify the same by executing a router query that targets both tables
+SET client_min_messages to DEBUG2;
+SELECT COUNT(*) FROM tbl_1, tbl_2;
+DEBUG: Creating router plan
+ count
+---------------------------------------------------------------------
+ 16
+(1 row)
+
+SET client_min_messages to ERROR;
+CREATE TABLE reference_table_3(col_1 INT UNIQUE, col_2 INT UNIQUE);
+INSERT INTO reference_table_3 VALUES (1, 1), (2, 2), (201, 201), (202, 202);
+CREATE TABLE tbl_3 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text,
+ int_col_2 int
+) PARTITION BY RANGE (int_col_1);
+CREATE TABLE tbl_3_child_1 PARTITION OF tbl_3 FOR VALUES FROM (0) TO (100);
+ALTER TABLE tbl_3 ADD CONSTRAINT tbl_3_int_col_1_ref_1_col_1 FOREIGN KEY (int_col_1) REFERENCES reference_table_3(col_1);
+SELECT create_reference_table('reference_table_3');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO tbl_3 VALUES (1, '1', 1), (2, '2', 2);
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_3') AS tbl_3_old_shard_id \gset
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_3_child_1') AS tbl_3_child_1_old_shard_id \gset
+SELECT create_distributed_table('tbl_3', null, colocate_with=>'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_3');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_3', :tbl_3_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_3', 1);
+ verify_index_count_on_shard_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT cardinality(fkey_names) = 1 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_3');
+ verify_fkey_count_on_shards
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_partition_count_on_placements('create_ref_dist_from_citus_local.tbl_3', 1);
+ verify_partition_count_on_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify the same for children
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_3_child_1');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_3_child_1', :tbl_3_child_1_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_3_child_1', 1);
+ verify_index_count_on_shard_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT cardinality(fkey_names) = 1 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_3_child_1');
+ verify_fkey_count_on_shards
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify that placements of all 2 tables are on the same node
+SELECT COUNT(DISTINCT(groupid)) = 1 FROM pg_dist_placement WHERE shardid IN (
+ :tbl_3_old_shard_id, :tbl_3_child_1_old_shard_id
+);
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify the same by executing a router query that targets the table
+SET client_min_messages to DEBUG2;
+SELECT COUNT(*) FROM tbl_3;
+DEBUG: Distributed planning for a fast-path router query
+DEBUG: Creating router plan
+ count
+---------------------------------------------------------------------
+ 2
+(1 row)
+
+SET client_min_messages to ERROR;
+CREATE TABLE single_shard_conversion_colocated_1 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_colocated_1');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.single_shard_conversion_colocated_1') AS single_shard_conversion_colocated_1_old_shard_id \gset
+SELECT create_distributed_table('single_shard_conversion_colocated_1', null, colocate_with=>'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_colocated_1');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_colocated_1', :single_shard_conversion_colocated_1_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE TABLE single_shard_conversion_colocated_2 (
+ int_col_1 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_colocated_2');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.single_shard_conversion_colocated_2') AS single_shard_conversion_colocated_2_old_shard_id \gset
+SELECT create_distributed_table('single_shard_conversion_colocated_2', null, colocate_with=>'single_shard_conversion_colocated_1');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_colocated_2');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_colocated_2', :single_shard_conversion_colocated_2_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- make sure that they're created on the same colocation group
+SELECT
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.single_shard_conversion_colocated_1'::regclass
+)
+=
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.single_shard_conversion_colocated_2'::regclass
+);
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- verify that placements of 2 tables are on the same node
+SELECT COUNT(DISTINCT(groupid)) = 1 FROM pg_dist_placement WHERE shardid IN (
+ :single_shard_conversion_colocated_1_old_shard_id, :single_shard_conversion_colocated_2_old_shard_id
+);
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE TABLE single_shard_conversion_noncolocated_1 (
+ int_col_1 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_noncolocated_1');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.single_shard_conversion_noncolocated_1') AS single_shard_conversion_noncolocated_1_old_shard_id \gset
+SELECT create_distributed_table('single_shard_conversion_noncolocated_1', null, colocate_with=>'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_noncolocated_1');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_noncolocated_1', :single_shard_conversion_noncolocated_1_old_shard_id, false);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- make sure that they're created on different colocation groups
+SELECT
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.single_shard_conversion_colocated_1'::regclass
+)
+!=
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.single_shard_conversion_noncolocated_1'::regclass
+);
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- Test creating a reference table from a Citus local table
+-- (ref_table_conversion_test) that has foreign keys from/to Citus
+-- local tables and reference tables:
+--
+--    citus_local_referencing ----------              ----------> citus_local_referenced
+--                                      |             ^
+--                                      v             |
+--                              ref_table_conversion_test
+--                                      ^             |
+--                                      |             v
+-- reference_table_referencing ----------              ----------> reference_table_referenced
+--
+CREATE TABLE citus_local_referenced(a int PRIMARY KEY);
+SELECT citus_add_local_table_to_metadata('citus_local_referenced');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO citus_local_referenced VALUES (1), (2), (3), (4);
+CREATE TABLE reference_table_referenced(a int PRIMARY KEY);
+SELECT create_reference_table('reference_table_referenced');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO reference_table_referenced VALUES (1), (2), (3), (4);
+CREATE TABLE ref_table_conversion_test (
+ a int PRIMARY KEY
+);
+SELECT citus_add_local_table_to_metadata('ref_table_conversion_test');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+ALTER TABLE ref_table_conversion_test ADD CONSTRAINT ref_table_a_citus_local_referenced_a FOREIGN KEY (a) REFERENCES citus_local_referenced(a);
+ALTER TABLE ref_table_conversion_test ADD CONSTRAINT ref_table_a_reference_table_referenced_a FOREIGN KEY (a) REFERENCES reference_table_referenced(a);
+INSERT INTO ref_table_conversion_test VALUES (1), (2), (3), (4);
+CREATE INDEX ref_table_conversion_test_a_idx1 ON ref_table_conversion_test (a);
+CREATE INDEX ref_table_conversion_test_a_idx2 ON ref_table_conversion_test (a);
+CREATE TABLE citus_local_referencing(a int);
+ALTER TABLE citus_local_referencing ADD CONSTRAINT citus_local_referencing_a_ref_table_a FOREIGN KEY (a) REFERENCES ref_table_conversion_test(a);
+SELECT citus_add_local_table_to_metadata('citus_local_referencing');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO citus_local_referencing VALUES (1), (2), (3), (4);
+CREATE TABLE reference_table_referencing(a int);
+ALTER TABLE reference_table_referencing ADD CONSTRAINT reference_table_referencing_a_ref_table_a FOREIGN KEY (a) REFERENCES ref_table_conversion_test(a);
+SELECT create_reference_table('reference_table_referencing');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO reference_table_referencing VALUES (1), (2), (3), (4);
+-- save old shardid and placementid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.ref_table_conversion_test') AS ref_table_conversion_test_old_shard_id \gset
+SELECT placementid AS ref_table_conversion_test_old_coord_placement_id FROM pg_dist_placement WHERE shardid = :ref_table_conversion_test_old_shard_id \gset
+SELECT create_reference_table('ref_table_conversion_test');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- check data on all placements
+SELECT result FROM run_command_on_all_nodes(
+ $$SELECT COUNT(*)=4 FROM create_ref_dist_from_citus_local.ref_table_conversion_test$$
+);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+SELECT public.verify_pg_dist_partition_for_reference_table('create_ref_dist_from_citus_local.ref_table_conversion_test');
+ verify_pg_dist_partition_for_reference_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placements_for_reference_table('create_ref_dist_from_citus_local.ref_table_conversion_test',
+ :ref_table_conversion_test_old_shard_id,
+ :ref_table_conversion_test_old_coord_placement_id);
+ verify_shard_placements_for_reference_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.ref_table_conversion_test', 3);
+ verify_index_count_on_shard_placements
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT on_node, fkey_names FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.ref_table_conversion_test') ORDER BY 1,2;
+ on_node | fkey_names
+---------------------------------------------------------------------
+ on_coordinator | {citus_local_referencing_a_ref_table_a_1860015,ref_table_a_citus_local_referenced_a_1860014,ref_table_a_reference_table_referenced_a_1860014,reference_table_referencing_a_ref_table_a_1860016}
+ on_worker | {ref_table_a_reference_table_referenced_a_1860014,reference_table_referencing_a_ref_table_a_1860016}
+ on_worker | {ref_table_a_reference_table_referenced_a_1860014,reference_table_referencing_a_ref_table_a_1860016}
+(3 rows)
+
+CREATE TABLE dropped_column_test(a int, b int, c text not null, d text not null);
+INSERT INTO dropped_column_test VALUES(1, null, 'text_1', 'text_2');
+ALTER TABLE dropped_column_test DROP column b;
+SELECT citus_add_local_table_to_metadata('dropped_column_test');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_reference_table('dropped_column_test');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- check data on all placements
+SELECT result FROM run_command_on_all_nodes(
+ $$
+ SELECT jsonb_agg(q.*) FROM (
+ SELECT * FROM create_ref_dist_from_citus_local.dropped_column_test
+ ) q
+ $$
+);
+ result
+---------------------------------------------------------------------
+ [{"a": 1, "c": "text_1", "d": "text_2"}]
+ [{"a": 1, "c": "text_1", "d": "text_2"}]
+ [{"a": 1, "c": "text_1", "d": "text_2"}]
+(3 rows)
+
+SET citus.shard_replication_factor TO 2;
+CREATE TABLE replication_factor_test(a int);
+SELECT citus_add_local_table_to_metadata('replication_factor_test');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_distributed_table('replication_factor_test', null);
+ERROR: could not create single shard table: citus.shard_replication_factor is greater than 1
+HINT: Consider setting citus.shard_replication_factor to 1 and try again
+SET citus.shard_replication_factor TO 1;
-- cleanup at exit
DROP SCHEMA create_ref_dist_from_citus_local CASCADE;
diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out
index c5111b05f..59f7948a1 100644
--- a/src/test/regress/expected/create_role_propagation.out
+++ b/src/test/regress/expected/create_role_propagation.out
@@ -244,13 +244,13 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
1
(1 row)
-SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
- role | member | grantor | admin_option
+SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
+ role | member | grantor | admin_option
---------------------------------------------------------------------
- dist_role_1 | dist_role_2 | non_dist_role_1 | f
- dist_role_3 | non_dist_role_3 | postgres | f
- non_dist_role_1 | non_dist_role_2 | dist_role_1 | f
- non_dist_role_4 | dist_role_4 | postgres | f
+ dist_role_1 | dist_role_2 | t | f
+ dist_role_3 | non_dist_role_3 | t | f
+ non_dist_role_1 | non_dist_role_2 | t | f
+ non_dist_role_4 | dist_role_4 | t | f
(4 rows)
SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
diff --git a/src/test/regress/expected/create_single_shard_table.out b/src/test/regress/expected/create_single_shard_table.out
index 248f196ff..2a7a94770 100644
--- a/src/test/regress/expected/create_single_shard_table.out
+++ b/src/test/regress/expected/create_single_shard_table.out
@@ -614,11 +614,11 @@ INSERT INTO "Table?!.1Table" VALUES (10, 15, (150, row_to_json(row(4,8)))::int_j
INSERT INTO "Table?!.1Table" VALUES (5, 5, (5, row_to_json(row(5,5)))::int_jsonb_type, row_to_json(row(5,5), true));
-- tuples that are supposed to violate different data type / check constraints
INSERT INTO "Table?!.1Table"(id, jsondata, name) VALUES (101, '{"a": 1}', 'text_1');
-ERROR: conflicting key value violates exclusion constraint "Table?!.1Table_name_excl_1730043"
+ERROR: conflicting key value violates exclusion constraint "Table?!.1Table_name_excl_1730042"
DETAIL: Key (name)=(text_1) conflicts with existing key (name)=(text_1).
CONTEXT: while executing command on localhost:xxxxx
INSERT INTO "Table?!.1Table"(id, jsondata, price) VALUES (101, '{"a": 1}', -1);
-ERROR: new row for relation "Table?!.1Table_1730043" violates check constraint "Table?!.1Table_price_check"
+ERROR: new row for relation "Table?!.1Table_1730042" violates check constraint "Table?!.1Table_price_check"
DETAIL: Failing row contains (101, null, null, {"a": 1}, null, -1, 0, null, 5, 14, 74).
CONTEXT: while executing command on localhost:xxxxx
INSERT INTO "Table?!.1Table"(id, jsondata, age_with_default_col) VALUES (101, '{"a": 1}', -1);
@@ -863,7 +863,7 @@ CREATE INDEX "my!Index2New" ON "NULL_!_dist_key"."nullKeyTable.1!?!9012345678901
CREATE UNIQUE INDEX uniqueIndex2New ON "NULL_!_dist_key"."nullKeyTable.1!?!9012345678901234567890123456789012345678901234567890123456789"(id);
-- error out for already existing, because of the unique index
INSERT INTO "NULL_!_dist_key"."nullKeyTable.1!?!9012345678901234567890123456789012345678901234567890123456789" VALUES (1, 1, row_to_json(row(1,1), true));
-ERROR: duplicate key value violates unique constraint "partition1_nullKeyTable.1!?!901234567890123456_bf4a8ac1_1730056"
+ERROR: duplicate key value violates unique constraint "partition1_nullKeyTable.1!?!901234567890123456_bf4a8ac1_1730054"
DETAIL: Key (id)=(X) already exists.
CONTEXT: while executing command on localhost:xxxxx
-- verify all 4 shard indexes are created on the same node
@@ -895,8 +895,8 @@ DETAIL: Reference tables and local tables can only have foreign keys to referen
ROLLBACK;
-- errors out because of foreign key violation
INSERT INTO "NULL_!_dist_key"."nullKeyTable.1!?!9012345678901234567890123456789012345678901234567890123456789" VALUES (100, 1, row_to_json(row(1,1), true));
-ERROR: insert or update on table "partition100_nullKeyTable.1!?!9012345678901234_0aba0bf3_1730058" violates foreign key constraint "fkey_to_dummy_ref_1730055"
-DETAIL: Key (id)=(X) is not present in table "dummy_reference_table_1730059".
+ERROR: insert or update on table "partition100_nullKeyTable.1!?!9012345678901234_0aba0bf3_1730056" violates foreign key constraint "fkey_to_dummy_ref_1730053"
+DETAIL: Key (id)=(X) is not present in table "dummy_reference_table_1730057".
CONTEXT: while executing command on localhost:xxxxx
-- now inserts successfully
INSERT INTO dummy_reference_table VALUES (100);
@@ -1163,7 +1163,7 @@ BEGIN;
INSERT INTO referencing_table VALUES (1, 2);
-- fails
INSERT INTO referencing_table VALUES (2, 2);
-ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730100"
+ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730098"
DETAIL: Key (a)=(2) is not present in table "referenced_table_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
ROLLBACK;
@@ -1209,7 +1209,7 @@ BEGIN;
INSERT INTO referencing_table VALUES (1, 2);
-- fails
INSERT INTO referencing_table VALUES (2, 2);
-ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730136"
+ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730133"
DETAIL: Key (a)=(2) is not present in table "referenced_table_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
ROLLBACK;
@@ -1239,8 +1239,8 @@ BEGIN;
(1 row)
SELECT create_distributed_table('referenced_table', NULL, distribution_type=>null);
-ERROR: cannot create foreign key constraint since foreign keys from reference tables and local tables to distributed tables are not supported
-DETAIL: Reference tables and local tables can only have foreign keys to reference tables and local tables
+ERROR: relation referenced_table is referenced by a foreign key from referencing_table
+DETAIL: foreign keys from a reference table to a distributed table are not supported.
ROLLBACK;
BEGIN;
SELECT create_distributed_table('referenced_table', NULL, distribution_type=>null);
@@ -1274,8 +1274,8 @@ BEGIN;
(1 row)
SELECT create_distributed_table('referenced_table', NULL, distribution_type=>null);
-ERROR: cannot create foreign key constraint since foreign keys from reference tables and local tables to distributed tables are not supported
-DETAIL: Reference tables and local tables can only have foreign keys to reference tables and local tables
+ERROR: relation referenced_table is referenced by a foreign key from referencing_table
+DETAIL: foreign keys from a citus local table to a distributed table are not supported.
ROLLBACK;
BEGIN;
SELECT create_distributed_table('referenced_table', NULL, distribution_type=>null);
@@ -1327,8 +1327,8 @@ SELECT result, success FROM run_command_on_workers($$
$$);
result | success
---------------------------------------------------------------------
- ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730153" | f
- ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730153" | f
+ ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730146" | f
+ ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730146" | f
(2 rows)
DROP TABLE referencing_table, referenced_table;
@@ -1343,8 +1343,8 @@ SELECT create_distributed_table('self_fkey_test', NULL, distribution_type=>null)
INSERT INTO self_fkey_test VALUES (1, 1); -- ok
INSERT INTO self_fkey_test VALUES (2, 3); -- fails
-ERROR: insert or update on table "self_fkey_test_1730154" violates foreign key constraint "self_fkey_test_b_fkey_1730154"
-DETAIL: Key (b)=(3) is not present in table "self_fkey_test_1730154".
+ERROR: insert or update on table "self_fkey_test_1730147" violates foreign key constraint "self_fkey_test_b_fkey_1730147"
+DETAIL: Key (b)=(3) is not present in table "self_fkey_test_1730147".
CONTEXT: while executing command on localhost:xxxxx
-- similar foreign key tests but this time create the referencing table later on
-- referencing table is a single-shard table
@@ -1368,7 +1368,7 @@ BEGIN;
INSERT INTO referencing_table VALUES (1, 2);
-- fails
INSERT INTO referencing_table VALUES (2, 2);
-ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730156"
+ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730149"
DETAIL: Key (a)=(2) is not present in table "referenced_table_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
ROLLBACK;
@@ -1391,7 +1391,7 @@ BEGIN;
INSERT INTO referencing_table VALUES (2, 1);
-- fails
INSERT INTO referencing_table VALUES (1, 2);
-ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_b_fkey_1730158"
+ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_b_fkey_1730151"
DETAIL: Key (a, b)=(1, 2) is not present in table "referenced_table_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
ROLLBACK;
@@ -1498,7 +1498,7 @@ BEGIN;
INSERT INTO referencing_table VALUES (1, 2);
-- fails
INSERT INTO referencing_table VALUES (2, 2);
-ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730199"
+ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "referencing_table_a_fkey_1730191"
DETAIL: Key (a)=(2) is not present in table "referenced_table_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
ROLLBACK;
diff --git a/src/test/regress/expected/distributed_domain.out b/src/test/regress/expected/distributed_domain.out
index 5043d4f05..30e388803 100644
--- a/src/test/regress/expected/distributed_domain.out
+++ b/src/test/regress/expected/distributed_domain.out
@@ -947,3 +947,4 @@ DROP DOMAIN IF EXISTS domain_does_not_exist;
NOTICE: type "domain_does_not_exist" does not exist, skipping
SET client_min_messages TO warning;
DROP SCHEMA distributed_domain, distributed_domain_moved CASCADE;
+DROP ROLE domain_owner;
diff --git a/src/test/regress/expected/executor_local_failure.out b/src/test/regress/expected/executor_local_failure.out
new file mode 100644
index 000000000..1e8d577ba
--- /dev/null
+++ b/src/test/regress/expected/executor_local_failure.out
@@ -0,0 +1,47 @@
+CREATE SCHEMA failure_local_modification;
+SET search_path TO failure_local_modification;
+SET citus.next_shard_id TO 1989000;
+SET citus.shard_replication_factor TO 1;
+CREATE TABLE failover_to_local (key int PRIMARY KEY, value varchar(10));
+SELECT create_reference_table('failover_to_local');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+\c - - - :worker_2_port
+SET search_path TO failure_local_modification;
+-- prevent local connection establishment, imitate
+-- a failure
+ALTER SYSTEM SET citus.local_shared_pool_size TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.2);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
+
+BEGIN;
+ -- we force the execution to use connections (e.g., remote execution)
+ -- however, we do not allow connections as local_shared_pool_size=-1
+ -- so, properly error out
+ SET LOCAL citus.enable_local_execution TO false;
+ INSERT INTO failover_to_local VALUES (1,'1'), (2,'2'),(3,'3'),(4,'4');
+ERROR: the total number of connections on the server is more than max_connections(100)
+HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true;
+ROLLBACK;
+ALTER SYSTEM RESET citus.local_shared_pool_size;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+SET client_min_messages TO ERROR;
+DROP SCHEMA failure_local_modification cascade;
diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out
index b8909c8e2..0e4b85701 100644
--- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out
+++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out
@@ -196,9 +196,6 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()');
SELECT create_distributed_table('test_table', 'id');
NOTICE: Copying data from local table...
-NOTICE: copying the data has completed
-DETAIL: The local data in the table is no longer visible, but is still on disk.
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$create_distributed_table_non_empty_failure.test_table$$)
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
@@ -231,9 +228,6 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").cancel(' || pg_b
SELECT create_distributed_table('test_table', 'id');
NOTICE: Copying data from local table...
-NOTICE: copying the data has completed
-DETAIL: The local data in the table is no longer visible, but is still on disk.
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$create_distributed_table_non_empty_failure.test_table$$)
ERROR: canceling statement due to user request
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
diff --git a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out
index e7f2c44ac..f6ec6d0c6 100644
--- a/src/test/regress/expected/failure_create_reference_table.out
+++ b/src/test/regress/expected/failure_create_reference_table.out
@@ -107,9 +107,6 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()');
SELECT create_reference_table('ref_table');
NOTICE: Copying data from local table...
-NOTICE: copying the data has completed
-DETAIL: The local data in the table is no longer visible, but is still on disk.
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$failure_reference_table.ref_table$$)
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
SELECT count(*) FROM pg_dist_shard_placement;
count
@@ -126,9 +123,6 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").cancel(' || pg_
SELECT create_reference_table('ref_table');
NOTICE: Copying data from local table...
-NOTICE: copying the data has completed
-DETAIL: The local data in the table is no longer visible, but is still on disk.
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$failure_reference_table.ref_table$$)
ERROR: canceling statement due to user request
SELECT count(*) FROM pg_dist_shard_placement;
count
diff --git a/src/test/regress/expected/failure_test_helpers.out b/src/test/regress/expected/failure_test_helpers.out
index 8c2be9825..da63a985f 100644
--- a/src/test/regress/expected/failure_test_helpers.out
+++ b/src/test/regress/expected/failure_test_helpers.out
@@ -11,7 +11,7 @@ SELECT pg_reload_conf();
(1 row)
-- Add some helper functions for sending commands to mitmproxy
-CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
+CREATE OR REPLACE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
DECLARE
command ALIAS FOR $1;
BEGIN
@@ -26,52 +26,13 @@ BEGIN
RETURN QUERY SELECT * FROM mitmproxy_result;
END;
$$ LANGUAGE plpgsql;
-CREATE FUNCTION citus.clear_network_traffic() RETURNS void AS $$
+CREATE OR REPLACE FUNCTION citus.clear_network_traffic() RETURNS void AS $$
BEGIN
PERFORM citus.mitmproxy('recorder.reset()');
RETURN; -- return void
END;
$$ LANGUAGE plpgsql;
-CREATE FUNCTION citus.dump_network_traffic()
-RETURNS TABLE(conn int, source text, message text) AS $$
-BEGIN
- CREATE TEMPORARY TABLE mitmproxy_command (command text) ON COMMIT DROP;
- CREATE TEMPORARY TABLE mitmproxy_result (
- conn int, source text, message text
- ) ON COMMIT DROP;
-
- INSERT INTO mitmproxy_command VALUES ('recorder.dump()');
-
- EXECUTE format('COPY mitmproxy_command TO %L', current_setting('citus.mitmfifo'));
- EXECUTE format('COPY mitmproxy_result FROM %L', current_setting('citus.mitmfifo'));
-
- RETURN QUERY SELECT * FROM mitmproxy_result;
-END;
-$$ LANGUAGE plpgsql;
-\c - - - :worker_2_port
--- Add some helper functions for sending commands to mitmproxy
-CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
-DECLARE
- command ALIAS FOR $1;
-BEGIN
- CREATE TEMPORARY TABLE mitmproxy_command (command text) ON COMMIT DROP;
- CREATE TEMPORARY TABLE mitmproxy_result (res text) ON COMMIT DROP;
-
- INSERT INTO mitmproxy_command VALUES (command);
-
- EXECUTE format('COPY mitmproxy_command TO %L', current_setting('citus.mitmfifo'));
- EXECUTE format('COPY mitmproxy_result FROM %L', current_setting('citus.mitmfifo'));
-
- RETURN QUERY SELECT * FROM mitmproxy_result;
-END;
-$$ LANGUAGE plpgsql;
-CREATE FUNCTION citus.clear_network_traffic() RETURNS void AS $$
-BEGIN
- PERFORM citus.mitmproxy('recorder.reset()');
- RETURN; -- return void
-END;
-$$ LANGUAGE plpgsql;
-CREATE FUNCTION citus.dump_network_traffic()
+CREATE OR REPLACE FUNCTION citus.dump_network_traffic()
RETURNS TABLE(conn int, source text, message text) AS $$
BEGIN
CREATE TEMPORARY TABLE mitmproxy_command (command text) ON COMMIT DROP;
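-- Illustrative usage sketch (not part of the patch): the CREATE OR REPLACE helpers
-- above are how the failure tests talk to mitmproxy. A typical test installs a rule,
-- runs the statement under test, then clears the rule; the rule string below is the
-- one already shown in the earlier hunks of this patch.
SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()');
-- ... statement expected to fail while COPYing to a shard ...
SELECT citus.mitmproxy('conn.allow()');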
diff --git a/src/test/regress/expected/fkeys_between_local_ref.out b/src/test/regress/expected/fkeys_between_local_ref.out
index 3df007cb3..e34b8da8c 100644
--- a/src/test/regress/expected/fkeys_between_local_ref.out
+++ b/src/test/regress/expected/fkeys_between_local_ref.out
@@ -123,10 +123,16 @@ BEGIN;
(1 row)
ROLLBACK;
--- this actually attempts to convert local tables to citus local tables but errors out
--- as citus doesn't support defining foreign keys via add column commands
-ALTER TABLE local_table_1 ADD COLUMN col_3 INT REFERENCES reference_table_1(col_1);
-ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
+BEGIN;
+ ALTER TABLE local_table_1 ADD COLUMN col_3 INT REFERENCES reference_table_1(col_1);
+ -- show that we converted all 4 local tables in this schema to citus local tables
+ SELECT COUNT(*)=4 FROM citus_local_tables_in_schema;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+ROLLBACK;
BEGIN;
-- define a foreign key so that all 4 local tables become citus local tables
ALTER TABLE local_table_1 ADD CONSTRAINT fkey_11 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1);
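-- Hedged recap of the change above (names reused from this test): this ADD COLUMN
-- used to fail with "cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE,
-- FOREIGN and CHECK constraints"; it now succeeds and cascades the citus-local
-- conversion, which is what the new expected output asserts.
BEGIN;
  ALTER TABLE local_table_1 ADD COLUMN col_3 INT REFERENCES reference_table_1(col_1);
  SELECT COUNT(*)=4 FROM citus_local_tables_in_schema;  -- t: all 4 local tables converted
ROLLBACK;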
diff --git a/src/test/regress/expected/foreign_key_to_reference_table.out b/src/test/regress/expected/foreign_key_to_reference_table.out
index dd110349e..5a25bb41f 100644
--- a/src/test/regress/expected/foreign_key_to_reference_table.out
+++ b/src/test/regress/expected/foreign_key_to_reference_table.out
@@ -250,13 +250,10 @@ SELECT create_distributed_table('referencing_table', 'ref_id');
(1 row)
ALTER TABLE referencing_table ADD COLUMN referencing int REFERENCES referenced_table(id) ON UPDATE CASCADE;
-ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
-DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names
-HINT: You can issue each command separately such as ALTER TABLE referencing_table ADD COLUMN referencing data_type; ALTER TABLE referencing_table ADD CONSTRAINT constraint_name FOREIGN KEY (referencing) REFERENCES referenced_table(id) ON UPDATE CASCADE;
SELECT COUNT(*) FROM table_fkeys_in_workers WHERE relid SIMILAR TO 'fkey_reference_table.%\d{2,}' AND refd_relid SIMILAR TO 'fkey_reference_table.%\d{2,}';
count
---------------------------------------------------------------------
- 0
+ 8
(1 row)
DROP TABLE referencing_table;
diff --git a/src/test/regress/expected/foreign_tables_mx.out b/src/test/regress/expected/foreign_tables_mx.out
index 4bcddac5a..f9f63806b 100644
--- a/src/test/regress/expected/foreign_tables_mx.out
+++ b/src/test/regress/expected/foreign_tables_mx.out
@@ -38,6 +38,9 @@ SELECT partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid = 'foreign
n | s
(1 row)
+-- COPY FROM doesn't work for Citus foreign tables
+COPY foreign_table FROM stdin;
+ERROR: cannot PREPARE a transaction that has operated on postgres_fdw foreign tables
CREATE TABLE parent_for_foreign_tables (
project_id integer
) PARTITION BY HASH (project_id);
diff --git a/src/test/regress/expected/global_cancel.out b/src/test/regress/expected/global_cancel.out
index df50dbe3f..5adeef3c8 100644
--- a/src/test/regress/expected/global_cancel.out
+++ b/src/test/regress/expected/global_cancel.out
@@ -67,12 +67,12 @@ SELECT pg_typeof(:maintenance_daemon_gpid);
bigint
(1 row)
+\set VERBOSITY terse
SELECT pg_cancel_backend(:maintenance_daemon_gpid);
ERROR: must be a superuser to cancel superuser query
-CONTEXT: while executing command on localhost:xxxxx
SELECT pg_terminate_backend(:maintenance_daemon_gpid);
ERROR: must be a superuser to terminate superuser process
-CONTEXT: while executing command on localhost:xxxxx
+\set VERBOSITY default
-- we can cancel our own backend
SELECT pg_cancel_backend(citus_backend_gpid());
ERROR: canceling statement due to user request
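-- Hedged note on the hunk above: wrapping the two statements in terse verbosity
-- suppresses the CONTEXT line ("while executing command on localhost:xxxxx"), so the
-- expected output now shows only the ERROR text. Sketch of the pattern, reusing the
-- gpid variable set earlier in this test:
\set VERBOSITY terse
SELECT pg_cancel_backend(:maintenance_daemon_gpid);
\set VERBOSITY default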
diff --git a/src/test/regress/expected/grant_on_database_propagation.out b/src/test/regress/expected/grant_on_database_propagation.out
new file mode 100644
index 000000000..2fd135314
--- /dev/null
+++ b/src/test/regress/expected/grant_on_database_propagation.out
@@ -0,0 +1,967 @@
+-- Public role has connect,temp,temporary privileges on database
+-- To test these scenarios, we need to revoke these privileges from public role
+-- since public role privileges are inherited by new roles/users
+revoke connect,temp,temporary on database regression from public;
+CREATE SCHEMA grant_on_database_propagation;
+SET search_path TO grant_on_database_propagation;
+-- test grant/revoke CREATE privilege propagation on database
+create user myuser;
+grant create on database regression to myuser;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+revoke create on database regression from myuser;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :master_port
+drop user myuser;
+---------------------------------------------------------------------
+-- test grant/revoke CONNECT privilege propagation on database
+create user myuser;
+grant CONNECT on database regression to myuser;
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+revoke connect on database regression from myuser;
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :master_port
+drop user myuser;
+---------------------------------------------------------------------
+-- test grant/revoke TEMP privilege propagation on database
+create user myuser;
+-- test grant/revoke temp on database
+grant TEMP on database regression to myuser;
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+revoke TEMP on database regression from myuser;
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :master_port
+drop user myuser;
+---------------------------------------------------------------------
+-- test temporary privilege on database
+create user myuser;
+-- test grant/revoke temporary on database
+grant TEMPORARY on database regression to myuser;
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+revoke TEMPORARY on database regression from myuser;
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :master_port
+drop user myuser;
+---------------------------------------------------------------------
+-- test ALL privileges with ALL statement on database
+create user myuser;
+grant ALL on database regression to myuser;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+revoke ALL on database regression from myuser;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :master_port
+drop user myuser;
+---------------------------------------------------------------------
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database
+create user myuser;
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+RESET ROLE;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :master_port
+drop user myuser;
+---------------------------------------------------------------------
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
+create user myuser;
+create user myuser_1;
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser;
+set role myuser;
+--here since myuser does not have the grant option, it should fail
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser_1;
+WARNING: no privileges were granted for "regression"
+select has_database_privilege('myuser_1','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser_1','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :master_port
+RESET ROLE;
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser with grant option;
+set role myuser;
+--here since myuser has the grant option, it should succeed
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser_1 granted by myuser;
+select has_database_privilege('myuser_1','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser_1','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+RESET ROLE;
+--below test should fail and should throw an error since myuser_1 still has the dependent privileges
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser restrict;
+ERROR: dependent privileges exist
+HINT: Use CASCADE to revoke them too.
+--below test should fail and should throw an error since myuser_1 still has the dependent privileges
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser restrict ;
+ERROR: dependent privileges exist
+HINT: Use CASCADE to revoke them too.
+--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser cascade ;
+--here we test if myuser still has the privileges after revoking the grant option
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+reset role;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser_1;
+drop user myuser_1;
+drop user myuser;
+---------------------------------------------------------------------
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on multiple databases
+-- and multiple users
+create user myuser;
+create user myuser_1;
+create database test_db;
+NOTICE: Citus partially supports CREATE DATABASE for distributed databases
+DETAIL: Citus does not propagate CREATE DATABASE command to workers
+HINT: You can manually create a database and its extensions on workers.
+SELECT result FROM run_command_on_workers($$create database test_db$$);
+ result
+---------------------------------------------------------------------
+ CREATE DATABASE
+ CREATE DATABASE
+(2 rows)
+
+revoke connect,temp,temporary on database test_db from public;
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db to myuser,myuser_1;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c - - - :master_port
+RESET ROLE;
+--below test should fail and should throw an error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser ;
+--below test should succeed and should not throw any error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser_1;
+--below test should succeed and should not throw any error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser cascade;
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :worker_1_port
+select has_database_privilege('myuser','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser','test_db', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'CREATE');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'CONNECT');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'TEMP');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
+ has_database_privilege
+---------------------------------------------------------------------
+ f
+(1 row)
+
+\c - - - :master_port
+reset role;
+drop user myuser_1;
+drop user myuser;
+drop database test_db;
+SELECT result FROM run_command_on_workers($$drop database test_db$$);
+ result
+---------------------------------------------------------------------
+ DROP DATABASE
+ DROP DATABASE
+(2 rows)
+
+---------------------------------------------------------------------
+-- roll back public role database privileges to the original state
+grant connect,temp,temporary on database regression to public;
+SET client_min_messages TO ERROR;
+DROP SCHEMA grant_on_database_propagation CASCADE;
+---------------------------------------------------------------------
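-- Hedged summary sketch of the pattern the new expected file above repeats for each
-- privilege: grant or revoke on the coordinator, then verify on a worker that the
-- change propagated. All names and the \c targets are taken from the test itself.
grant CONNECT on database regression to myuser;
\c - - - :worker_1_port
select has_database_privilege('myuser', 'regression', 'CONNECT');  -- expected: t
\c - - - :master_port
revoke CONNECT on database regression from myuser;
\c - - - :worker_1_port
select has_database_privilege('myuser', 'regression', 'CONNECT');  -- expected: f
\c - - - :master_port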
diff --git a/src/test/regress/expected/insert_select_repartition.out b/src/test/regress/expected/insert_select_repartition.out
index 88acc49e3..476aa8640 100644
--- a/src/test/regress/expected/insert_select_repartition.out
+++ b/src/test/regress/expected/insert_select_repartition.out
@@ -1214,7 +1214,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@@ -1232,7 +1232,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@@ -1247,7 +1247,7 @@ DO UPDATE SET
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
- Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
+ Group Key: c1, c2, c3, c4, insert_select_repartition.dist_func(c1, 4)
-> Seq Scan on source_table_4213644 source_table
(10 rows)
diff --git a/src/test/regress/expected/insert_select_repartition_0.out b/src/test/regress/expected/insert_select_repartition_0.out
index 7217be3e9..904bd215a 100644
--- a/src/test/regress/expected/insert_select_repartition_0.out
+++ b/src/test/regress/expected/insert_select_repartition_0.out
@@ -1214,7 +1214,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@@ -1232,7 +1232,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@@ -1247,7 +1247,7 @@ DO UPDATE SET
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
- Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
+ Group Key: c1, c2, c3, c4, insert_select_repartition.dist_func(c1, 4)
-> Seq Scan on source_table_4213644 source_table
(10 rows)
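-- Hedged reading of the two hunks above: c5 is the constant "-1::float AS c5", so
-- listing it in GROUP BY is redundant; dropping it keeps the query's semantics
-- unchanged while keeping the PG16 EXPLAIN Group Key (which no longer prints the
-- '-1' literal) stable. Minimal standalone illustration with a hypothetical table:
CREATE TEMP TABLE repartition_demo (a int, b int);
SELECT a, -1::float AS c5, sum(b) FROM repartition_demo GROUP BY a;  -- the constant needs no GROUP BY entry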
diff --git a/src/test/regress/expected/insert_select_single_shard_table.out b/src/test/regress/expected/insert_select_single_shard_table.out
index 8dbb1cf9a..f282ca28e 100644
--- a/src/test/regress/expected/insert_select_single_shard_table.out
+++ b/src/test/regress/expected/insert_select_single_shard_table.out
@@ -112,6 +112,7 @@ SET client_min_messages TO DEBUG2;
-- different table types together with single-shard tables.
-- use a single-shard table
INSERT INTO distributed_table_c1_t1 SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Distributed planning for a fast-path router query
@@ -119,31 +120,38 @@ DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use a reference table
INSERT INTO distributed_table_c1_t1 SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 JOIN reference_table USING (a);
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO distributed_table_c1_t1 SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 RIGHT JOIN reference_table USING (b) WHERE reference_table.a >= 1 AND reference_table.a <= 5;
-DEBUG: cannot perform a lateral outer join when a distributed subquery references a reference table
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO distributed_table_c1_t1 SELECT nullkey_c1_t2.a, nullkey_c1_t2.b FROM nullkey_c1_t2 LEFT JOIN reference_table USING (b);
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO distributed_table_c1_t1 SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 INTERSECT SELECT * FROM reference_table;
-DEBUG: cannot push down this subquery
-DETAIL: Intersect and Except are currently unsupported
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use a colocated single-shard table
INSERT INTO distributed_table_c1_t1 SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 JOIN nullkey_c1_t2 USING (b);
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO distributed_table_c1_t1 SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 FULL JOIN nullkey_c1_t2 USING (a);
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
@@ -163,6 +171,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT COALESCE(null
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO distributed_table_c1_t1 SELECT * FROM nullkey_c1_t1 UNION SELECT * FROM nullkey_c1_t2;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
@@ -232,12 +241,14 @@ DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use append / range distributed tables
INSERT INTO range_table SELECT * FROM nullkey_c1_t1;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO append_table SELECT * FROM nullkey_c1_t1;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: INSERT ... SELECT into an append-distributed table is not supported
@@ -251,28 +262,32 @@ DEBUG: Router planner cannot handle multi-shard select queries
TRUNCATE distributed_table_c1_t1;
INSERT INTO distributed_table_c1_t1 SELECT i, i FROM generate_series(3, 8) i;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- Test inserting into a reference table by selecting from a combination of
-- different table types together with single-shard tables.
-- use a single-shard table
INSERT INTO reference_table SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1;
+DEBUG: Creating router plan
DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use a reference table
INSERT INTO reference_table SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 JOIN reference_table USING (a);
+DEBUG: Creating router plan
DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO reference_table SELECT nullkey_c1_t2.a, nullkey_c1_t2.b FROM nullkey_c1_t2 LEFT JOIN reference_table USING (b);
+DEBUG: Creating router plan
DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO reference_table SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 UNION SELECT * FROM reference_table;
-DEBUG: cannot push down this subquery
-DETAIL: Reference tables are not supported with union operator
+DEBUG: Creating router plan
+DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO reference_table SELECT nullkey_c1_t2.a, nullkey_c1_t2.b FROM nullkey_c1_t2 LEFT JOIN reference_table USING (b) WHERE b IN (SELECT b FROM matview);
@@ -284,10 +299,12 @@ DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use a colocated single-shard table
INSERT INTO reference_table SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 JOIN nullkey_c1_t2 USING (b);
+DEBUG: Creating router plan
DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO reference_table SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 FULL JOIN nullkey_c1_t2 USING (a);
+DEBUG: Creating router plan
DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
@@ -344,23 +361,27 @@ DEBUG: Creating router plan
TRUNCATE reference_table;
INSERT INTO reference_table SELECT i, i FROM generate_series(0, 5) i;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- Test inserting into a citus local table by selecting from a combination of
-- different table types together with single-shard tables.
-- use a single-shard table
INSERT INTO citus_local_table SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT cannot insert into a local table that is added to metadata
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use a reference table
INSERT INTO citus_local_table SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 JOIN reference_table USING (a);
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT cannot insert into a local table that is added to metadata
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use a colocated single-shard table
INSERT INTO citus_local_table SELECT nullkey_c1_t1.a, nullkey_c1_t1.b FROM nullkey_c1_t1 JOIN nullkey_c1_t2 USING (b);
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT cannot insert into a local table that is added to metadata
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
@@ -402,15 +423,18 @@ DEBUG: Creating router plan
TRUNCATE citus_local_table;
INSERT INTO citus_local_table SELECT i, i FROM generate_series(0, 10) i;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- Test inserting into a single-shard table by selecting from a combination of
-- different table types, together with or without single-shard tables.
-- use a postgres local table
INSERT INTO nullkey_c1_t1 SELECT postgres_local_table.a, postgres_local_table.b FROM postgres_local_table;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_c1_t1 SELECT postgres_local_table.a, postgres_local_table.b FROM postgres_local_table JOIN reference_table USING (a);
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT cannot select from a local table
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
@@ -430,11 +454,13 @@ DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use a citus local table
INSERT INTO nullkey_c1_t1 SELECT citus_local_table.a, citus_local_table.b FROM citus_local_table;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT cannot select from a local relation when inserting into a distributed table
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_c1_t1 SELECT citus_local_table.a, citus_local_table.b FROM citus_local_table JOIN reference_table USING (a) JOIN postgres_local_table USING (a) ORDER BY 1,2 OFFSET 7;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT cannot select from a local table
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
@@ -466,15 +492,18 @@ RESET citus.enable_repartition_joins;
SET client_min_messages TO DEBUG2;
-- use a non-colocated single-shard table
INSERT INTO nullkey_c2_t1 SELECT q.* FROM (SELECT reference_table.* FROM reference_table LEFT JOIN nullkey_c1_t1 USING (a)) q JOIN nullkey_c1_t2 USING (a);
-DEBUG: cannot perform a lateral outer join when a distributed subquery references a reference table
+DEBUG: Creating router plan
+DEBUG: INSERT target relation and all source relations of the SELECT must be colocated in distributed INSERT ... SELECT
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- use a materialized view
INSERT INTO nullkey_c1_t1 SELECT * FROM matview;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_c1_t1 SELECT reference_table.a, reference_table.b FROM reference_table JOIN matview ON (reference_table.a = matview.a);
-DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
+DEBUG: Creating router plan
+DEBUG: distributed INSERT ... SELECT cannot select from a local relation when inserting into a distributed table
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_c1_t1 SELECT q.* FROM (SELECT reference_table.* FROM reference_table JOIN nullkey_c1_t1 USING (a)) q JOIN matview USING (a);
@@ -512,9 +541,11 @@ DEBUG: Creating router plan
TRUNCATE nullkey_c1_t1, nullkey_c2_t1;
INSERT INTO nullkey_c1_t1 SELECT i, i FROM generate_series(1, 8) i;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_c2_t1 SELECT i, i FROM generate_series(2, 7) i;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- Test inserting into a local table by selecting from a combination of
@@ -576,8 +607,9 @@ cte_2 AS (
)
INSERT INTO distributed_table_c1_t1
SELECT * FROM cte_1 UNION SELECT * FROM cte_2 EXCEPT SELECT * FROM reference_table;
-DEBUG: cannot push down this subquery
-DETAIL: CTEs in subqueries are currently unsupported
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: CTE cte_1 is going to be inlined via distributed planning
DEBUG: CTE cte_2 is going to be inlined via distributed planning
DEBUG: Creating router plan
@@ -589,8 +621,9 @@ JOIN (
SELECT b FROM nullkey_c1_t2 ORDER BY b DESC LIMIT 1
) t2
ON t1.b < t2.b;
-DEBUG: cannot push down this subquery
-DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO distributed_table_c1_t1 (a, b)
@@ -602,6 +635,7 @@ WITH cte AS (
)
SELECT d1, COALESCE(d2, a) FROM cte WHERE d1 IS NOT NULL AND d2 IS NOT NULL;
DEBUG: CTE cte is going to be inlined via distributed planning
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column.
HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
@@ -629,8 +663,9 @@ LEFT JOIN (
FROM nullkey_c1_t1
) t2 ON t1.b = t2.b
WHERE t2.rn > 0;
-DEBUG: cannot push down this subquery
-DETAIL: Window functions without PARTITION BY on distribution column is currently unsupported
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_c1_t1 (a, b)
@@ -645,7 +680,7 @@ JOIN (
) t2 ON t1.b = t2.b
WHERE t2.rn > 2;
DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for subquery SELECT b, row_number() OVER (ORDER BY b DESC) AS rn FROM insert_select_single_shard_table.distributed_table_c2_t1
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT t1.a, t2.b FROM (insert_select_single_shard_table.nullkey_c1_t1 t1 JOIN (SELECT q.rn, q.b FROM (SELECT intermediate_result.b, intermediate_result.rn FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(b integer, rn bigint)) q) t2 ON ((t1.b OPERATOR(pg_catalog.=) t2.b))) WHERE (t2.rn OPERATOR(pg_catalog.>) 2)
@@ -662,6 +697,7 @@ JOIN (
) q
) t2 ON t1.b = t2.b
WHERE t2.sum_val > 2;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
@@ -670,15 +706,9 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
-- in the output of the next query.
SET client_min_messages TO DEBUG1;
INSERT INTO nullkey_c1_t1 SELECT DISTINCT ON (a) a, b FROM nullkey_c1_t2;
-SET client_min_messages TO DEBUG2;
--- Similarly, we could push down the following query as well. see
--- https://github.com/citusdata/citus/pull/6831.
+-- keep low verbosity as PG15 and PG14 produce slightly different outputs
INSERT INTO nullkey_c1_t1 SELECT b, SUM(a) OVER (ORDER BY b) AS sum_val FROM nullkey_c1_t1;
-DEBUG: cannot push down this subquery
-DETAIL: Window functions without PARTITION BY on distribution column is currently unsupported
-DEBUG: Distributed planning for a fast-path router query
-DEBUG: Creating router plan
-DEBUG: Collecting INSERT ... SELECT results on coordinator
+SET client_min_messages TO DEBUG2;
INSERT INTO nullkey_c2_t1
SELECT t2.a, t2.b
FROM nullkey_c1_t1 AS t2
@@ -686,6 +716,7 @@ JOIN reference_table AS t3 ON (t2.a = t3.a)
WHERE NOT EXISTS (
SELECT 1 FROM nullkey_c1_t2 AS t1 WHERE t1.b = t3.b
);
+DEBUG: Creating router plan
DEBUG: INSERT target relation and all source relations of the SELECT must be colocated in distributed INSERT ... SELECT
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
@@ -712,6 +743,7 @@ JOIN (
SELECT a FROM nullkey_c1_t2
) AS t2
) AS t3 ON t1.a = t3.a;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Creating router plan
@@ -854,6 +886,7 @@ DEBUG: ALTER TABLE / ADD PRIMARY KEY will create implicit index "distributed_ta
DEBUG: verifying table "distributed_table_c1_t1"
INSERT INTO nullkey_c1_t1 AS t1 (a, b) SELECT t3.a, t3.b FROM nullkey_c1_t2 t2 JOIN reference_table t3 ON (t2.a = t3.a) ON CONFLICT (a)
DO UPDATE SET a = t1.a + 10;
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO insert_select_single_shard_table.nullkey_c1_t1_1820000 AS t1 (a, b) SELECT t3.a, t3.b FROM (insert_select_single_shard_table.nullkey_c1_t2_1820001 t2 JOIN insert_select_single_shard_table.reference_table_1820003 t3 ON ((t2.a OPERATOR(pg_catalog.=) t3.a))) ON CONFLICT(a) DO UPDATE SET a = (t1.a OPERATOR(pg_catalog.+) 10)
SET client_min_messages TO DEBUG1;
INSERT INTO distributed_table_c1_t1 AS t1 (a, b) SELECT t3.a, t3.b FROM nullkey_c1_t2 t2 JOIN reference_table t3 ON (t2.a = t3.a) ON CONFLICT (a, b)
@@ -868,7 +901,7 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
-- This also fails due to https://github.com/citusdata/citus/issues/6826.
INSERT INTO nullkey_c1_t1 AS t1 (a, b) SELECT t3.a, t3.b FROM distributed_table_c1_t1 t2 JOIN reference_table t3 ON (t2.a = t3.a) WHERE t2.a = 3 ON CONFLICT (a)
DO UPDATE SET a = (SELECT max(b)+1 FROM distributed_table_c1_t1 WHERE a = 3);
-DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
+DEBUG: INSERT target relation and all source relations of the SELECT must be colocated in distributed INSERT ... SELECT
DEBUG: Collecting INSERT ... SELECT results on coordinator
ERROR: cannot execute a distributed query from a query on a shard
DETAIL: Executing a distributed query in a function call that may be pushed to a remote node can lead to incorrect results.
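The added "Creating router plan" lines above reflect the coordinator path for INSERT ... SELECT: the SELECT is router-planned, the distributed INSERT ... SELECT optimization is rejected (the source is not a distributed table, or its partition column does not line up with the target's), and the rows are collected on the coordinator. A minimal sketch reproducing the generate_series case, assuming a cluster where single-shard distributed tables can be created with a NULL distribution column; the table name is illustrative and not part of this diff:

CREATE TABLE nullkey_demo (a int, b int);
SELECT create_distributed_table('nullkey_demo', null);
SET client_min_messages TO DEBUG2;
-- expected debug sequence, matching the output above:
--   DEBUG:  Creating router plan
--   DEBUG:  distributed INSERT ... SELECT can only select from distributed tables
--   DEBUG:  Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_demo SELECT i, i FROM generate_series(1, 8) i;
RESET client_min_messages;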
diff --git a/src/test/regress/expected/intermediate_results.out b/src/test/regress/expected/intermediate_results.out
index 8b2e200f7..140b7aacf 100644
--- a/src/test/regress/expected/intermediate_results.out
+++ b/src/test/regress/expected/intermediate_results.out
@@ -572,13 +572,6 @@ WARNING: Query could not find the intermediate result file "squares_2", it was
-- test refreshing mat views
SET client_min_messages TO ERROR;
CREATE USER some_other_user;
-SELECT run_command_on_workers($$GRANT ALL ON DATABASE regression TO some_other_user;$$);
- run_command_on_workers
----------------------------------------------------------------------
- (localhost,57637,t,GRANT)
- (localhost,57638,t,GRANT)
-(2 rows)
-
GRANT ALL ON DATABASE regression TO some_other_user;
RESET client_min_messages;
\c - some_other_user
diff --git a/src/test/regress/expected/isolation_add_remove_node.out b/src/test/regress/expected/isolation_add_remove_node.out
index 9248ed5be..15a5c1a58 100644
--- a/src/test/regress/expected/isolation_add_remove_node.out
+++ b/src/test/regress/expected/isolation_add_remove_node.out
@@ -698,9 +698,17 @@ master_remove_node
(1 row)
-starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-abort s1-show-nodes
+starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-disable-node-2 s2-disable-node-2 s1-abort s2-wait-metadata-sync s1-show-nodes
step s1-add-node-1:
- SELECT 1 FROM master_add_node('localhost', 57637);
+ SELECT 1 FROM master_add_node('localhost', 57637);
+
+?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+step s1-add-node-2:
+ SELECT 1 FROM master_add_node('localhost', 57638);
?column?
---------------------------------------------------------------------
@@ -708,11 +716,11 @@ step s1-add-node-1:
(1 row)
step s1-begin:
- BEGIN;
+ BEGIN;
-step s1-disable-node-1:
- SELECT 1 FROM master_disable_node('localhost', 57637);
- SELECT public.wait_until_metadata_sync();
+step s1-disable-node-2:
+ SELECT 1 FROM master_disable_node('localhost', 57638);
+ SELECT public.wait_until_metadata_sync();
?column?
---------------------------------------------------------------------
@@ -724,34 +732,38 @@ wait_until_metadata_sync
(1 row)
-step s2-disable-node-1:
- SELECT 1 FROM master_disable_node('localhost', 57637);
- SELECT public.wait_until_metadata_sync();
+step s2-disable-node-2:
+ SELECT 1 FROM master_disable_node('localhost', 57638);
step s1-abort:
- ABORT;
+ ABORT;
-step s2-disable-node-1: <... completed>
+step s2-disable-node-2: <... completed>
?column?
---------------------------------------------------------------------
1
(1 row)
+step s2-wait-metadata-sync:
+ SELECT public.wait_until_metadata_sync();
+
wait_until_metadata_sync
---------------------------------------------------------------------
(1 row)
step s1-show-nodes:
- SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
+ SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
nodename |nodeport|isactive
---------------------------------------------------------------------
-localhost| 57637|f
-(1 row)
+localhost| 57637|t
+localhost| 57638|f
+(2 rows)
master_remove_node
---------------------------------------------------------------------
-(1 row)
+
+(2 rows)
diff --git a/src/test/regress/expected/isolation_citus_dist_activity.out b/src/test/regress/expected/isolation_citus_dist_activity.out
index 0c905c76a..94b4759c0 100644
--- a/src/test/regress/expected/isolation_citus_dist_activity.out
+++ b/src/test/regress/expected/isolation_citus_dist_activity.out
@@ -44,20 +44,12 @@ query |citus_nodename_for_nodeid|citus_n
step s3-view-worker:
SELECT query, citus_nodename_for_nodeid(citus_nodeid_for_gpid(global_pid)), citus_nodeport_for_nodeid(citus_nodeid_for_gpid(global_pid)), state, wait_event_type, wait_event, usename, datname FROM citus_stat_activity WHERE query NOT ILIKE ALL(VALUES('%pg_prepared_xacts%'), ('%COMMIT%'), ('%csa_from_one_node%')) AND is_worker_query = true AND backend_type = 'client backend' ORDER BY query DESC;
-query |citus_nodename_for_nodeid|citus_nodeport_for_nodeid|state |wait_event_type|wait_event|usename |datname
+query |citus_nodename_for_nodeid|citus_nodeport_for_nodeid|state |wait_event_type|wait_event|usename |datname
---------------------------------------------------------------------
-SELECT worker_apply_shard_ddl_command (1300004, 'public', '
- ALTER TABLE test_table ADD COLUMN x INT;
-')|localhost | 57636|idle in transaction|Client |ClientRead|postgres|regression
-SELECT worker_apply_shard_ddl_command (1300003, 'public', '
- ALTER TABLE test_table ADD COLUMN x INT;
-')|localhost | 57636|idle in transaction|Client |ClientRead|postgres|regression
-SELECT worker_apply_shard_ddl_command (1300002, 'public', '
- ALTER TABLE test_table ADD COLUMN x INT;
-')|localhost | 57636|idle in transaction|Client |ClientRead|postgres|regression
-SELECT worker_apply_shard_ddl_command (1300001, 'public', '
- ALTER TABLE test_table ADD COLUMN x INT;
-')|localhost | 57636|idle in transaction|Client |ClientRead|postgres|regression
+SELECT worker_apply_shard_ddl_command (1300004, 'public', 'ALTER TABLE test_table ADD COLUMN x integer;')|localhost | 57636|idle in transaction|Client |ClientRead|postgres|regression
+SELECT worker_apply_shard_ddl_command (1300003, 'public', 'ALTER TABLE test_table ADD COLUMN x integer;')|localhost | 57636|idle in transaction|Client |ClientRead|postgres|regression
+SELECT worker_apply_shard_ddl_command (1300002, 'public', 'ALTER TABLE test_table ADD COLUMN x integer;')|localhost | 57636|idle in transaction|Client |ClientRead|postgres|regression
+SELECT worker_apply_shard_ddl_command (1300001, 'public', 'ALTER TABLE test_table ADD COLUMN x integer;')|localhost | 57636|idle in transaction|Client |ClientRead|postgres|regression
(4 rows)
step s2-rollback:
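The s3-view-worker step above gets its rows from citus_stat_activity; a trimmed version of the same monitoring query, with the column list and filters shortened relative to the spec (every function and column used here appears verbatim in the step definition above):

SELECT query,
       citus_nodename_for_nodeid(citus_nodeid_for_gpid(global_pid)) AS nodename,
       citus_nodeport_for_nodeid(citus_nodeid_for_gpid(global_pid)) AS nodeport,
       state, wait_event_type, wait_event
FROM citus_stat_activity
WHERE is_worker_query AND backend_type = 'client backend'
ORDER BY query DESC;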
diff --git a/src/test/regress/expected/isolation_citus_pause_node.out b/src/test/regress/expected/isolation_citus_pause_node.out
new file mode 100644
index 000000000..dd796f768
--- /dev/null
+++ b/src/test/regress/expected/isolation_citus_pause_node.out
@@ -0,0 +1,317 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1-begin s2-begin s1-pause-node s2-insert-distributed s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+step s2-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s2-insert-distributed:
+ -- Execute the INSERT statement
+ insert into employee values(11,'e11',3);
+
+step s1-end:
+ COMMIT;
+
+step s2-insert-distributed: <... completed>
+step s2-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s2-begin s1-pause-node s2-delete-distributed s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+step s2-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s2-delete-distributed:
+ -- Execute the DELETE statement
+ delete from employee where id = 9;
+
+step s1-end:
+ COMMIT;
+
+step s2-delete-distributed: <... completed>
+step s2-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s1-pause-node s2-begin s2-select-distributed s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s2-begin:
+ BEGIN;
+
+step s2-select-distributed:
+ select * from employee where id = 10;
+
+id|name|company_id
+---------------------------------------------------------------------
+10|e10 | 3
+(1 row)
+
+step s1-end:
+ COMMIT;
+
+step s2-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s2-begin s1-pause-node s2-insert-reference s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+step s2-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s2-insert-reference:
+ -- Execute the INSERT statement
+ insert into city values(3,'city3');
+
+step s1-end:
+ COMMIT;
+
+step s2-insert-reference: <... completed>
+step s2-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s1-pause-node s1-pause-node s1-end
+step s1-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s1-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s1-node-not-found s1-end
+step s1-begin:
+ BEGIN;
+
+s1: NOTICE: Node not found.
+step s1-node-not-found:
+ DO $$
+ DECLARE
+ v_node_id int:= -1;
+ v_node_exists boolean := true;
+ v_exception_message text;
+ v_expected_exception_message text := '';
+ BEGIN
+ select nextval('pg_dist_node_nodeid_seq')::int into v_node_id;
+ select citus_pause_node_within_txn(v_node_id) ;
+ EXCEPTION
+ WHEN SQLSTATE 'P0002' THEN
+ GET STACKED DIAGNOSTICS v_exception_message = MESSAGE_TEXT;
+ v_expected_exception_message := 'node ' || v_node_id || ' not found';
+ if v_exception_message = v_expected_exception_message then
+ RAISE NOTICE 'Node not found.';
+ end if;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s1-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s2-begin s2-insert-distributed s1-pause-node-force s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+step s2-begin:
+ BEGIN;
+
+step s2-insert-distributed:
+ -- Execute the INSERT statement
+ insert into employee values(11,'e11',3);
+
+step s1-pause-node-force:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ v_force boolean := true;
+ v_lock_cooldown int := 100;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node with force true
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id,v_force,v_lock_cooldown) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+s1: NOTICE:
+step s1-pause-node-force: <... completed>
+step s1-end:
+ COMMIT;
+
+step s2-end:
+ COMMIT;
+
+FATAL: terminating connection due to administrator command
+SSL connection has been closed unexpectedly
+
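The new spec drives citus_pause_node_within_txn from a PL/pgSQL block; condensed into a standalone sketch below. The employee table and distribution value 3 come from the spec; the join used to resolve the node id is simply one straightforward way to do the same lookup:

BEGIN;
DO $$
DECLARE
  v_shard_id int;
  v_node_id  int;
BEGIN
  -- shard that holds distribution value 3, then the node currently serving it
  SELECT get_shard_id_for_distribution_column('employee', 3) INTO v_shard_id;
  SELECT n.nodeid INTO v_node_id
  FROM citus_shards s
  JOIN pg_dist_node n ON n.nodename = s.nodename AND n.nodeport = s.nodeport
  WHERE s.shardid = v_shard_id
  LIMIT 1;
  -- pause the node until this transaction ends; concurrent writes that need
  -- it block, as the s2-insert-distributed / s2-delete-distributed steps show
  PERFORM pg_catalog.citus_pause_node_within_txn(v_node_id);
END;
$$;
COMMIT;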
diff --git a/src/test/regress/expected/isolation_citus_pause_node_1.out b/src/test/regress/expected/isolation_citus_pause_node_1.out
new file mode 100644
index 000000000..7b84ecd72
--- /dev/null
+++ b/src/test/regress/expected/isolation_citus_pause_node_1.out
@@ -0,0 +1,318 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1-begin s2-begin s1-pause-node s2-insert-distributed s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+step s2-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s2-insert-distributed:
+ -- Execute the INSERT statement
+ insert into employee values(11,'e11',3);
+
+step s1-end:
+ COMMIT;
+
+step s2-insert-distributed: <... completed>
+step s2-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s2-begin s1-pause-node s2-delete-distributed s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+step s2-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s2-delete-distributed:
+ -- Execute the DELETE statement
+ delete from employee where id = 9;
+
+step s1-end:
+ COMMIT;
+
+step s2-delete-distributed: <... completed>
+step s2-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s1-pause-node s2-begin s2-select-distributed s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s2-begin:
+ BEGIN;
+
+step s2-select-distributed:
+ select * from employee where id = 10;
+
+id|name|company_id
+---------------------------------------------------------------------
+10|e10 | 3
+(1 row)
+
+step s1-end:
+ COMMIT;
+
+step s2-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s2-begin s1-pause-node s2-insert-reference s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+step s2-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s2-insert-reference:
+ -- Execute the INSERT statement
+ insert into city values(3,'city3');
+
+step s1-end:
+ COMMIT;
+
+step s2-insert-reference: <... completed>
+step s2-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s1-pause-node s1-pause-node s1-end
+step s1-begin:
+ BEGIN;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+s1: NOTICE:
+step s1-pause-node:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s1-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s1-node-not-found s1-end
+step s1-begin:
+ BEGIN;
+
+s1: NOTICE: Node not found.
+step s1-node-not-found:
+ DO $$
+ DECLARE
+ v_node_id int:= -1;
+ v_node_exists boolean := true;
+ v_exception_message text;
+ v_expected_exception_message text := '';
+ BEGIN
+ select nextval('pg_dist_node_nodeid_seq')::int into v_node_id;
+ select citus_pause_node_within_txn(v_node_id) ;
+ EXCEPTION
+ WHEN SQLSTATE 'P0002' THEN
+ GET STACKED DIAGNOSTICS v_exception_message = MESSAGE_TEXT;
+ v_expected_exception_message := 'node ' || v_node_id || ' not found';
+ if v_exception_message = v_expected_exception_message then
+ RAISE NOTICE 'Node not found.';
+ end if;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+step s1-end:
+ COMMIT;
+
+
+starting permutation: s1-begin s2-begin s2-insert-distributed s1-pause-node-force s1-end s2-end
+step s1-begin:
+ BEGIN;
+
+step s2-begin:
+ BEGIN;
+
+step s2-insert-distributed:
+ -- Execute the INSERT statement
+ insert into employee values(11,'e11',3);
+
+step s1-pause-node-force:
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ v_force boolean := true;
+ v_lock_cooldown int := 100;
+ BEGIN
+ --The first message in the block is being printed on the top of the code block. So adding a dummy message
+ --to make sure that the first message is printed in correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+ -- Pause the node with force true
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id,v_force,v_lock_cooldown) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+
+s1: NOTICE:
+step s1-pause-node-force: <... completed>
+step s1-end:
+ COMMIT;
+
+step s2-end:
+ COMMIT;
+
+FATAL: terminating connection due to administrator command
+FATAL: terminating connection due to administrator command
+SSL connection has been closed unexpectedly
+
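Both expected files end with the forced permutation, which passes the three-argument form of the function. A minimal sketch of that call shape inside a transaction; the node id 2 is hypothetical, and true and 100 mirror v_force and v_lock_cooldown from the DO block above:

BEGIN;
-- arguments as used by s1-pause-node-force: node id, force flag, lock cooldown
SELECT pg_catalog.citus_pause_node_within_txn(2, true, 100);
COMMIT;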
diff --git a/src/test/regress/expected/isolation_pg_send_cancellation.out b/src/test/regress/expected/isolation_pg_send_cancellation.out
deleted file mode 100644
index 4b1475352..000000000
--- a/src/test/regress/expected/isolation_pg_send_cancellation.out
+++ /dev/null
@@ -1,42 +0,0 @@
-Parsed test spec with 2 sessions
-
-starting permutation: s1-register s2-lock s1-lock s2-wrong-cancel-1 s2-wrong-cancel-2 s2-cancel
-step s1-register:
- INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
-
-step s2-lock:
- BEGIN;
- LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
-
-step s1-lock:
- BEGIN;
- LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
- END;
-
-step s2-wrong-cancel-1:
- SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
-
-run_pg_send_cancellation
----------------------------------------------------------------------
-
-(1 row)
-
-step s2-wrong-cancel-2:
- SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
-
-run_pg_send_cancellation
----------------------------------------------------------------------
-
-(1 row)
-
-step s2-cancel:
- SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
- END;
-
-run_pg_send_cancellation
----------------------------------------------------------------------
-
-(1 row)
-
-step s1-lock: <... completed>
-ERROR: canceling statement due to user request
diff --git a/src/test/regress/expected/isolation_shard_rebalancer_progress.out b/src/test/regress/expected/isolation_shard_rebalancer_progress.out
index 8553a1d4d..90c78ca62 100644
--- a/src/test/regress/expected/isolation_shard_rebalancer_progress.out
+++ b/src/test/regress/expected/isolation_shard_rebalancer_progress.out
@@ -19,7 +19,7 @@ step s1-rebalance-c1-block-writes:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -40,8 +40,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 8000| 1|move |t |t |f |Copying Data
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 8000| 1|move |t |t |f |Copying Data
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 8000| 1|move |t |t |f |Copying Data
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 8000| 1|move |t |t |f |Copying Data
colocated1|1500002| 200000|localhost | 57637| 200000|localhost | 57638| 0| 0|move |t |t |f |Not Started Yet
colocated2|1500006| 8000|localhost | 57637| 8000|localhost | 57638| 0| 0|move |t |t |f |Not Started Yet
(4 rows)
@@ -63,7 +63,7 @@ rebalance_table_shards
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -102,7 +102,7 @@ step s1-rebalance-c1-block-writes:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -123,8 +123,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 50000| 2|move |t |t |f |Completed
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 400000| 2|move |t |t |f |Completed
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 40000| 2|move |t |t |f |Completed
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 480000| 2|move |t |t |f |Completed
colocated1|1500002| 200000|localhost | 57637| 200000|localhost | 57638| 0| 1|move |t |t |f |Setting Up
colocated2|1500006| 8000|localhost | 57637| 8000|localhost | 57638| 0| 1|move |t |t |f |Setting Up
(4 rows)
@@ -141,7 +141,7 @@ rebalance_table_shards
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -184,7 +184,7 @@ step s1-rebalance-c1-block-writes:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -205,8 +205,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 50000| 1|move |t |t |f |Copying Data
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 400000| 1|move |t |t |f |Copying Data
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 40000| 1|move |t |t |f |Copying Data
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 480000| 1|move |t |t |f |Copying Data
colocated1|1500002| 200000|localhost | 57637| 200000|localhost | 57638| 0| 0|move |t |t |f |Not Started Yet
colocated2|1500006| 8000|localhost | 57637| 8000|localhost | 57638| 0| 0|move |t |t |f |Not Started Yet
(4 rows)
@@ -228,7 +228,7 @@ rebalance_table_shards
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -271,7 +271,7 @@ step s1-rebalance-c1-online:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -292,8 +292,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 8000| 1|move |t |t |f |Setting Up
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 8000| 1|move |t |t |f |Setting Up
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 8000| 1|move |t |t |f |Setting Up
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 8000| 1|move |t |t |f |Setting Up
colocated1|1500002| 200000|localhost | 57637| 200000|localhost | 57638| 0| 0|move |t |t |f |Not Started Yet
colocated2|1500006| 8000|localhost | 57637| 8000|localhost | 57638| 0| 0|move |t |t |f |Not Started Yet
(4 rows)
@@ -315,7 +315,7 @@ rebalance_table_shards
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -358,7 +358,7 @@ step s1-rebalance-c1-online:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -379,8 +379,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 50000| 1|move |t |t |t |Final Catchup
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 400000| 1|move |t |t |t |Final Catchup
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 40000| 1|move |t |t |t |Final Catchup
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 480000| 1|move |t |t |t |Final Catchup
colocated1|1500002| 200000|localhost | 57637| 200000|localhost | 57638| 0| 0|move |t |t |f |Not Started Yet
colocated2|1500006| 8000|localhost | 57637| 8000|localhost | 57638| 0| 0|move |t |t |f |Not Started Yet
(4 rows)
@@ -402,7 +402,7 @@ rebalance_table_shards
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -445,7 +445,7 @@ step s1-shard-move-c1-block-writes:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -466,8 +466,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 8000| 1|move |t |t |f |Copying Data
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 8000| 1|move |t |t |f |Copying Data
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 8000| 1|move |t |t |f |Copying Data
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 8000| 1|move |t |t |f |Copying Data
(2 rows)
step s5-release-advisory-lock:
@@ -487,7 +487,7 @@ citus_move_shard_placement
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -530,7 +530,7 @@ step s1-shard-move-c1-block-writes:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -551,8 +551,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 50000| 1|move |t |t |f |Copying Data
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 400000| 1|move |t |t |f |Copying Data
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 40000| 1|move |t |t |f |Copying Data
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 480000| 1|move |t |t |f |Copying Data
(2 rows)
step s6-release-advisory-lock:
@@ -572,7 +572,7 @@ citus_move_shard_placement
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -616,7 +616,7 @@ step s1-shard-copy-c1-block-writes:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -637,8 +637,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 8000| 1|copy |t |t |f |Copying Data
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 8000| 1|copy |t |t |f |Copying Data
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 8000| 1|copy |t |t |f |Copying Data
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 8000| 1|copy |t |t |f |Copying Data
(2 rows)
step s5-release-advisory-lock:
@@ -658,7 +658,7 @@ citus_copy_shard_placement
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -702,7 +702,7 @@ step s1-shard-copy-c1-block-writes:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -723,8 +723,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 50000| 1|copy |t |t |f |Copying Data
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 400000| 1|copy |t |t |f |Copying Data
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 40000| 1|copy |t |t |f |Copying Data
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 480000| 1|copy |t |t |f |Copying Data
(2 rows)
step s6-release-advisory-lock:
@@ -744,7 +744,7 @@ citus_copy_shard_placement
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -787,7 +787,7 @@ step s1-shard-move-c1-online:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -808,8 +808,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 8000| 1|move |t |t |f |Setting Up
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 8000| 1|move |t |t |f |Setting Up
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 8000| 1|move |t |t |f |Setting Up
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 8000| 1|move |t |t |f |Setting Up
(2 rows)
step s5-release-advisory-lock:
@@ -829,7 +829,7 @@ citus_move_shard_placement
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -872,7 +872,7 @@ step s1-shard-move-c1-online:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -893,8 +893,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 50000| 1|move |t |t |t |Final Catchup
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 400000| 1|move |t |t |t |Final Catchup
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 40000| 1|move |t |t |t |Final Catchup
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 480000| 1|move |t |t |t |Final Catchup
(2 rows)
step s6-release-advisory-lock:
@@ -914,7 +914,7 @@ citus_move_shard_placement
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -958,7 +958,7 @@ step s1-shard-copy-c1-online:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -979,8 +979,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 8000| 1|copy |t |t |f |Setting Up
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 8000| 1|copy |t |t |f |Setting Up
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 8000| 1|copy |t |t |f |Setting Up
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 8000| 1|copy |t |t |f |Setting Up
(2 rows)
step s5-release-advisory-lock:
@@ -1000,7 +1000,7 @@ citus_copy_shard_placement
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -1044,7 +1044,7 @@ step s1-shard-copy-c1-online:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -1065,8 +1065,8 @@ step s7-get-progress:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available|status
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 50000| 1|copy |t |t |t |Final Catchup
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 400000| 1|copy |t |t |t |Final Catchup
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 40000| 1|copy |t |t |t |Final Catchup
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 480000| 1|copy |t |t |t |Final Catchup
(2 rows)
step s6-release-advisory-lock:
@@ -1086,7 +1086,7 @@ citus_copy_shard_placement
step s1-wait:
step s7-get-progress:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -1132,7 +1132,7 @@ step s4-shard-move-sep-block-writes:
step s7-get-progress-ordered:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -1153,9 +1153,9 @@ step s7-get-progress-ordered:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 8000| 1|move |t |t |f
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 8000| 1|move |t |t |f
-separate |1500009| 50000|localhost | 57637| 50000|localhost | 57638| 8000| 1|move |t |t |f
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 8000| 1|move |t |t |f
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 8000| 1|move |t |t |f
+separate |1500009| 200000|localhost | 57637| 200000|localhost | 57638| 8000| 1|move |t |t |f
(3 rows)
step s5-release-advisory-lock:
@@ -1182,7 +1182,7 @@ step s1-wait:
step s4-wait:
step s7-get-progress-ordered:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -1228,7 +1228,7 @@ step s4-shard-move-sep-block-writes:
step s7-get-progress-ordered:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -1249,9 +1249,9 @@ step s7-get-progress-ordered:
table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname|targetport|target_shard_size|progress|operation_type|lsn_sanity_check|source_lsn_available|target_lsn_available
---------------------------------------------------------------------
-colocated1|1500001| 50000|localhost | 57637| 50000|localhost | 57638| 50000| 1|move |t |t |f
-colocated2|1500005| 400000|localhost | 57637| 400000|localhost | 57638| 400000| 1|move |t |t |f
-separate |1500009| 50000|localhost | 57637| 50000|localhost | 57638| 200000| 1|move |t |t |f
+colocated1|1500001| 40000|localhost | 57637| 40000|localhost | 57638| 40000| 1|move |t |t |f
+colocated2|1500005| 480000|localhost | 57637| 480000|localhost | 57638| 480000| 1|move |t |t |f
+separate |1500009| 200000|localhost | 57637| 200000|localhost | 57638| 200000| 1|move |t |t |f
(3 rows)
step s6-release-advisory-lock:
@@ -1278,7 +1278,7 @@ step s1-wait:
step s4-wait:
step s7-get-progress-ordered:
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
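The only change in this file is the set of values in possible_sizes; the shard_size values in the output rows match those buckets exactly, so the step evidently normalizes raw shard sizes to stable expected values. The spec's actual expression lies outside these hunks; a hedged sketch of one way such snapping can be written, using made-up raw sizes:

WITH possible_sizes(size) AS (VALUES (0), (8000), (40000), (200000), (480000))
SELECT raw_size,
       (SELECT size
        FROM possible_sizes
        ORDER BY abs(size - raw_size)
        LIMIT 1) AS bucketed_size
FROM (VALUES (41337), (474082)) AS shard(raw_size);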
diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out
index 86615648c..1a1c65ec8 100644
--- a/src/test/regress/expected/isolation_update_node.out
+++ b/src/test/regress/expected/isolation_update_node.out
@@ -3,8 +3,8 @@ Parsed test spec with 3 sessions
starting permutation: s1-begin s1-update-node-1 s2-update-node-2 s1-commit s1-show-nodes s3-update-node-1-back s3-update-node-2-back s3-manually-fix-metadata
nodeid|nodename |nodeport
---------------------------------------------------------------------
- 22|localhost| 57638
- 21|localhost| 57637
+ 23|localhost| 57638
+ 22|localhost| 57637
(2 rows)
step s1-begin:
@@ -43,8 +43,8 @@ step s1-show-nodes:
nodeid|nodename |nodeport|isactive
---------------------------------------------------------------------
- 21|localhost| 58637|t
- 22|localhost| 58638|t
+ 22|localhost| 58637|t
+ 23|localhost| 58638|t
(2 rows)
step s3-update-node-1-back:
@@ -93,8 +93,8 @@ nodeid|nodename|nodeport
starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes s3-update-node-1-back s3-manually-fix-metadata
nodeid|nodename |nodeport
---------------------------------------------------------------------
- 24|localhost| 57638
- 23|localhost| 57637
+ 25|localhost| 57638
+ 24|localhost| 57637
(2 rows)
step s1-begin:
@@ -139,8 +139,8 @@ step s1-show-nodes:
nodeid|nodename |nodeport|isactive
---------------------------------------------------------------------
- 24|localhost| 57638|t
- 23|localhost| 58637|t
+ 25|localhost| 57638|t
+ 24|localhost| 58637|t
(2 rows)
step s3-update-node-1-back:
@@ -178,8 +178,8 @@ nodeid|nodename|nodeport
starting permutation: s2-create-table s1-begin s1-update-node-nonexistent s1-prepare-transaction s2-cache-prepared-statement s1-commit-prepared s2-execute-prepared s1-update-node-existent s3-manually-fix-metadata
nodeid|nodename |nodeport
---------------------------------------------------------------------
- 26|localhost| 57638
- 25|localhost| 57637
+ 27|localhost| 57638
+ 26|localhost| 57637
(2 rows)
step s2-create-table:
diff --git a/src/test/regress/expected/local_dist_join_mixed.out b/src/test/regress/expected/local_dist_join_mixed.out
index 20287ee35..b8f074c73 100644
--- a/src/test/regress/expected/local_dist_join_mixed.out
+++ b/src/test/regress/expected/local_dist_join_mixed.out
@@ -357,13 +357,13 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
101
(1 row)
-CREATE VIEW local_regular_view AS SELECT * FROM local;
+CREATE VIEW local_regular_view AS SELECT * FROM local table_name_for_view;
WARNING: "view local_regular_view" has dependency to "table local" that is not in Citus' metadata
DETAIL: "view local_regular_view" will be created only locally
HINT: Distribute "table local" first to distribute "view local_regular_view"
CREATE VIEW dist_regular_view AS SELECT * FROM distributed;
SELECT count(*) FROM distributed JOIN local_regular_view USING (id);
-DEBUG: generating subplan XXX_1 for subquery SELECT local.id, local.title FROM local_dist_join_mixed.local
+DEBUG: generating subplan XXX_1 for subquery SELECT id, title FROM local_dist_join_mixed.local table_name_for_view
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (local_dist_join_mixed.distributed JOIN (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title text)) local_regular_view USING (id))
count
---------------------------------------------------------------------
@@ -380,7 +380,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
(1 row)
SELECT count(*) FROM dist_regular_view JOIN local_regular_view USING (id);
-DEBUG: generating subplan XXX_1 for subquery SELECT local.id, local.title FROM local_dist_join_mixed.local
+DEBUG: generating subplan XXX_1 for subquery SELECT id, title FROM local_dist_join_mixed.local table_name_for_view
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT distributed.id, distributed.name, distributed.created_at FROM local_dist_join_mixed.distributed) dist_regular_view JOIN (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title text)) local_regular_view USING (id))
count
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/local_shard_copy.out b/src/test/regress/expected/local_shard_copy.out
index 37839d472..c5ea51207 100644
--- a/src/test/regress/expected/local_shard_copy.out
+++ b/src/test/regress/expected/local_shard_copy.out
@@ -28,9 +28,11 @@ SELECT create_distributed_table('distributed_table','key');
(1 row)
INSERT INTO distributed_table SELECT *,* FROM generate_series(20, 40);
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO reference_table SELECT * FROM generate_series(1, 10);
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
CREATE TABLE local_table (key int PRIMARY KEY);
diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out
index e70dc1102..f6e4db7ee 100644
--- a/src/test/regress/expected/local_shard_execution.out
+++ b/src/test/regress/expected/local_shard_execution.out
@@ -299,10 +299,10 @@ RETURNING *;
-- INSERT..SELECT via coordinator consists of two steps, select + COPY
-- that's why it is disallowed to use local execution even if the SELECT
-- can be executed locally
-INSERT INTO distributed_table SELECT * FROM distributed_table WHERE key = 1 OFFSET 0 ON CONFLICT DO NOTHING;
-NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) OFFSET 0
+INSERT INTO distributed_table SELECT sum(key), value FROM distributed_table WHERE key = 1 GROUP BY value ON CONFLICT DO NOTHING;
+NOTICE: executing the command locally: SELECT int4(sum(key)) AS key, value FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) GROUP BY value
NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.age FROM read_intermediate_result('insert_select_XXX_1470001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint) ON CONFLICT DO NOTHING
+NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value) SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('insert_select_XXX_1470001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text) ON CONFLICT DO NOTHING
INSERT INTO distributed_table SELECT 1, '1',15 FROM distributed_table WHERE key = 2 LIMIT 1 ON CONFLICT DO NOTHING;
-- sanity check: multi-shard INSERT..SELECT pushdown goes through distributed execution
INSERT INTO distributed_table SELECT * FROM distributed_table ON CONFLICT DO NOTHING;
@@ -752,46 +752,46 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
(1 row)
SELECT count(*) FROM distributed_table d1 join distributed_table d2 using(age);
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_65_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_65_1','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470001 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_65_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_65_3','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470003 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_66_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_66_1','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470001 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_66_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_66_3','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470003 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_1_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_2_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_3_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_4_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_1_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_2_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_3_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_4_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_1_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_2_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_3_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_4_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_1_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_2_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_3_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_4_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_65_1_0,repartition_65_2_0,repartition_65_3_0,repartition_65_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_66_1_0,repartition_66_2_0,repartition_66_3_0,repartition_66_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_65_1_1,repartition_65_2_1,repartition_65_3_1,repartition_65_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_66_1_1,repartition_66_2_1,repartition_66_3_1,repartition_66_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_65_1_2,repartition_65_2_2,repartition_65_3_2,repartition_65_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_66_1_2,repartition_66_2_2,repartition_66_3_2,repartition_66_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_65_1_3,repartition_65_2_3,repartition_65_3_3,repartition_65_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_66_1_3,repartition_66_2_3,repartition_66_3_3,repartition_66_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_70_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_70_1','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470001 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_70_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_70_3','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470003 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_71_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_71_1','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470001 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_71_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_71_3','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470003 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_1_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_2_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_3_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_4_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_1_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_2_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_3_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_4_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_1_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_2_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_3_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_4_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_1_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_2_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_3_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_4_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_70_1_0,repartition_70_2_0,repartition_70_3_0,repartition_70_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_71_1_0,repartition_71_2_0,repartition_71_3_0,repartition_71_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_70_1_1,repartition_70_2_1,repartition_70_3_1,repartition_70_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_71_1_1,repartition_71_2_1,repartition_71_3_1,repartition_71_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_70_1_2,repartition_70_2_2,repartition_70_3_2,repartition_70_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_71_1_2,repartition_71_2_2,repartition_71_3_2,repartition_71_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_70_1_3,repartition_70_2_3,repartition_70_3_3,repartition_70_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_71_1_3,repartition_71_2_3,repartition_71_3_3,repartition_71_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
count
---------------------------------------------------------------------
2
diff --git a/src/test/regress/expected/local_shard_execution_0.out b/src/test/regress/expected/local_shard_execution_0.out
index c7f002cad..8c4fbfd74 100644
--- a/src/test/regress/expected/local_shard_execution_0.out
+++ b/src/test/regress/expected/local_shard_execution_0.out
@@ -299,10 +299,10 @@ RETURNING *;
-- INSERT..SELECT via coordinator consists of two steps, select + COPY
-- that's why it is disallowed to use local execution even if the SELECT
-- can be executed locally
-INSERT INTO distributed_table SELECT * FROM distributed_table WHERE key = 1 OFFSET 0 ON CONFLICT DO NOTHING;
-NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) OFFSET 0
+INSERT INTO distributed_table SELECT sum(key), value FROM distributed_table WHERE key = 1 GROUP BY value ON CONFLICT DO NOTHING;
+NOTICE: executing the command locally: SELECT int4(sum(key)) AS key, value FROM local_shard_execution.distributed_table_1470001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) GROUP BY value
NOTICE: executing the copy locally for colocated file with shard xxxxx
-NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) SELECT key, value, age FROM read_intermediate_result('insert_select_XXX_1470001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint) ON CONFLICT DO NOTHING
+NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_1470001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text) ON CONFLICT DO NOTHING
INSERT INTO distributed_table SELECT 1, '1',15 FROM distributed_table WHERE key = 2 LIMIT 1 ON CONFLICT DO NOTHING;
-- sanity check: multi-shard INSERT..SELECT pushdown goes through distributed execution
INSERT INTO distributed_table SELECT * FROM distributed_table ON CONFLICT DO NOTHING;
@@ -752,46 +752,46 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
(1 row)
SELECT count(*) FROM distributed_table d1 join distributed_table d2 using(age);
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_65_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_65_1','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470001 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_65_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_65_3','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470003 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_66_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_66_1','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470001 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_66_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_66_3','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470003 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_1_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_2_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_3_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_4_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_1_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_2_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_3_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_4_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_1_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_2_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_3_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_4_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_1_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_2_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_3_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_66_4_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_65_1_0,repartition_65_2_0,repartition_65_3_0,repartition_65_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_66_1_0,repartition_66_2_0,repartition_66_3_0,repartition_66_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_65_1_1,repartition_65_2_1,repartition_65_3_1,repartition_65_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_66_1_1,repartition_66_2_1,repartition_66_3_1,repartition_66_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_65_1_2,repartition_65_2_2,repartition_65_3_2,repartition_65_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_66_1_2,repartition_66_2_2,repartition_66_3_2,repartition_66_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_65_1_3,repartition_65_2_3,repartition_65_3_3,repartition_65_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_66_1_3,repartition_66_2_3,repartition_66_3_3,repartition_66_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_70_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_70_1','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470001 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_70_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_70_3','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470003 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_71_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_71_1','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470001 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_71_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_71_3','SELECT age AS column1 FROM local_shard_execution.distributed_table_1470003 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_1_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_2_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_3_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_4_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_1_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_2_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_3_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_4_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_1_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_2_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_3_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_4_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_1_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_2_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_3_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_71_4_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_70_1_0,repartition_70_2_0,repartition_70_3_0,repartition_70_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_71_1_0,repartition_71_2_0,repartition_71_3_0,repartition_71_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_70_1_1,repartition_70_2_1,repartition_70_3_1,repartition_70_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_71_1_1,repartition_71_2_1,repartition_71_3_1,repartition_71_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_70_1_2,repartition_70_2_2,repartition_70_3_2,repartition_70_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_71_1_2,repartition_71_2_2,repartition_71_3_2,repartition_71_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_70_1_3,repartition_70_2_3,repartition_70_3_3,repartition_70_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_71_1_3,repartition_71_2_3,repartition_71_3_3,repartition_71_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
count
---------------------------------------------------------------------
2
diff --git a/src/test/regress/expected/local_shard_execution_replicated.out b/src/test/regress/expected/local_shard_execution_replicated.out
index 07da961c2..d0593db4a 100644
--- a/src/test/regress/expected/local_shard_execution_replicated.out
+++ b/src/test/regress/expected/local_shard_execution_replicated.out
@@ -236,8 +236,8 @@ RETURNING *;
-- INSERT..SELECT via coordinator consists of two steps, select + COPY
-- that's why it is disallowed to use local execution even if the SELECT
-- can be executed locally
-INSERT INTO distributed_table SELECT * FROM distributed_table WHERE key = 1 OFFSET 0 ON CONFLICT DO NOTHING;
-NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) OFFSET 0
+INSERT INTO distributed_table SELECT sum(key), value, max(age) FROM distributed_table WHERE key = 1 GROUP BY value ON CONFLICT DO NOTHING;
+NOTICE: executing the command locally: SELECT int4(sum(key)) AS key, value, max(age) AS age FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) GROUP BY value
NOTICE: executing the copy locally for colocated file with shard xxxxx
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key, value, age) SELECT intermediate_result.key, intermediate_result.value, intermediate_result.age FROM read_intermediate_result('insert_select_XXX_1500001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint) ON CONFLICT DO NOTHING
INSERT INTO distributed_table SELECT 1, '1',15 FROM distributed_table WHERE key = 2 LIMIT 1 ON CONFLICT DO NOTHING;
@@ -712,46 +712,46 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
(1 row)
SELECT count(*) FROM distributed_table d1 join distributed_table d2 using(age);
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_64_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_64_1','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500001 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_64_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_64_3','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500003 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_65_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_65_1','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500001 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_65_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_65_3','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500003 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_1_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_2_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_3_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_4_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_1_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_2_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_3_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_4_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_1_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_2_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_3_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_4_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_1_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_2_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_3_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_4_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_64_1_0,repartition_64_2_0,repartition_64_3_0,repartition_64_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_65_1_0,repartition_65_2_0,repartition_65_3_0,repartition_65_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_64_1_1,repartition_64_2_1,repartition_64_3_1,repartition_64_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_65_1_1,repartition_65_2_1,repartition_65_3_1,repartition_65_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_64_1_2,repartition_64_2_2,repartition_64_3_2,repartition_64_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_65_1_2,repartition_65_2_2,repartition_65_3_2,repartition_65_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_64_1_3,repartition_64_2_3,repartition_64_3_3,repartition_64_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_65_1_3,repartition_65_2_3,repartition_65_3_3,repartition_65_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_69_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_69_1','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500001 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_69_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_69_3','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500003 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_70_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_70_1','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500001 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_70_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_70_3','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500003 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_1_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_2_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_3_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_4_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_1_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_2_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_3_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_4_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_1_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_2_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_3_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_4_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_1_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_2_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_3_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_4_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_69_1_0,repartition_69_2_0,repartition_69_3_0,repartition_69_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_70_1_0,repartition_70_2_0,repartition_70_3_0,repartition_70_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_69_1_1,repartition_69_2_1,repartition_69_3_1,repartition_69_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_70_1_1,repartition_70_2_1,repartition_70_3_1,repartition_70_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_69_1_2,repartition_69_2_2,repartition_69_3_2,repartition_69_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_70_1_2,repartition_70_2_2,repartition_70_3_2,repartition_70_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_69_1_3,repartition_69_2_3,repartition_69_3_3,repartition_69_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_70_1_3,repartition_70_2_3,repartition_70_3_3,repartition_70_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
count
---------------------------------------------------------------------
2
@@ -1016,7 +1016,8 @@ WHERE
distributed_table.value = all_data.value AND distributed_table.key = 1
ORDER BY
1 DESC;
-NOTICE: executing the command locally: SELECT distributed_table.key FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table, (SELECT second_distributed_table.key, second_distributed_table.value FROM local_shard_execution_replicated.second_distributed_table_1500008 second_distributed_table WHERE (second_distributed_table.key OPERATOR(pg_catalog.=) 2)) all_data WHERE ((distributed_table.value OPERATOR(pg_catalog.=) all_data.value) AND (distributed_table.key OPERATOR(pg_catalog.=) 1)) ORDER BY distributed_table.key DESC
+NOTICE: executing the command locally: SELECT key, value FROM local_shard_execution_replicated.second_distributed_table_1500008 second_distributed_table WHERE (key OPERATOR(pg_catalog.=) 2)
+NOTICE: executing the command locally: SELECT distributed_table.key FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table, (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) all_data WHERE ((distributed_table.value OPERATOR(pg_catalog.=) all_data.value) AND (distributed_table.key OPERATOR(pg_catalog.=) 1)) ORDER BY distributed_table.key DESC
key
---------------------------------------------------------------------
1
diff --git a/src/test/regress/expected/local_shard_execution_replicated_0.out b/src/test/regress/expected/local_shard_execution_replicated_0.out
index c913bf628..992ff6b81 100644
--- a/src/test/regress/expected/local_shard_execution_replicated_0.out
+++ b/src/test/regress/expected/local_shard_execution_replicated_0.out
@@ -236,8 +236,8 @@ RETURNING *;
-- INSERT..SELECT via coordinator consists of two steps, select + COPY
-- that's why it is disallowed to use local execution even if the SELECT
-- can be executed locally
-INSERT INTO distributed_table SELECT * FROM distributed_table WHERE key = 1 OFFSET 0 ON CONFLICT DO NOTHING;
-NOTICE: executing the command locally: SELECT key, value, age FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) OFFSET 0
+INSERT INTO distributed_table SELECT sum(key), value, max(age) FROM distributed_table WHERE key = 1 GROUP BY value ON CONFLICT DO NOTHING;
+NOTICE: executing the command locally: SELECT int4(sum(key)) AS key, value, max(age) AS age FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table WHERE (key OPERATOR(pg_catalog.=) 1) GROUP BY value
NOTICE: executing the copy locally for colocated file with shard xxxxx
NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key, value, age) SELECT key, value, age FROM read_intermediate_result('insert_select_XXX_1500001'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text, age bigint) ON CONFLICT DO NOTHING
INSERT INTO distributed_table SELECT 1, '1',15 FROM distributed_table WHERE key = 2 LIMIT 1 ON CONFLICT DO NOTHING;
@@ -712,46 +712,46 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_shar
(1 row)
SELECT count(*) FROM distributed_table d1 join distributed_table d2 using(age);
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_64_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_64_1','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500001 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_64_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_64_3','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500003 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_65_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_65_1','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500001 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT partition_index, 'repartition_65_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_65_3','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500003 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_1_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_2_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_3_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_4_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_0']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_0']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_1_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_2_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_3_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_4_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_1']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_1']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_1_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_2_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_3_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_4_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_2']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_2']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_1_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_2_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_3_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_64_4_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_1_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_2_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_3_3']::text[],'localhost',57637) bytes
-NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_65_4_3']::text[],'localhost',57638) bytes
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_64_1_0,repartition_64_2_0,repartition_64_3_0,repartition_64_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_65_1_0,repartition_65_2_0,repartition_65_3_0,repartition_65_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_64_1_1,repartition_64_2_1,repartition_64_3_1,repartition_64_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_65_1_1,repartition_65_2_1,repartition_65_3_1,repartition_65_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_64_1_2,repartition_64_2_2,repartition_64_3_2,repartition_64_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_65_1_2,repartition_65_2_2,repartition_65_3_2,repartition_65_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
-NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_64_1_3,repartition_64_2_3,repartition_64_3_3,repartition_64_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_65_1_3,repartition_65_2_3,repartition_65_3_3,repartition_65_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_69_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_69_1','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500001 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_69_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_69_3','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500003 d1 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_70_1' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_70_1','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500001 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT partition_index, 'repartition_70_3' || '_' || partition_index::text , rows_written FROM pg_catalog.worker_partition_query_result('repartition_70_3','SELECT age AS column1 FROM local_shard_execution_replicated.distributed_table_1500003 d2 WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true,true,true) WHERE rows_written > 0
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_1_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_2_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_3_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_4_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_0']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_0']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_1_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_2_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_3_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_4_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_1']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_1']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_1_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_2_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_3_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_4_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_2']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_2']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_1_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_2_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_3_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_69_4_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_1_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_2_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_3_3']::text[],'localhost',57637) bytes
+NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartition_70_4_3']::text[],'localhost',57638) bytes
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_69_1_0,repartition_69_2_0,repartition_69_3_0,repartition_69_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_70_1_0,repartition_70_2_0,repartition_70_3_0,repartition_70_4_0}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_69_1_1,repartition_69_2_1,repartition_69_3_1,repartition_69_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_70_1_1,repartition_70_2_1,repartition_70_3_1,repartition_70_4_1}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_69_1_2,repartition_69_2_2,repartition_69_3_2,repartition_69_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_70_1_2,repartition_70_2_2,repartition_70_3_2,repartition_70_4_2}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
+NOTICE: executing the command locally: SELECT count(*) AS count FROM (read_intermediate_results('{repartition_69_1_3,repartition_69_2_3,repartition_69_3_3,repartition_69_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result(column1 bigint) JOIN read_intermediate_results('{repartition_70_1_3,repartition_70_2_3,repartition_70_3_3,repartition_70_4_3}'::text[], 'binary'::citus_copy_format) intermediate_result_1(column1 bigint) ON ((intermediate_result.column1 OPERATOR(pg_catalog.=) intermediate_result_1.column1))) WHERE true
count
---------------------------------------------------------------------
2
@@ -1016,7 +1016,8 @@ WHERE
distributed_table.value = all_data.value AND distributed_table.key = 1
ORDER BY
1 DESC;
-NOTICE: executing the command locally: SELECT distributed_table.key FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table, (SELECT second_distributed_table.key, second_distributed_table.value FROM local_shard_execution_replicated.second_distributed_table_1500008 second_distributed_table WHERE (second_distributed_table.key OPERATOR(pg_catalog.=) 2)) all_data WHERE ((distributed_table.value OPERATOR(pg_catalog.=) all_data.value) AND (distributed_table.key OPERATOR(pg_catalog.=) 1)) ORDER BY distributed_table.key DESC
+NOTICE: executing the command locally: SELECT key, value FROM local_shard_execution_replicated.second_distributed_table_1500008 second_distributed_table WHERE (key OPERATOR(pg_catalog.=) 2)
+NOTICE: executing the command locally: SELECT distributed_table.key FROM local_shard_execution_replicated.distributed_table_1500001 distributed_table, (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) all_data WHERE ((distributed_table.value OPERATOR(pg_catalog.=) all_data.value) AND (distributed_table.key OPERATOR(pg_catalog.=) 1)) ORDER BY distributed_table.key DESC
key
---------------------------------------------------------------------
1
diff --git a/src/test/regress/expected/local_shard_utility_command_execution.out b/src/test/regress/expected/local_shard_utility_command_execution.out
index ba70ddf74..eeacd7f4f 100644
--- a/src/test/regress/expected/local_shard_utility_command_execution.out
+++ b/src/test/regress/expected/local_shard_utility_command_execution.out
@@ -509,7 +509,7 @@ NOTICE: executing the command locally: DELETE FROM local_commands_test_schema.r
-- add another column to dist_table
-- note that we execute below DDL locally as well
ALTER TABLE ref_table ADD b int;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500035, 'local_commands_test_schema', 'ALTER TABLE ref_table ADD b int;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500035, 'local_commands_test_schema', 'ALTER TABLE ref_table ADD COLUMN b integer;')
-- define self reference
ALTER TABLE ref_table ADD CONSTRAINT fkey2 FOREIGN KEY(b) REFERENCES ref_table(a);
NOTICE: executing the command locally: SELECT worker_apply_inter_shard_ddl_command (1500035, 'local_commands_test_schema', 1500035, 'local_commands_test_schema', 'ALTER TABLE ref_table ADD CONSTRAINT fkey2 FOREIGN KEY(b) REFERENCES ref_table(a);')
@@ -629,17 +629,17 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_comm
-- execute bunch of DDL & DROP commands succesfully
ALTER TABLE dist_table ADD column c int;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500100, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500103, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500106, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500109, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500112, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500115, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500118, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500121, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500124, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500127, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500130, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD column c int;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500100, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500103, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500106, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500109, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500112, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500115, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500118, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500121, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500124, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500127, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500130, 'local_commands_test_schema', 'ALTER TABLE dist_table ADD COLUMN c integer;')
ALTER TABLE dist_table ALTER COLUMN c SET NOT NULL;
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500100, 'local_commands_test_schema', 'ALTER TABLE dist_table ALTER COLUMN c SET NOT NULL;')
NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500103, 'local_commands_test_schema', 'ALTER TABLE dist_table ALTER COLUMN c SET NOT NULL;')
@@ -758,17 +758,17 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM local_comm
-- execute bunch of DDL & DROP commands succesfully
ALTER TABLE partitioning_test ADD column c int;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500165, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500168, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500171, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500174, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500177, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500180, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500183, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500186, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500189, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500192, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500195, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD column c int;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500165, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500168, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500171, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500174, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500177, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500180, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500183, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500186, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500189, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500192, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1500195, 'local_commands_test_schema', 'ALTER TABLE partitioning_test ADD COLUMN c integer;')
TRUNCATE partitioning_test;
NOTICE: executing the command locally: TRUNCATE TABLE local_commands_test_schema.partitioning_test_xxxxx CASCADE
NOTICE: executing the command locally: TRUNCATE TABLE local_commands_test_schema.partitioning_test_xxxxx CASCADE
diff --git a/src/test/regress/expected/local_table_join.out b/src/test/regress/expected/local_table_join.out
index 7da341207..297959d41 100644
--- a/src/test/regress/expected/local_table_join.out
+++ b/src/test/regress/expected/local_table_join.out
@@ -1370,9 +1370,6 @@ select typdefault from (
select a from tbl
where typdefault > 'a'
limit 1) as subq_0
- where (
- select true as bool from pg_catalog.pg_am limit 1
- )
) as subq_1
) as subq_2;
typdefault
@@ -1400,15 +1397,11 @@ select typdefault from (
select a from tbl
where typdefault > 'a'
limit 1) as subq_0
- where (
- select true as bool from pg_catalog.pg_am limit 1
- )
) as subq_1
) as subq_2;
-DEBUG: generating subplan XXX_1 for subquery SELECT true AS bool FROM pg_am LIMIT 1
DEBUG: Wrapping relation "custom_pg_type" to a subquery
-DEBUG: generating subplan XXX_2 for subquery SELECT typdefault FROM local_table_join.custom_pg_type WHERE true
-DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT typdefault FROM (SELECT subq_1.typdefault FROM (SELECT custom_pg_type.typdefault FROM (SELECT custom_pg_type_1.typdefault FROM (SELECT intermediate_result.typdefault FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(typdefault text)) custom_pg_type_1) custom_pg_type, LATERAL (SELECT tbl.a FROM local_table_join.tbl WHERE (custom_pg_type.typdefault OPERATOR(pg_catalog.>) 'a'::text) LIMIT 1) subq_0 WHERE (SELECT intermediate_result.bool FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(bool boolean))) subq_1) subq_2
+DEBUG: generating subplan XXX_1 for subquery SELECT typdefault FROM local_table_join.custom_pg_type WHERE true
+DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT typdefault FROM (SELECT subq_1.typdefault FROM (SELECT custom_pg_type.typdefault FROM (SELECT custom_pg_type_1.typdefault FROM (SELECT intermediate_result.typdefault FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(typdefault text)) custom_pg_type_1) custom_pg_type, LATERAL (SELECT tbl.a FROM local_table_join.tbl WHERE (custom_pg_type.typdefault OPERATOR(pg_catalog.>) 'a'::text) LIMIT 1) subq_0) subq_1) subq_2
ERROR: cannot push down this subquery
DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables
-- Not supported because of 4470
diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out
index 3cb69936c..a73467e81 100644
--- a/src/test/regress/expected/merge.out
+++ b/src/test/regress/expected/merge.out
@@ -3065,7 +3065,7 @@ WHEN MATCHED AND t.customer_id = 200 THEN
DELETE
WHEN NOT MATCHED THEN
INSERT VALUES(s.customer_id, s.order_id, s.order_center, 1, s.order_time);
-ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from either a non-equi-join or a mismatch in the datatypes of the columns being joined.
+ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from a non-equi-join.
DETAIL: Without a equi-join condition on the target's distribution column, the source rows cannot be efficiently redistributed, and the NOT-MATCHED condition cannot be evaluated unambiguously. This can result in incorrect or unexpected results when attempting to merge tables in a distributed setting
SELECT * FROM target_filter ORDER BY 1, 2;
customer_id | last_order_id | order_center | order_count | last_order
@@ -3414,7 +3414,7 @@ MERGE INTO t1
UPDATE SET val = t1.val + 1
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1_res.id, s1_res.val);
-ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from either a non-equi-join or a mismatch in the datatypes of the columns being joined.
+ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from a non-equi-join.
DETAIL: Without a equi-join condition on the target's distribution column, the source rows cannot be efficiently redistributed, and the NOT-MATCHED condition cannot be evaluated unambiguously. This can result in incorrect or unexpected results when attempting to merge tables in a distributed setting
-- Join condition without target distribution column
WITH s1_res AS (
@@ -3424,7 +3424,7 @@ WITH s1_res AS (
WHEN MATCHED THEN DELETE
WHEN NOT MATCHED THEN
INSERT (id, val) VALUES (s1_res.id, s1_res.val);
-ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from either a non-equi-join or a mismatch in the datatypes of the columns being joined.
+ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from a non-equi-join.
DETAIL: Without a equi-join condition on the target's distribution column, the source rows cannot be efficiently redistributed, and the NOT-MATCHED condition cannot be evaluated unambiguously. This can result in incorrect or unexpected results when attempting to merge tables in a distributed setting
--
-- Reference tables
@@ -3816,195 +3816,6 @@ UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
ERROR: For MERGE command, append/range distribution table is not supported yet
--- test merge with single-shard tables
-CREATE SCHEMA query_single_shard_table;
-SET search_path TO query_single_shard_table;
-CREATE TABLE nullkey_c1_t1(a int, b int);
-CREATE TABLE nullkey_c1_t2(a int, b int);
-SELECT create_distributed_table('nullkey_c1_t1', null, colocate_with=>'none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table('nullkey_c1_t2', null, colocate_with=>'nullkey_c1_t1');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE nullkey_c2_t1(a int, b int);
-CREATE TABLE nullkey_c2_t2(a int, b int);
-SELECT create_distributed_table('nullkey_c2_t1', null, colocate_with=>'none');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table('nullkey_c2_t2', null, colocate_with=>'nullkey_c2_t1', distribution_type=>null);
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE reference_table(a int, b int);
-CREATE TABLE distributed_table(a int, b int);
-CREATE TABLE citus_local_table(a int, b int);
-SELECT create_reference_table('reference_table');
- create_reference_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT create_distributed_table('distributed_table', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT citus_add_local_table_to_metadata('citus_local_table');
- citus_add_local_table_to_metadata
----------------------------------------------------------------------
-
-(1 row)
-
-SET client_min_messages TO DEBUG2;
-INSERT INTO reference_table SELECT i, i FROM generate_series(0, 5) i;
-DEBUG: distributed INSERT ... SELECT can only select from distributed tables
-DEBUG: Collecting INSERT ... SELECT results on coordinator
-INSERT INTO distributed_table SELECT i, i FROM generate_series(3, 8) i;
-DEBUG: distributed INSERT ... SELECT can only select from distributed tables
-DEBUG: Collecting INSERT ... SELECT results on coordinator
-INSERT INTO citus_local_table SELECT i, i FROM generate_series(0, 10) i;
-DEBUG: distributed INSERT ... SELECT can only select from distributed tables
-DEBUG: Collecting INSERT ... SELECT results on coordinator
-CREATE TABLE postgres_local_table(a int, b int);
-INSERT INTO postgres_local_table SELECT i, i FROM generate_series(5, 10) i;
--- with a colocated table
-MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t2.b;
-DEBUG:
-DEBUG: Creating MERGE router plan
-MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
-WHEN MATCHED THEN DELETE;
-DEBUG:
-DEBUG: Creating MERGE router plan
-MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t2.b
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t2.a, nullkey_c1_t2.b);
-DEBUG:
-DEBUG: Creating MERGE router plan
-MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
-WHEN MATCHED THEN DELETE
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t2.a, nullkey_c1_t2.b);
-DEBUG:
-DEBUG: Creating MERGE router plan
--- with non-colocated single-shard table
-MERGE INTO nullkey_c1_t1 USING nullkey_c2_t1 ON (nullkey_c1_t1.a = nullkey_c2_t1.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b;
-DEBUG: Distributed tables are not co-located, try repartitioning
-DEBUG: For MERGE command, all the distributed tables must be colocated
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE operation on non-colocated distributed table(s) without a shard key is not yet supported
-MERGE INTO nullkey_c1_t1 USING nullkey_c2_t1 ON (nullkey_c1_t1.a = nullkey_c2_t1.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c2_t1.a, nullkey_c2_t1.b);
-DEBUG: Distributed tables are not co-located, try repartitioning
-DEBUG: For MERGE command, all the distributed tables must be colocated
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE operation on non-colocated distributed table(s) without a shard key is not yet supported
--- with a distributed table
-MERGE INTO nullkey_c1_t1 USING distributed_table ON (nullkey_c1_t1.a = distributed_table.a)
-WHEN MATCHED THEN UPDATE SET b = distributed_table.b
-WHEN NOT MATCHED THEN INSERT VALUES (distributed_table.a, distributed_table.b);
-DEBUG: Distributed tables are not co-located, try repartitioning
-DEBUG: For MERGE command, all the distributed tables must be colocated
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE operation on non-colocated distributed table(s) without a shard key is not yet supported
-MERGE INTO distributed_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = distributed_table.a)
-WHEN MATCHED THEN DELETE
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
-DEBUG: Distributed tables are not co-located, try repartitioning
-DEBUG: For MERGE command, all the distributed tables must be colocated
-DEBUG: Creating MERGE repartition plan
-DEBUG: Using column - index:0 from the source list to redistribute
-DEBUG: Distributed planning for a fast-path router query
-DEBUG: Creating router plan
-DEBUG: Collect source query results on coordinator
-DEBUG: Create a MERGE task list that needs to be routed
-DEBUG:
-DEBUG: distributed statement: MERGE INTO query_single_shard_table.distributed_table_4000189 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4000189'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c1_t1 ON (nullkey_c1_t1.a OPERATOR(pg_catalog.=) citus_table_alias.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b)
-DEBUG:
-DEBUG: distributed statement: MERGE INTO query_single_shard_table.distributed_table_4000190 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4000190'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c1_t1 ON (nullkey_c1_t1.a OPERATOR(pg_catalog.=) citus_table_alias.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b)
-DEBUG:
-DEBUG: distributed statement: MERGE INTO query_single_shard_table.distributed_table_4000191 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4000191'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c1_t1 ON (nullkey_c1_t1.a OPERATOR(pg_catalog.=) citus_table_alias.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b)
-DEBUG:
-DEBUG: distributed statement: MERGE INTO query_single_shard_table.distributed_table_4000192 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4000192'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c1_t1 ON (nullkey_c1_t1.a OPERATOR(pg_catalog.=) citus_table_alias.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b)
--- with a reference table
-MERGE INTO nullkey_c1_t1 USING reference_table ON (nullkey_c1_t1.a = reference_table.a)
-WHEN MATCHED THEN UPDATE SET b = reference_table.b;
-DEBUG: A mix of distributed and reference table, try repartitioning
-DEBUG: A mix of distributed and reference table, routable query is not possible
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE operation on non-colocated distributed table(s) without a shard key is not yet supported
-MERGE INTO reference_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = reference_table.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
-ERROR: Reference table as target is not allowed in MERGE command
--- with a citus local table
-MERGE INTO nullkey_c1_t1 USING citus_local_table ON (nullkey_c1_t1.a = citus_local_table.a)
-WHEN MATCHED THEN UPDATE SET b = citus_local_table.b;
-DEBUG: A mix of distributed and local table, try repartitioning
-DEBUG: A mix of distributed and citus-local table, routable query is not possible
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE operation on non-colocated distributed table(s) without a shard key is not yet supported
-MERGE INTO citus_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = citus_local_table.a)
-WHEN MATCHED THEN DELETE;
-DEBUG: A mix of distributed and local table, try repartitioning
-DEBUG: A mix of distributed and citus-local table, routable query is not possible
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE involving repartition of rows is supported only if the target is distributed
--- with a postgres table
-MERGE INTO nullkey_c1_t1 USING postgres_local_table ON (nullkey_c1_t1.a = postgres_local_table.a)
-WHEN MATCHED THEN UPDATE SET b = postgres_local_table.b;
-DEBUG: There is only one distributed table, merge is not pushable, try repartitioning
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE INTO an distributed table from Postgres table is not yet supported
-MERGE INTO postgres_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = postgres_local_table.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
-DEBUG: There is only one distributed table, merge is not pushable, try repartitioning
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE involving repartition of rows is supported only if the target is distributed
--- using ctes
-WITH cte AS (
- SELECT * FROM nullkey_c1_t1
-)
-MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
-WHEN MATCHED THEN UPDATE SET b = cte.b;
-DEBUG:
-DEBUG: Creating MERGE router plan
-WITH cte AS (
- SELECT * FROM distributed_table
-)
-MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
-WHEN MATCHED THEN UPDATE SET b = cte.b;
-DEBUG: Distributed tables are not co-located, try repartitioning
-DEBUG: For MERGE command, all the distributed tables must be colocated
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE operation on non-colocated distributed table(s) without a shard key is not yet supported
-WITH cte AS materialized (
- SELECT * FROM distributed_table
-)
-MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
-WHEN MATCHED THEN UPDATE SET b = cte.b;
-DEBUG: Distributed tables are not co-located, try repartitioning
-DEBUG: For MERGE command, all the distributed tables must be colocated
-DEBUG: Creating MERGE repartition plan
-ERROR: MERGE operation on non-colocated distributed table(s) without a shard key is not yet supported
-SET client_min_messages TO WARNING;
-DROP SCHEMA query_single_shard_table CASCADE;
-SET search_path TO merge_schema;
-- Test Columnar table
CREATE TABLE target_columnar(cid int, name text) USING columnar;
SELECT create_distributed_table('target_columnar', 'cid');
@@ -4031,7 +3842,7 @@ EXPLAIN MERGE INTO demo_distributed t
USING demo_source_table s
ON (s.id2 + 1 = t.id1)
WHEN MATCHED THEN UPDATE SET val1 = 15;
-ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from either a non-equi-join or a mismatch in the datatypes of the columns being joined.
+ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from a non-equi-join.
DETAIL: Without a equi-join condition on the target's distribution column, the source rows cannot be efficiently redistributed, and the NOT-MATCHED condition cannot be evaluated unambiguously. This can result in incorrect or unexpected results when attempting to merge tables in a distributed setting
-- Sub-queries and CTEs are not allowed in actions and ON clause
CREATE TABLE target_1 (a int, b int, c int);
@@ -4136,6 +3947,14 @@ WHEN MATCHED THEN
DELETE;
ERROR: Sub-queries and CTEs are not allowed in ON clause for MERGE with repartitioning
HINT: Consider making the source and target colocated and joined on the distribution column to make it a routable query
+-- Datatype mismatch between target and source join column
+WITH src AS (SELECT FLOOR(b) AS a FROM source_2)
+MERGE INTO target_1 t
+USING src
+ON t.a = src.a
+WHEN MATCHED THEN DELETE;
+ERROR: In the MERGE ON clause, there is a datatype mismatch between target's distribution column and the expression originating from the source.
+DETAIL: If the types are different, Citus uses different hash functions for the two column types, which might lead to incorrect repartitioning of the result data
RESET client_min_messages;
DROP SERVER foreign_server CASCADE;
NOTICE: drop cascades to 3 other objects
diff --git a/src/test/regress/expected/merge_schema_sharding.out b/src/test/regress/expected/merge_schema_sharding.out
new file mode 100644
index 000000000..8a9ba89dd
--- /dev/null
+++ b/src/test/regress/expected/merge_schema_sharding.out
@@ -0,0 +1,226 @@
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
+\gset
+\if :server_version_ge_15
+\else
+\q
+\endif
+-- MERGE command performs a join from data_source to target_table_name
+DROP SCHEMA IF EXISTS schema_shard_table1 CASCADE;
+NOTICE: schema "schema_shard_table1" does not exist, skipping
+DROP SCHEMA IF EXISTS schema_shard_table2 CASCADE;
+NOTICE: schema "schema_shard_table2" does not exist, skipping
+DROP SCHEMA IF EXISTS schema_shard_table CASCADE;
+NOTICE: schema "schema_shard_table" does not exist, skipping
+-- test merge with schema-shard tables
+SET citus.shard_replication_factor TO 1;
+SET citus.max_adaptive_executor_pool_size TO 1;
+SET citus.next_shard_id TO 4005000;
+SET citus.enable_repartition_joins TO true;
+CREATE SCHEMA schema_shard_table;
+SET search_path TO schema_shard_table;
+CREATE TABLE reference_table(a int, b int);
+CREATE TABLE distributed_table(a int, b int);
+CREATE TABLE citus_local_table(a int, b int);
+CREATE TABLE postgres_local_table(a int, b int);
+INSERT INTO reference_table SELECT i, i FROM generate_series(0, 5) i;
+INSERT INTO distributed_table SELECT i, i FROM generate_series(3, 8) i;
+INSERT INTO citus_local_table SELECT i, i FROM generate_series(0, 10) i;
+INSERT INTO postgres_local_table SELECT i, i FROM generate_series(5, 10) i;
+SELECT create_reference_table('reference_table');
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$schema_shard_table.reference_table$$)
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_distributed_table('distributed_table', 'a');
+NOTICE: Copying data from local table...
+NOTICE: copying the data has completed
+DETAIL: The local data in the table is no longer visible, but is still on disk.
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$schema_shard_table.distributed_table$$)
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT citus_add_local_table_to_metadata('citus_local_table');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+SET citus.enable_schema_based_sharding TO ON;
+CREATE SCHEMA schema_shard_table1;
+CREATE SCHEMA schema_shard_table2;
+SET search_path TO schema_shard_table1;
+CREATE TABLE nullkey_c1_t1(a int, b int);
+CREATE TABLE nullkey_c1_t2(a int, b int);
+INSERT INTO nullkey_c1_t1 SELECT i, i FROM generate_series(0, 5) i;
+INSERT INTO nullkey_c1_t2 SELECT i, i FROM generate_series(3, 8) i;
+SET search_path TO schema_shard_table2;
+CREATE TABLE nullkey_c2_t1(a int, b int);
+CREATE TABLE nullkey_c2_t2(a int, b int);
+INSERT INTO nullkey_c2_t1 SELECT i, i FROM generate_series(0, 5) i;
+INSERT INTO nullkey_c2_t2 SELECT i, i FROM generate_series(3, 8) i;
+SET search_path TO schema_shard_table1;
+-- with a colocated table
+SET client_min_messages TO DEBUG2;
+MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t2.b;
+DEBUG: Creating router plan
+DEBUG:
+DEBUG: Creating MERGE router plan
+MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
+WHEN MATCHED THEN DELETE;
+DEBUG: Creating router plan
+DEBUG:
+DEBUG: Creating MERGE router plan
+MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t2.b
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t2.a, nullkey_c1_t2.b);
+DEBUG: Creating router plan
+DEBUG:
+DEBUG: Creating MERGE router plan
+MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
+WHEN MATCHED THEN DELETE
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t2.a, nullkey_c1_t2.b);
+DEBUG: Creating router plan
+DEBUG:
+DEBUG: Creating MERGE router plan
+SET search_path TO schema_shard_table2;
+-- with non-colocated schema-shard table
+MERGE INTO schema_shard_table1.nullkey_c1_t1 USING nullkey_c2_t1 ON (schema_shard_table1.nullkey_c1_t1.a = nullkey_c2_t1.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b;
+DEBUG: Distributed tables are not co-located, try repartitioning
+DEBUG: For MERGE command, all the distributed tables must be colocated
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported
+MERGE INTO schema_shard_table1.nullkey_c1_t1 USING nullkey_c2_t1 ON (schema_shard_table1.nullkey_c1_t1.a = nullkey_c2_t1.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c2_t1.a, nullkey_c2_t1.b);
+DEBUG: Distributed tables are not co-located, try repartitioning
+DEBUG: For MERGE command, all the distributed tables must be colocated
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported
+-- with a distributed table
+SET search_path TO schema_shard_table1;
+MERGE INTO nullkey_c1_t1 USING schema_shard_table.distributed_table ON (nullkey_c1_t1.a = schema_shard_table.distributed_table.a)
+WHEN MATCHED THEN UPDATE SET b = schema_shard_table.distributed_table.b
+WHEN NOT MATCHED THEN INSERT VALUES (schema_shard_table.distributed_table.a, schema_shard_table.distributed_table.b);
+DEBUG: Distributed tables are not co-located, try repartitioning
+DEBUG: For MERGE command, all the distributed tables must be colocated
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported
+MERGE INTO schema_shard_table.distributed_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.distributed_table.a)
+WHEN MATCHED THEN DELETE
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
+DEBUG: Distributed tables are not co-located, try repartitioning
+DEBUG: For MERGE command, all the distributed tables must be colocated
+DEBUG: Creating MERGE repartition plan
+DEBUG: Using column - index:0 from the source list to redistribute
+DEBUG: Distributed planning for a fast-path router query
+DEBUG: Creating router plan
+DEBUG: Collect source query results on coordinator
+DEBUG: Create a MERGE task list that needs to be routed
+DEBUG:
+DEBUG: distributed statement: MERGE INTO schema_shard_table.distributed_table_4005001 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005001'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c1_t1 ON (nullkey_c1_t1.a OPERATOR(pg_catalog.=) citus_table_alias.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b)
+DEBUG:
+DEBUG: distributed statement: MERGE INTO schema_shard_table.distributed_table_4005002 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005002'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c1_t1 ON (nullkey_c1_t1.a OPERATOR(pg_catalog.=) citus_table_alias.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b)
+DEBUG:
+DEBUG: distributed statement: MERGE INTO schema_shard_table.distributed_table_4005003 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005003'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c1_t1 ON (nullkey_c1_t1.a OPERATOR(pg_catalog.=) citus_table_alias.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b)
+DEBUG:
+DEBUG: distributed statement: MERGE INTO schema_shard_table.distributed_table_4005004 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005004'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c1_t1 ON (nullkey_c1_t1.a OPERATOR(pg_catalog.=) citus_table_alias.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b)
+DEBUG: Execute MERGE task list
+RESET client_min_messages;
+SELECT count(*) FROM schema_shard_table.distributed_table WHERE a in (0, 1, 2);
+ count
+---------------------------------------------------------------------
+ 3
+(1 row)
+
+MERGE INTO schema_shard_table.distributed_table
+USING (SELECT s1.a AS s1a, s2.b AS s2b
+ FROM nullkey_c1_t1 s1 JOIN schema_shard_table2.nullkey_c2_t1 s2
+ ON s1.a = s2.a) src
+ON (src.s1a = schema_shard_table.distributed_table.a)
+WHEN MATCHED THEN DELETE
+WHEN NOT MATCHED THEN INSERT VALUES (src.s1a, src.s2b);
+-- Three matching rows must be deleted
+SELECT count(*) FROM schema_shard_table.distributed_table WHERE a in (0, 1, 2);
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+-- with a reference table
+SET client_min_messages TO DEBUG2;
+MERGE INTO nullkey_c1_t1 USING schema_shard_table.reference_table ON (nullkey_c1_t1.a = schema_shard_table.reference_table.a)
+WHEN MATCHED THEN UPDATE SET b = schema_shard_table.reference_table.b;
+DEBUG: A mix of distributed and reference table, try repartitioning
+DEBUG: A mix of distributed and reference table, routable query is not possible
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported
+MERGE INTO schema_shard_table.reference_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.reference_table.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
+ERROR: Reference table as target is not allowed in MERGE command
+-- with a citus local table
+MERGE INTO nullkey_c1_t1 USING schema_shard_table.citus_local_table ON (nullkey_c1_t1.a = schema_shard_table.citus_local_table.a)
+WHEN MATCHED THEN UPDATE SET b = schema_shard_table.citus_local_table.b;
+DEBUG: A mix of distributed and local table, try repartitioning
+DEBUG: A mix of distributed and citus-local table, routable query is not possible
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported
+MERGE INTO schema_shard_table.citus_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.citus_local_table.a)
+WHEN MATCHED THEN DELETE;
+DEBUG: A mix of distributed and local table, try repartitioning
+DEBUG: A mix of distributed and citus-local table, routable query is not possible
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE involving repartition of rows is supported only if the target is distributed
+-- with a postgres table
+MERGE INTO nullkey_c1_t1 USING schema_shard_table.postgres_local_table ON (nullkey_c1_t1.a = schema_shard_table.postgres_local_table.a)
+WHEN MATCHED THEN UPDATE SET b = schema_shard_table.postgres_local_table.b;
+DEBUG: There is only one distributed table, merge is not pushable, try repartitioning
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE INTO an distributed table from Postgres table is not yet supported
+MERGE INTO schema_shard_table.postgres_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.postgres_local_table.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
+DEBUG: There is only one distributed table, merge is not pushable, try repartitioning
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE involving repartition of rows is supported only if the target is distributed
+-- using ctes
+WITH cte AS (
+ SELECT * FROM nullkey_c1_t1
+)
+MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
+WHEN MATCHED THEN UPDATE SET b = cte.b;
+DEBUG: Creating router plan
+DEBUG:
+DEBUG: Creating MERGE router plan
+WITH cte AS (
+ SELECT * FROM schema_shard_table.distributed_table
+)
+MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
+WHEN MATCHED THEN UPDATE SET b = cte.b;
+DEBUG: Distributed tables are not co-located, try repartitioning
+DEBUG: For MERGE command, all the distributed tables must be colocated
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported
+WITH cte AS materialized (
+ SELECT * FROM schema_shard_table.distributed_table
+)
+MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
+WHEN MATCHED THEN UPDATE SET b = cte.b;
+DEBUG: Distributed tables are not co-located, try repartitioning
+DEBUG: For MERGE command, all the distributed tables must be colocated
+DEBUG: Creating MERGE repartition plan
+ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported
+SET client_min_messages TO WARNING;
+DROP SCHEMA schema_shard_table1 CASCADE;
+DROP SCHEMA schema_shard_table2 CASCADE;
+DROP SCHEMA schema_shard_table CASCADE;
diff --git a/src/test/regress/expected/merge_schema_sharding_0.out b/src/test/regress/expected/merge_schema_sharding_0.out
new file mode 100644
index 000000000..a7e3fbf20
--- /dev/null
+++ b/src/test/regress/expected/merge_schema_sharding_0.out
@@ -0,0 +1,6 @@
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
+\gset
+\if :server_version_ge_15
+\else
+\q
diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out
index 62268b32f..29d62c46a 100644
--- a/src/test/regress/expected/metadata_sync_helpers.out
+++ b/src/test/regress/expected/metadata_sync_helpers.out
@@ -1284,8 +1284,17 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus_internal gpid=10000000001';
-- with an ugly trick, update the vartype of table from int to bigint
-- so that making two tables colocated fails
- UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}'
+ -- include varnullingrels for PG16
+ SHOW server_version \gset
+ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+ \gset
+ \if :server_version_ge_16
+ UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
+ \else
+ UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
+ WHERE logicalrelid = 'test_2'::regclass;
+ \endif
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
ERROR: cannot colocate tables test_2 and test_3
ROLLBACK;
diff --git a/src/test/regress/expected/multi_alter_table_add_constraints.out b/src/test/regress/expected/multi_alter_table_add_constraints.out
index 64c6e3667..a76d34d03 100644
--- a/src/test/regress/expected/multi_alter_table_add_constraints.out
+++ b/src/test/regress/expected/multi_alter_table_add_constraints.out
@@ -671,6 +671,27 @@ SELECT create_distributed_table('alter_add_unique', 'x');
ALTER TABLE alter_add_unique ADD CONSTRAINT unique_constraint_test UNIQUE USING INDEX alter_unique_idx;
NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "alter_unique_idx" to "unique_constraint_test"
ALTER TABLE alter_add_unique DROP CONSTRAINT unique_constraint_test;
+CREATE TABLE unique_test_table_single_shard(id int, name varchar(20));
+SELECT create_distributed_table('unique_test_table_single_shard', 'id', shard_count=>1);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ALTER TABLE unique_test_table_single_shard ADD UNIQUE(id, name) WITH (fillfactor=20);
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_index_defs FROM get_index_defs('sc3', 'unique_test_table_single_shard')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+ is_coordinator | result
+---------------------------------------------------------------------
+ t | [{"indexdefs": ["CREATE UNIQUE INDEX unique_test_table_single_shard_id_name_key ON sc3.unique_test_table_single_shard USING btree (id, name) WITH (fillfactor='20')"], "indexnames": ["unique_test_table_single_shard_id_name_key"]}]
+ f | [{"indexdefs": ["CREATE UNIQUE INDEX unique_test_table_single_shard_id_name_key ON sc3.unique_test_table_single_shard USING btree (id, name) WITH (fillfactor='20')", "CREATE UNIQUE INDEX unique_test_table_single_shard_id_name_key_1450242 ON sc3.unique_test_table_single_shard_1450242 USING btree (id, name) WITH (fillfactor='20')"], "indexnames": ["unique_test_table_single_shard_id_name_key", "unique_test_table_single_shard_id_name_key_1450242"]}]
+ f | [{"indexdefs": ["CREATE UNIQUE INDEX unique_test_table_single_shard_id_name_key ON sc3.unique_test_table_single_shard USING btree (id, name) WITH (fillfactor='20')", "CREATE UNIQUE INDEX unique_test_table_single_shard_id_name_key_1450242 ON sc3.unique_test_table_single_shard_1450242 USING btree (id, name) WITH (fillfactor='20')"], "indexnames": ["unique_test_table_single_shard_id_name_key", "unique_test_table_single_shard_id_name_key_1450242"]}]
+(3 rows)
+
+DROP TABLE unique_test_table_single_shard;
SET search_path TO 'public';
DROP SCHEMA sc1 CASCADE;
NOTICE: drop cascades to table sc1.alter_add_prim_key
diff --git a/src/test/regress/expected/multi_alter_table_statements.out b/src/test/regress/expected/multi_alter_table_statements.out
index 52fe8d762..c24927504 100644
--- a/src/test/regress/expected/multi_alter_table_statements.out
+++ b/src/test/regress/expected/multi_alter_table_statements.out
@@ -1,7 +1,9 @@
--
-- MULTI_ALTER_TABLE_STATEMENTS
--
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000;
+CREATE SCHEMA multi_alter_table_statements;
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 220000;
-- Check that we can run ALTER TABLE statements on distributed tables.
-- We set the shardid sequence here so that the shardids in this test
-- aren't affected by changes to the previous tests.
@@ -48,6 +50,8 @@ SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' OR
(1 row)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 221000;
-- Verify that we can add columns
ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT;
ALTER TABLE lineitem_alter ADD COLUMN date_column DATE;
@@ -93,7 +97,9 @@ ORDER BY attnum;
(27 rows)
\c - - - :master_port
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 222000;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -153,7 +159,7 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
-- Verify that SET NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -191,7 +197,7 @@ DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02,
END;
-- Verify that DROP NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -235,7 +241,7 @@ SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP B
(2 rows)
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -299,7 +305,7 @@ SELECT SUM(l_orderkey) FROM lineitem_alter;
53620791
(1 row)
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -324,7 +330,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite
-- Verify that we can execute commands with multiple subcommands
ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER,
ADD COLUMN int_column2 INTEGER;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -353,7 +359,7 @@ ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER,
ERROR: alter table command is currently unsupported
DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -410,7 +416,7 @@ ALTER TABLE IF EXISTS non_existent_table RENAME COLUMN column1 TO column2;
NOTICE: relation "non_existent_table" does not exist, skipping
-- Verify that none of the failed alter table commands took effect on the master
-- node
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -461,7 +467,7 @@ BEGIN;
CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
ALTER TABLE lineitem_alter ADD COLUMN first integer;
COMMIT;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
Column | Type | Modifiers
---------------------------------------------------------------------
l_orderkey | bigint | not null
@@ -536,8 +542,10 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
DROP INDEX temp_index_2;
-- Add column on only one worker...
\c - - - :worker_2_port
-ALTER TABLE lineitem_alter_220000 ADD COLUMN first integer;
+ALTER TABLE multi_alter_table_statements.lineitem_alter_220000 ADD COLUMN first integer;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 223000;
-- and try to add it in a multi-statement block, which fails
BEGIN;
CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
@@ -637,7 +645,7 @@ DROP INDEX replica_idx;
ALTER TABLE single_shard_items REPLICA IDENTITY default;
-- Drop the column from the worker...
\c - - - :worker_2_port
-ALTER TABLE lineitem_alter_220000 DROP COLUMN first;
+ALTER TABLE multi_alter_table_statements.lineitem_alter_220000 DROP COLUMN first;
-- Create table to trigger at-xact-end (deferred) failure
CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED);
-- Use an event trigger to log all DDL event tags in it
@@ -650,6 +658,8 @@ $ldt$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag();
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 224000;
-- The above trigger will cause failure at transaction end on one placement.
-- Citus always uses 2PC. 2PC should handle this "best" (no divergence)
BEGIN;
@@ -670,6 +680,8 @@ DROP EVENT TRIGGER log_ddl_tag;
DROP FUNCTION log_ddl_tag();
DROP TABLE ddl_commands;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 225000;
-- Distributed SELECTs may appear after ALTER
BEGIN;
CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
@@ -724,7 +736,7 @@ SELECT create_distributed_table('test_ab', 'a', 'hash');
INSERT INTO test_ab VALUES (2, 10);
INSERT INTO test_ab VALUES (2, 11);
CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a);
-ERROR: could not create unique index "temp_unique_index_1_220011"
+ERROR: could not create unique index "temp_unique_index_1_225006"
DETAIL: Key (a)=(2) is duplicated.
CONTEXT: while executing command on localhost:xxxxx
SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard
@@ -775,6 +787,8 @@ ORDER BY attnum;
(30 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 226000;
-- verify that we can rename distributed tables
SHOW citus.enable_ddl_propagation;
citus.enable_ddl_propagation
@@ -796,24 +810,28 @@ SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_renamed%' ORDER BY re
relname
---------------------------------------------------------------------
lineitem_renamed_220000
- lineitem_renamed_220001
- lineitem_renamed_220003
+ lineitem_renamed_222000
+ lineitem_renamed_222002
(3 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 227000;
-- revert it to original name
ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;
-- show rename worked on one worker, too
\c - - - :worker_1_port
-SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
+SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_222001' /* failed copy trails */ ORDER BY relname;
relname
---------------------------------------------------------------------
lineitem_alter_220000
- lineitem_alter_220001
- lineitem_alter_220003
+ lineitem_alter_222000
+ lineitem_alter_222002
(3 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 228000;
-- verify that we can set and reset storage parameters
ALTER TABLE lineitem_alter SET(fillfactor=40);
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
@@ -823,15 +841,17 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
(1 row)
\c - - - :worker_1_port
-SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
+SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_222001' /* failed copy trails */ ORDER BY relname;
relname | reloptions
---------------------------------------------------------------------
lineitem_alter_220000 | {fillfactor=40}
- lineitem_alter_220001 | {fillfactor=40}
- lineitem_alter_220003 | {fillfactor=40}
+ lineitem_alter_222000 | {fillfactor=40}
+ lineitem_alter_222002 | {fillfactor=40}
(3 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 229000;
ALTER TABLE lineitem_alter RESET(fillfactor);
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
relname | reloptions
@@ -840,15 +860,17 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
(1 row)
\c - - - :worker_1_port
-SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
+SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_222001' /* failed copy trails */ ORDER BY relname;
relname | reloptions
---------------------------------------------------------------------
lineitem_alter_220000 |
- lineitem_alter_220001 |
- lineitem_alter_220003 |
+ lineitem_alter_222000 |
+ lineitem_alter_222002 |
(3 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 230000;
-- verify that we can rename indexes on distributed tables
CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber);
ALTER INDEX temp_index_1 RENAME TO idx_lineitem_linenumber;
@@ -865,11 +887,13 @@ SELECT relname FROM pg_class WHERE relname LIKE 'idx_lineitem_linenumber%' ORDER
relname
---------------------------------------------------------------------
idx_lineitem_linenumber_220000
- idx_lineitem_linenumber_220001
- idx_lineitem_linenumber_220003
+ idx_lineitem_linenumber_222000
+ idx_lineitem_linenumber_222002
(3 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 231000;
-- now get rid of the index
DROP INDEX idx_lineitem_linenumber;
-- verify that we don't intercept DDL commands if propagation is turned off
@@ -889,9 +913,11 @@ ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;
ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int;
-- verify newly added column is not present in a worker shard
\c - - - :worker_1_port
-SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0;
+SELECT column_only_added_to_master FROM multi_alter_table_statements.lineitem_alter_220000 LIMIT 0;
ERROR: column "column_only_added_to_master" does not exist
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 232000;
-- ddl propagation flag is reset to default, disable it again
SET citus.enable_ddl_propagation to false;
-- following query succeeds since it accesses an previously existing column
@@ -933,6 +959,8 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alte
(0 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 233000;
-- verify alter table and drop sequence in the same transaction does not cause deadlock
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
@@ -967,7 +995,7 @@ SELECT create_distributed_table('trigger_table', 'id');
-- first set a trigger on a shard
\c - - - :worker_1_port
SET citus.enable_metadata_sync TO OFF;
-CREATE FUNCTION update_value() RETURNS trigger AS $up$
+CREATE OR REPLACE FUNCTION update_value() RETURNS trigger AS $up$
BEGIN
NEW.value := 'trigger enabled';
RETURN NEW;
@@ -975,9 +1003,11 @@ CREATE FUNCTION update_value() RETURNS trigger AS $up$
$up$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
CREATE TRIGGER update_value
-BEFORE INSERT ON trigger_table_220017
+BEFORE INSERT ON multi_alter_table_statements.trigger_table_233004
FOR EACH ROW EXECUTE PROCEDURE update_value();
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 234000;
INSERT INTO trigger_table VALUES (1, 'trigger disabled');
SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value;
value | count
@@ -1019,37 +1049,41 @@ DROP TABLESPACE super_fast_ssd;
SET citus.enable_ddl_propagation to true;
CREATE USER alter_table_owner WITH LOGIN;
GRANT USAGE ON SCHEMA public TO alter_table_owner;
+GRANT USAGE ON SCHEMA multi_alter_table_statements TO alter_table_owner;
\c - alter_table_owner - :master_port
-- should not be able to access table without permission
-SELECT count(*) FROM lineitem_alter;
+SELECT count(*) FROM multi_alter_table_statements.lineitem_alter;
ERROR: permission denied for table lineitem_alter
-- should not be able to drop the table as non table owner
-DROP TABLE lineitem_alter;
+DROP TABLE multi_alter_table_statements.lineitem_alter;
ERROR: must be owner of table lineitem_alter
\c - postgres - :master_port
-ALTER TABLE lineitem_alter OWNER TO alter_table_owner;
+ALTER TABLE multi_alter_table_statements.lineitem_alter OWNER TO alter_table_owner;
\c - alter_table_owner - :master_port
-- should be able to query the table as table owner
-SELECT count(*) FROM lineitem_alter;
+SELECT count(*) FROM multi_alter_table_statements.lineitem_alter;
count
---------------------------------------------------------------------
18000
(1 row)
-- should be able to drop the table as table owner
-DROP TABLE lineitem_alter;
+DROP TABLE multi_alter_table_statements.lineitem_alter;
-- check that nothing's left over on workers, other than the leftover shard created
-- during the unsuccessful COPY
\c - postgres - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
relname
---------------------------------------------------------------------
- lineitem_alter_220002
+ lineitem_alter_222001
(1 row)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 235000;
-- drop the roles created
REVOKE ALL ON SCHEMA PUBLIC FROM alter_table_owner;
+REVOKE ALL ON SCHEMA multi_alter_table_statements FROM alter_table_owner;
DROP ROLE alter_table_owner;
-- Test alter table with drop table in the same transaction
BEGIN;
@@ -1071,6 +1105,8 @@ SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%';
(0 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 236000;
-- verify logged info is propagated to workers when distributing the table
CREATE TABLE logged_test(id int);
ALTER TABLE logged_test SET UNLOGGED;
@@ -1084,13 +1120,15 @@ SELECT create_distributed_table('logged_test', 'id');
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
- logged_test_220022 | unlogged
- logged_test_220023 | unlogged
- logged_test_220024 | unlogged
- logged_test_220025 | unlogged
+ logged_test_236000 | unlogged
+ logged_test_236001 | unlogged
+ logged_test_236002 | unlogged
+ logged_test_236003 | unlogged
(4 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 237000;
-- verify SET LOGGED/UNLOGGED works after distributing the table
ALTER TABLE logged_test SET LOGGED;
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname;
@@ -1103,13 +1141,15 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logg
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
- logged_test_220022 | logged
- logged_test_220023 | logged
- logged_test_220024 | logged
- logged_test_220025 | logged
+ logged_test_236000 | logged
+ logged_test_236001 | logged
+ logged_test_236002 | logged
+ logged_test_236003 | logged
(4 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 238000;
ALTER TABLE logged_test SET UNLOGGED;
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname;
relname | logged_info
@@ -1121,13 +1161,15 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logg
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
- logged_test_220022 | unlogged
- logged_test_220023 | unlogged
- logged_test_220024 | unlogged
- logged_test_220025 | unlogged
+ logged_test_236000 | unlogged
+ logged_test_236001 | unlogged
+ logged_test_236002 | unlogged
+ logged_test_236003 | unlogged
(4 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 239000;
DROP TABLE logged_test;
-- Test WITH options on a normal simple hash-distributed table
CREATE TABLE hash_dist(id bigint primary key, f1 text) WITH (fillfactor=40);
@@ -1148,13 +1190,15 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist';
SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist_%' ORDER BY relname;
relname | reloptions
---------------------------------------------------------------------
- hash_dist_220026 | {fillfactor=40}
- hash_dist_220027 | {fillfactor=40}
- hash_dist_220028 | {fillfactor=40}
- hash_dist_220029 | {fillfactor=40}
+ hash_dist_239000 | {fillfactor=40}
+ hash_dist_239001 | {fillfactor=40}
+ hash_dist_239002 | {fillfactor=40}
+ hash_dist_239003 | {fillfactor=40}
(4 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 240000;
-- verify that we can set and reset index storage parameters
ALTER INDEX hash_dist_pkey SET(fillfactor=40);
SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
@@ -1167,13 +1211,15 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey_%' ORDER BY relname;
relname | reloptions
---------------------------------------------------------------------
- hash_dist_pkey_220026 | {fillfactor=40}
- hash_dist_pkey_220027 | {fillfactor=40}
- hash_dist_pkey_220028 | {fillfactor=40}
- hash_dist_pkey_220029 | {fillfactor=40}
+ hash_dist_pkey_239000 | {fillfactor=40}
+ hash_dist_pkey_239001 | {fillfactor=40}
+ hash_dist_pkey_239002 | {fillfactor=40}
+ hash_dist_pkey_239003 | {fillfactor=40}
(4 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 241000;
ALTER INDEX hash_dist_pkey RESET(fillfactor);
SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
relname | reloptions
@@ -1185,13 +1231,15 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey_%' ORDER BY relname;
relname | reloptions
---------------------------------------------------------------------
- hash_dist_pkey_220026 |
- hash_dist_pkey_220027 |
- hash_dist_pkey_220028 |
- hash_dist_pkey_220029 |
+ hash_dist_pkey_239000 |
+ hash_dist_pkey_239001 |
+ hash_dist_pkey_239002 |
+ hash_dist_pkey_239003 |
(4 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 242000;
-- verify error message on ALTER INDEX, SET TABLESPACE is unsupported
ALTER INDEX hash_dist_pkey SET TABLESPACE foo;
ERROR: alter index ... set tablespace ... is currently unsupported
@@ -1209,13 +1257,15 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index';
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index_%' ORDER BY relname;
relname | reloptions
---------------------------------------------------------------------
- another_index_220026 | {fillfactor=50}
- another_index_220027 | {fillfactor=50}
- another_index_220028 | {fillfactor=50}
- another_index_220029 | {fillfactor=50}
+ another_index_239000 | {fillfactor=50}
+ another_index_239001 | {fillfactor=50}
+ another_index_239002 | {fillfactor=50}
+ another_index_239003 | {fillfactor=50}
(4 rows)
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 243000;
-- get rid of the index
DROP INDEX another_index;
-- check if we fail properly when a column with un-supported constraint is added
@@ -1246,15 +1296,24 @@ SELECT create_reference_table('reference_table');
(1 row)
-ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE;
-ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
-DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names
-HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name FOREIGN KEY (test_col) REFERENCES reference_table(i) ON DELETE CASCADE;
-ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL;
-ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints
-DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names
-HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name FOREIGN KEY (test_col) REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL;
-DROP TABLE reference_table;
+ALTER TABLE test_table_1 ADD COLUMN test_col_1 int REFERENCES reference_table(i) ON DELETE CASCADE;
+ALTER TABLE test_table_1 ADD COLUMN test_col_2 int REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL;
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_grouped_fkey_constraints FROM get_grouped_fkey_constraints('multi_alter_table_statements.test_table_1')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+ is_coordinator | result
+---------------------------------------------------------------------
+ t | [{"deferred": false, "deferable": false, "on_delete": "c", "on_update": "a", "match_type": "s", "constraint_names": ["test_table_1__fkey"], "referenced_tables": ["multi_alter_table_statements.reference_table"], "referenced_columns": ["i"], "referencing_tables": ["multi_alter_table_statements.test_table_1"], "referencing_columns": ["test_col_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "c", "on_update": "n", "match_type": "s", "constraint_names": ["test_table_1__fkey1"], "referenced_tables": ["multi_alter_table_statements.reference_table"], "referenced_columns": ["i"], "referencing_tables": ["multi_alter_table_statements.test_table_1"], "referencing_columns": ["test_col_2"], "referencing_columns_set_null_or_default": null}]
+ f | [{"deferred": false, "deferable": false, "on_delete": "c", "on_update": "a", "match_type": "s", "constraint_names": ["test_table_1__fkey", "test_table_1__fkey_243000", "test_table_1__fkey_243002"], "referenced_tables": ["multi_alter_table_statements.reference_table", "multi_alter_table_statements.reference_table_243004", "multi_alter_table_statements.reference_table_243004"], "referenced_columns": ["i"], "referencing_tables": ["multi_alter_table_statements.test_table_1", "multi_alter_table_statements.test_table_1_243000", "multi_alter_table_statements.test_table_1_243002"], "referencing_columns": ["test_col_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "c", "on_update": "n", "match_type": "s", "constraint_names": ["test_table_1__fkey1", "test_table_1__fkey1_243000", "test_table_1__fkey1_243002"], "referenced_tables": ["multi_alter_table_statements.reference_table", "multi_alter_table_statements.reference_table_243004", "multi_alter_table_statements.reference_table_243004"], "referenced_columns": ["i"], "referencing_tables": ["multi_alter_table_statements.test_table_1", "multi_alter_table_statements.test_table_1_243000", "multi_alter_table_statements.test_table_1_243002"], "referencing_columns": ["test_col_2"], "referencing_columns_set_null_or_default": null}]
+ f | [{"deferred": false, "deferable": false, "on_delete": "c", "on_update": "a", "match_type": "s", "constraint_names": ["test_table_1__fkey", "test_table_1__fkey_243001", "test_table_1__fkey_243003"], "referenced_tables": ["multi_alter_table_statements.reference_table", "multi_alter_table_statements.reference_table_243004", "multi_alter_table_statements.reference_table_243004"], "referenced_columns": ["i"], "referencing_tables": ["multi_alter_table_statements.test_table_1", "multi_alter_table_statements.test_table_1_243001", "multi_alter_table_statements.test_table_1_243003"], "referencing_columns": ["test_col_1"], "referencing_columns_set_null_or_default": null}, {"deferred": false, "deferable": false, "on_delete": "c", "on_update": "n", "match_type": "s", "constraint_names": ["test_table_1__fkey1", "test_table_1__fkey1_243001", "test_table_1__fkey1_243003"], "referenced_tables": ["multi_alter_table_statements.reference_table", "multi_alter_table_statements.reference_table_243004", "multi_alter_table_statements.reference_table_243004"], "referenced_columns": ["i"], "referencing_tables": ["multi_alter_table_statements.test_table_1", "multi_alter_table_statements.test_table_1_243001", "multi_alter_table_statements.test_table_1_243003"], "referencing_columns": ["test_col_2"], "referencing_columns_set_null_or_default": null}]
+(3 rows)
+
+BEGIN;
+ SET LOCAL client_min_messages TO WARNING;
+ DROP TABLE reference_table CASCADE;
+COMMIT;
CREATE TABLE referenced_table(i int UNIQUE);
SELECT create_distributed_table('referenced_table', 'i');
create_distributed_table
@@ -1262,7 +1321,7 @@ SELECT create_distributed_table('referenced_table', 'i');
(1 row)
-ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES referenced_table(i);
+ALTER TABLE test_table_1 ADD COLUMN test_col_3 int REFERENCES referenced_table(i);
ERROR: cannot create foreign key constraint
DETAIL: Foreign keys are supported in two cases, either in between two colocated tables including partition column in the same ordinal in the both tables or from distributed to reference tables
DROP TABLE referenced_table, test_table_1;
@@ -1290,8 +1349,7 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.p
(schema,{test_schema_for_sequence_propagation},{})
(1 row)
+SET client_min_messages TO WARNING;
DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to sequence test_schema_for_sequence_propagation.seq_10
-drop cascades to default value for column x of table table_without_sequence
DROP TABLE table_without_sequence;
+DROP SCHEMA multi_alter_table_statements CASCADE;
diff --git a/src/test/regress/expected/multi_complex_count_distinct.out b/src/test/regress/expected/multi_complex_count_distinct.out
index d4e6ecfa3..baa9c829a 100644
--- a/src/test/regress/expected/multi_complex_count_distinct.out
+++ b/src/test/regress/expected/multi_complex_count_distinct.out
@@ -1,6 +1,18 @@
--
-- COMPLEX_COUNT_DISTINCT
--
+-- This test file has an alternative output because of the following in PG16:
+-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
+-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
+-- The alternative output can be deleted when we drop support for PG15
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
+ server_version_ge_16
+---------------------------------------------------------------------
+ t
+(1 row)
+
SET citus.next_shard_id TO 240000;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
@@ -65,7 +77,7 @@ SELECT
GROUP BY l_orderkey
ORDER BY 2 DESC, 1 DESC
LIMIT 10;
- QUERY PLAN
+ QUERY PLAN
---------------------------------------------------------------------
Limit
Output: remote_scan.l_orderkey, remote_scan.count
@@ -87,9 +99,12 @@ SELECT
-> GroupAggregate
Output: l_orderkey, count(DISTINCT l_partkey)
Group Key: lineitem_hash.l_orderkey
- -> Index Scan Backward using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
- Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-(22 rows)
+ -> Sort
+ Output: l_orderkey, l_partkey
+ Sort Key: lineitem_hash.l_orderkey DESC, lineitem_hash.l_partkey
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey
+(25 rows)
-- it is also supported if there is no grouping or grouping is on non-partition field
SELECT
@@ -108,7 +123,7 @@ SELECT
FROM lineitem_hash
ORDER BY 1 DESC
LIMIT 10;
- QUERY PLAN
+ QUERY PLAN
---------------------------------------------------------------------
Limit
Output: (count(DISTINCT remote_scan.count))
@@ -117,19 +132,22 @@ SELECT
Sort Key: (count(DISTINCT remote_scan.count)) DESC
-> Aggregate
Output: count(DISTINCT remote_scan.count)
- -> Custom Scan (Citus Adaptive)
+ -> Sort
Output: remote_scan.count
- Task Count: 8
- Tasks Shown: One of 8
- -> Task
- Query: SELECT l_partkey AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_partkey
- Node: host=localhost port=xxxxx dbname=regression
- -> HashAggregate
- Output: l_partkey
- Group Key: lineitem_hash.l_partkey
- -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
- Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-(19 rows)
+ Sort Key: remote_scan.count
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.count
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_partkey AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_partkey
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_partkey
+ Group Key: lineitem_hash.l_partkey
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(22 rows)
SELECT
l_shipmode, count(DISTINCT l_partkey)
@@ -167,7 +185,7 @@ SELECT
Group Key: remote_scan.l_shipmode
-> Sort
Output: remote_scan.l_shipmode, remote_scan.count
- Sort Key: remote_scan.l_shipmode DESC
+ Sort Key: remote_scan.l_shipmode DESC, remote_scan.count
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_shipmode, remote_scan.count
Task Count: 8
@@ -210,7 +228,7 @@ SELECT
GROUP BY l_orderkey
ORDER BY 3 DESC, 2 DESC, 1
LIMIT 10;
- QUERY PLAN
+ QUERY PLAN
---------------------------------------------------------------------
Limit
Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
@@ -232,9 +250,12 @@ SELECT
-> GroupAggregate
Output: l_orderkey, count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
Group Key: lineitem_hash.l_orderkey
- -> Index Scan using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
- Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-(22 rows)
+ -> Sort
+ Output: l_orderkey, l_partkey, l_shipmode
+ Sort Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_shipmode
+(25 rows)
-- partition/non-partition column count distinct no grouping
SELECT
@@ -249,23 +270,26 @@ EXPLAIN (COSTS false, VERBOSE true)
SELECT
count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode)
FROM lineitem_hash;
- QUERY PLAN
+ QUERY PLAN
---------------------------------------------------------------------
Aggregate
Output: count(DISTINCT remote_scan.count), count(DISTINCT remote_scan.count_1), count(DISTINCT remote_scan.count_2)
- -> Custom Scan (Citus Adaptive)
+ -> Sort
Output: remote_scan.count, remote_scan.count_1, remote_scan.count_2
- Task Count: 8
- Tasks Shown: One of 8
- -> Task
- Query: SELECT l_orderkey AS count, l_partkey AS count, l_shipmode AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey, l_partkey, l_shipmode
- Node: host=localhost port=xxxxx dbname=regression
- -> HashAggregate
- Output: l_orderkey, l_partkey, l_shipmode
- Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode
- -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
- Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-(14 rows)
+ Sort Key: remote_scan.count
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.count, remote_scan.count_1, remote_scan.count_2
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_orderkey AS count, l_partkey AS count, l_shipmode AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey, l_partkey, l_shipmode
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_orderkey, l_partkey, l_shipmode
+ Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(17 rows)
-- distinct/non-distinct on partition and non-partition columns
SELECT
@@ -433,7 +457,7 @@ SELECT *
Group Key: lineitem_hash.l_partkey
-> Sort
Output: l_partkey, l_orderkey
- Sort Key: lineitem_hash.l_partkey
+ Sort Key: lineitem_hash.l_partkey, lineitem_hash.l_orderkey
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_partkey, l_orderkey
Task Count: 1
@@ -483,7 +507,7 @@ SELECT
GROUP BY l_orderkey
ORDER BY 2 DESC, 3 DESC, 1
LIMIT 10;
- QUERY PLAN
+ QUERY PLAN
---------------------------------------------------------------------
Limit
Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
@@ -505,9 +529,12 @@ SELECT
-> GroupAggregate
Output: l_orderkey, count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar)), count(DISTINCT l_suppkey)
Group Key: lineitem_hash.l_orderkey
- -> Index Scan using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
- Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
-(22 rows)
+ -> Sort
+ Output: l_orderkey, l_suppkey, l_shipmode
+ Sort Key: lineitem_hash.l_orderkey, lineitem_hash.l_suppkey
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_suppkey, l_shipmode
+(25 rows)
-- group by on non-partition column
SELECT
@@ -550,7 +577,7 @@ SELECT
Group Key: remote_scan.l_suppkey
-> Sort
Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1
- Sort Key: remote_scan.l_suppkey DESC
+ Sort Key: remote_scan.l_suppkey DESC, remote_scan.count
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1
Task Count: 8
diff --git a/src/test/regress/expected/multi_complex_count_distinct_0.out b/src/test/regress/expected/multi_complex_count_distinct_0.out
new file mode 100644
index 000000000..36af62e96
--- /dev/null
+++ b/src/test/regress/expected/multi_complex_count_distinct_0.out
@@ -0,0 +1,1139 @@
+--
+-- COMPLEX_COUNT_DISTINCT
+--
+-- This test file has an alternative output because of the following in PG16:
+-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
+-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
+-- The alternative output can be deleted when we drop support for PG15
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
+ server_version_ge_16
+---------------------------------------------------------------------
+ f
+(1 row)
+
+SET citus.next_shard_id TO 240000;
+SET citus.shard_count TO 8;
+SET citus.shard_replication_factor TO 1;
+SET citus.coordinator_aggregation_strategy TO 'disabled';
+CREATE TABLE lineitem_hash (
+ l_orderkey bigint not null,
+ l_partkey integer not null,
+ l_suppkey integer not null,
+ l_linenumber integer not null,
+ l_quantity decimal(15, 2) not null,
+ l_extendedprice decimal(15, 2) not null,
+ l_discount decimal(15, 2) not null,
+ l_tax decimal(15, 2) not null,
+ l_returnflag char(1) not null,
+ l_linestatus char(1) not null,
+ l_shipdate date not null,
+ l_commitdate date not null,
+ l_receiptdate date not null,
+ l_shipinstruct char(25) not null,
+ l_shipmode char(10) not null,
+ l_comment varchar(44) not null,
+ PRIMARY KEY(l_orderkey, l_linenumber) );
+SELECT create_distributed_table('lineitem_hash', 'l_orderkey', 'hash');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+\set lineitem_1_data_file :abs_srcdir '/data/lineitem.1.data'
+\set lineitem_2_data_file :abs_srcdir '/data/lineitem.2.data'
+\set client_side_copy_command '\\copy lineitem_hash FROM ' :'lineitem_1_data_file' ' with delimiter '''|''';'
+:client_side_copy_command
+\set client_side_copy_command '\\copy lineitem_hash FROM ' :'lineitem_2_data_file' ' with delimiter '''|''';'
+:client_side_copy_command
+ANALYZE lineitem_hash;
+-- count(distinct) is supported on top level query if there
+-- is a grouping on the partition key
+SELECT
+ l_orderkey, count(DISTINCT l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_orderkey
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_orderkey | count
+---------------------------------------------------------------------
+ 14885 | 7
+ 14884 | 7
+ 14821 | 7
+ 14790 | 7
+ 14785 | 7
+ 14755 | 7
+ 14725 | 7
+ 14694 | 7
+ 14627 | 7
+ 14624 | 7
+(10 rows)
+
+EXPLAIN (COSTS false, VERBOSE true)
+SELECT
+ l_orderkey, count(DISTINCT l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_orderkey
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Limit
+ Output: remote_scan.l_orderkey, remote_scan.count
+ -> Sort
+ Output: remote_scan.l_orderkey, remote_scan.count
+ Sort Key: remote_scan.count DESC, remote_scan.l_orderkey DESC
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_orderkey, remote_scan.count
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_orderkey, count(DISTINCT l_partkey) AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey ORDER BY (count(DISTINCT l_partkey)) DESC, l_orderkey DESC LIMIT '10'::bigint
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ Output: l_orderkey, (count(DISTINCT l_partkey))
+ -> Sort
+ Output: l_orderkey, (count(DISTINCT l_partkey))
+ Sort Key: (count(DISTINCT lineitem_hash.l_partkey)) DESC, lineitem_hash.l_orderkey DESC
+ -> GroupAggregate
+ Output: l_orderkey, count(DISTINCT l_partkey)
+ Group Key: lineitem_hash.l_orderkey
+ -> Index Scan Backward using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(22 rows)
+
+-- it is also supported if there is no grouping or grouping is on non-partition field
+SELECT
+ count(DISTINCT l_partkey)
+ FROM lineitem_hash
+ ORDER BY 1 DESC
+ LIMIT 10;
+ count
+---------------------------------------------------------------------
+ 11661
+(1 row)
+
+EXPLAIN (COSTS false, VERBOSE true)
+SELECT
+ count(DISTINCT l_partkey)
+ FROM lineitem_hash
+ ORDER BY 1 DESC
+ LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Limit
+ Output: (count(DISTINCT remote_scan.count))
+ -> Sort
+ Output: (count(DISTINCT remote_scan.count))
+ Sort Key: (count(DISTINCT remote_scan.count)) DESC
+ -> Aggregate
+ Output: count(DISTINCT remote_scan.count)
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.count
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_partkey AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_partkey
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_partkey
+ Group Key: lineitem_hash.l_partkey
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(19 rows)
+
+SELECT
+ l_shipmode, count(DISTINCT l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_shipmode
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_shipmode | count
+---------------------------------------------------------------------
+ TRUCK | 1757
+ MAIL | 1730
+ AIR | 1702
+ FOB | 1700
+ RAIL | 1696
+ SHIP | 1684
+ REG AIR | 1676
+(7 rows)
+
+EXPLAIN (COSTS false, VERBOSE true)
+SELECT
+ l_shipmode, count(DISTINCT l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_shipmode
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Limit
+ Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count))
+ -> Sort
+ Output: remote_scan.l_shipmode, (count(DISTINCT remote_scan.count))
+ Sort Key: (count(DISTINCT remote_scan.count)) DESC, remote_scan.l_shipmode DESC
+ -> GroupAggregate
+ Output: remote_scan.l_shipmode, count(DISTINCT remote_scan.count)
+ Group Key: remote_scan.l_shipmode
+ -> Sort
+ Output: remote_scan.l_shipmode, remote_scan.count
+ Sort Key: remote_scan.l_shipmode DESC
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_shipmode, remote_scan.count
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_shipmode, l_partkey AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_shipmode, l_partkey
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_shipmode, l_partkey
+ Group Key: lineitem_hash.l_shipmode, lineitem_hash.l_partkey
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(23 rows)
+
+-- mixed mode count distinct, grouped by partition column
+SELECT
+ l_orderkey, count(distinct l_partkey), count(distinct l_shipmode)
+ FROM lineitem_hash
+ GROUP BY l_orderkey
+ ORDER BY 3 DESC, 2 DESC, 1
+ LIMIT 10;
+ l_orderkey | count | count
+---------------------------------------------------------------------
+ 226 | 7 | 7
+ 1316 | 7 | 7
+ 1477 | 7 | 7
+ 3555 | 7 | 7
+ 12258 | 7 | 7
+ 12835 | 7 | 7
+ 768 | 7 | 6
+ 1121 | 7 | 6
+ 1153 | 7 | 6
+ 1281 | 7 | 6
+(10 rows)
+
+EXPLAIN (COSTS false, VERBOSE true)
+SELECT
+ l_orderkey, count(distinct l_partkey), count(distinct l_shipmode)
+ FROM lineitem_hash
+ GROUP BY l_orderkey
+ ORDER BY 3 DESC, 2 DESC, 1
+ LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Limit
+ Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
+ -> Sort
+ Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
+ Sort Key: remote_scan.count_1 DESC, remote_scan.count DESC, remote_scan.l_orderkey
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_orderkey, count(DISTINCT l_partkey) AS count, count(DISTINCT l_shipmode) AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey ORDER BY (count(DISTINCT l_shipmode)) DESC, (count(DISTINCT l_partkey)) DESC, l_orderkey LIMIT '10'::bigint
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ Output: l_orderkey, (count(DISTINCT l_partkey)), (count(DISTINCT l_shipmode))
+ -> Sort
+ Output: l_orderkey, (count(DISTINCT l_partkey)), (count(DISTINCT l_shipmode))
+ Sort Key: (count(DISTINCT lineitem_hash.l_shipmode)) DESC, (count(DISTINCT lineitem_hash.l_partkey)) DESC, lineitem_hash.l_orderkey
+ -> GroupAggregate
+ Output: l_orderkey, count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
+ Group Key: lineitem_hash.l_orderkey
+ -> Index Scan using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(22 rows)
+
+-- partition/non-partition column count distinct no grouping
+SELECT
+ count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode)
+ FROM lineitem_hash;
+ count | count | count
+---------------------------------------------------------------------
+ 2985 | 11661 | 7
+(1 row)
+
+EXPLAIN (COSTS false, VERBOSE true)
+SELECT
+ count(distinct l_orderkey), count(distinct l_partkey), count(distinct l_shipmode)
+ FROM lineitem_hash;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Aggregate
+ Output: count(DISTINCT remote_scan.count), count(DISTINCT remote_scan.count_1), count(DISTINCT remote_scan.count_2)
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.count, remote_scan.count_1, remote_scan.count_2
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_orderkey AS count, l_partkey AS count, l_shipmode AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey, l_partkey, l_shipmode
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_orderkey, l_partkey, l_shipmode
+ Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(14 rows)
+
+-- distinct/non-distinct on partition and non-partition columns
+SELECT
+ count(distinct l_orderkey), count(l_orderkey),
+ count(distinct l_partkey), count(l_partkey),
+ count(distinct l_shipmode), count(l_shipmode)
+ FROM lineitem_hash;
+ count | count | count | count | count | count
+---------------------------------------------------------------------
+ 2985 | 12000 | 11661 | 12000 | 7 | 12000
+(1 row)
+
+-- mixed mode count distinct, grouped by non-partition column
+SELECT
+ l_shipmode, count(distinct l_partkey), count(distinct l_orderkey)
+ FROM lineitem_hash
+ GROUP BY l_shipmode
+ ORDER BY 1, 2 DESC, 3 DESC;
+ l_shipmode | count | count
+---------------------------------------------------------------------
+ AIR | 1702 | 1327
+ FOB | 1700 | 1276
+ MAIL | 1730 | 1299
+ RAIL | 1696 | 1265
+ REG AIR | 1676 | 1275
+ SHIP | 1684 | 1289
+ TRUCK | 1757 | 1333
+(7 rows)
+
+-- mixed mode count distinct, grouped by non-partition column
+-- having on partition column
+SELECT
+ l_shipmode, count(distinct l_partkey), count(distinct l_orderkey)
+ FROM lineitem_hash
+ GROUP BY l_shipmode
+ HAVING count(distinct l_orderkey) > 1300
+ ORDER BY 1, 2 DESC;
+ l_shipmode | count | count
+---------------------------------------------------------------------
+ AIR | 1702 | 1327
+ TRUCK | 1757 | 1333
+(2 rows)
+
+-- same but having clause is not on target list
+SELECT
+ l_shipmode, count(distinct l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_shipmode
+ HAVING count(distinct l_orderkey) > 1300
+ ORDER BY 1, 2 DESC;
+ l_shipmode | count
+---------------------------------------------------------------------
+ AIR | 1702
+ TRUCK | 1757
+(2 rows)
+
+-- mixed mode count distinct, grouped by non-partition column
+-- having on non-partition column
+SELECT
+ l_shipmode, count(distinct l_partkey), count(distinct l_suppkey)
+ FROM lineitem_hash
+ GROUP BY l_shipmode
+ HAVING count(distinct l_suppkey) > 1550
+ ORDER BY 1, 2 DESC;
+ l_shipmode | count | count
+---------------------------------------------------------------------
+ AIR | 1702 | 1564
+ FOB | 1700 | 1571
+ MAIL | 1730 | 1573
+ RAIL | 1696 | 1581
+ REG AIR | 1676 | 1557
+ SHIP | 1684 | 1554
+ TRUCK | 1757 | 1602
+(7 rows)
+
+-- same but having clause is not on target list
+SELECT
+ l_shipmode, count(distinct l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_shipmode
+ HAVING count(distinct l_suppkey) > 1550
+ ORDER BY 1, 2 DESC;
+ l_shipmode | count
+---------------------------------------------------------------------
+ AIR | 1702
+ FOB | 1700
+ MAIL | 1730
+ RAIL | 1696
+ REG AIR | 1676
+ SHIP | 1684
+ TRUCK | 1757
+(7 rows)
+
+-- count distinct is supported on single table subqueries
+SELECT *
+ FROM (
+ SELECT
+ l_orderkey, count(DISTINCT l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_orderkey) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_orderkey | count
+---------------------------------------------------------------------
+ 14885 | 7
+ 14884 | 7
+ 14821 | 7
+ 14790 | 7
+ 14785 | 7
+ 14755 | 7
+ 14725 | 7
+ 14694 | 7
+ 14627 | 7
+ 14624 | 7
+(10 rows)
+
+SELECT *
+ FROM (
+ SELECT
+ l_partkey, count(DISTINCT l_orderkey)
+ FROM lineitem_hash
+ GROUP BY l_partkey) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_partkey | count
+---------------------------------------------------------------------
+ 199146 | 3
+ 188804 | 3
+ 177771 | 3
+ 160895 | 3
+ 149926 | 3
+ 136884 | 3
+ 87761 | 3
+ 15283 | 3
+ 6983 | 3
+ 1927 | 3
+(10 rows)
+
+EXPLAIN (COSTS false, VERBOSE true)
+SELECT *
+ FROM (
+ SELECT
+ l_partkey, count(DISTINCT l_orderkey)
+ FROM lineitem_hash
+ GROUP BY l_partkey) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_partkey, remote_scan.count
+ -> Distributed Subplan XXX_1
+ -> HashAggregate
+ Output: remote_scan.l_partkey, COALESCE((pg_catalog.sum(remote_scan.count))::bigint, '0'::bigint)
+ Group Key: remote_scan.l_partkey
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_partkey, remote_scan.count
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_partkey, count(DISTINCT l_orderkey) AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_partkey
+ Node: host=localhost port=xxxxx dbname=regression
+ -> GroupAggregate
+ Output: l_partkey, count(DISTINCT l_orderkey)
+ Group Key: lineitem_hash.l_partkey
+ -> Sort
+ Output: l_partkey, l_orderkey
+ Sort Key: lineitem_hash.l_partkey
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_partkey, l_orderkey
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Query: SELECT l_partkey, count FROM (SELECT intermediate_result.l_partkey, intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_partkey integer, count bigint)) sub ORDER BY count DESC, l_partkey DESC LIMIT 10
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ Output: intermediate_result.l_partkey, intermediate_result.count
+ -> Sort
+ Output: intermediate_result.l_partkey, intermediate_result.count
+ Sort Key: intermediate_result.count DESC, intermediate_result.l_partkey DESC
+ -> Function Scan on pg_catalog.read_intermediate_result intermediate_result
+ Output: intermediate_result.l_partkey, intermediate_result.count
+ Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
+(34 rows)
+
+-- count distinct with filters
+SELECT
+ l_orderkey,
+ count(DISTINCT l_suppkey) FILTER (WHERE l_shipmode = 'AIR'),
+ count(DISTINCT l_suppkey)
+ FROM lineitem_hash
+ GROUP BY l_orderkey
+ ORDER BY 2 DESC, 3 DESC, 1
+ LIMIT 10;
+ l_orderkey | count | count
+---------------------------------------------------------------------
+ 4964 | 4 | 7
+ 12005 | 4 | 7
+ 5409 | 4 | 6
+ 164 | 3 | 7
+ 322 | 3 | 7
+ 871 | 3 | 7
+ 1156 | 3 | 7
+ 1574 | 3 | 7
+ 2054 | 3 | 7
+ 2309 | 3 | 7
+(10 rows)
+
+EXPLAIN (COSTS false, VERBOSE true)
+SELECT
+ l_orderkey,
+ count(DISTINCT l_suppkey) FILTER (WHERE l_shipmode = 'AIR'),
+ count(DISTINCT l_suppkey)
+ FROM lineitem_hash
+ GROUP BY l_orderkey
+ ORDER BY 2 DESC, 3 DESC, 1
+ LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Limit
+ Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
+ -> Sort
+ Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
+ Sort Key: remote_scan.count DESC, remote_scan.count_1 DESC, remote_scan.l_orderkey
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_orderkey, remote_scan.count, remote_scan.count_1
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_orderkey, count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar)) AS count, count(DISTINCT l_suppkey) AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_orderkey ORDER BY (count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar))) DESC, (count(DISTINCT l_suppkey)) DESC, l_orderkey LIMIT '10'::bigint
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ Output: l_orderkey, (count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar))), (count(DISTINCT l_suppkey))
+ -> Sort
+ Output: l_orderkey, (count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar))), (count(DISTINCT l_suppkey))
+ Sort Key: (count(DISTINCT lineitem_hash.l_suppkey) FILTER (WHERE (lineitem_hash.l_shipmode = 'AIR'::bpchar))) DESC, (count(DISTINCT lineitem_hash.l_suppkey)) DESC, lineitem_hash.l_orderkey
+ -> GroupAggregate
+ Output: l_orderkey, count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar)), count(DISTINCT l_suppkey)
+ Group Key: lineitem_hash.l_orderkey
+ -> Index Scan using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(22 rows)
+
+-- group by on non-partition column
+SELECT
+ l_suppkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR')
+ FROM lineitem_hash
+ GROUP BY l_suppkey
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_suppkey | count
+---------------------------------------------------------------------
+ 7680 | 4
+ 7703 | 3
+ 7542 | 3
+ 7072 | 3
+ 6335 | 3
+ 5873 | 3
+ 1318 | 3
+ 1042 | 3
+ 160 | 3
+ 9872 | 2
+(10 rows)
+
+-- explaining the same query fails
+EXPLAIN (COSTS false, VERBOSE true)
+SELECT
+ l_suppkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR')
+ FROM lineitem_hash
+ GROUP BY l_suppkey
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Limit
+ Output: remote_scan.l_suppkey, (count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar)))
+ -> Sort
+ Output: remote_scan.l_suppkey, (count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar)))
+ Sort Key: (count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar))) DESC, remote_scan.l_suppkey DESC
+ -> GroupAggregate
+ Output: remote_scan.l_suppkey, count(DISTINCT remote_scan.count) FILTER (WHERE (remote_scan.count_1 = 'AIR'::bpchar))
+ Group Key: remote_scan.l_suppkey
+ -> Sort
+ Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1
+ Sort Key: remote_scan.l_suppkey DESC
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1
+ Task Count: 8
+ Tasks Shown: One of 8
+ -> Task
+ Query: SELECT l_suppkey, l_partkey AS count, l_shipmode AS count FROM public.lineitem_hash_240000 lineitem_hash WHERE true GROUP BY l_suppkey, l_partkey, l_shipmode
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_suppkey, l_partkey, l_shipmode
+ Group Key: lineitem_hash.l_suppkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode
+ -> Seq Scan on public.lineitem_hash_240000 lineitem_hash
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+(23 rows)
+
+-- without group by, on partition column
+SELECT
+ count(DISTINCT l_orderkey) FILTER (WHERE l_shipmode = 'AIR')
+ FROM lineitem_hash;
+ count
+---------------------------------------------------------------------
+ 1327
+(1 row)
+
+-- without group by, on non-partition column
+SELECT
+ count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR')
+ FROM lineitem_hash;
+ count
+---------------------------------------------------------------------
+ 1702
+(1 row)
+
+SELECT
+ count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR'),
+ count(DISTINCT l_partkey),
+ count(DISTINCT l_shipdate)
+ FROM lineitem_hash;
+ count | count | count
+---------------------------------------------------------------------
+ 1702 | 11661 | 2470
+(1 row)
+
+-- filter column already exists in target list
+SELECT *
+ FROM (
+ SELECT
+ l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_orderkey > 100)
+ FROM lineitem_hash
+ GROUP BY l_orderkey) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_orderkey | count
+---------------------------------------------------------------------
+ 14885 | 7
+ 14884 | 7
+ 14821 | 7
+ 14790 | 7
+ 14785 | 7
+ 14755 | 7
+ 14725 | 7
+ 14694 | 7
+ 14627 | 7
+ 14624 | 7
+(10 rows)
+
+-- filter column does not exist in target list
+SELECT *
+ FROM (
+ SELECT
+ l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR')
+ FROM lineitem_hash
+ GROUP BY l_orderkey) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_orderkey | count
+---------------------------------------------------------------------
+ 12005 | 4
+ 5409 | 4
+ 4964 | 4
+ 14848 | 3
+ 14496 | 3
+ 13473 | 3
+ 13122 | 3
+ 12929 | 3
+ 12645 | 3
+ 12417 | 3
+(10 rows)
+
+-- case expr in count distinct is supported.
+-- count orders partkeys if l_shipmode is air
+SELECT *
+ FROM (
+ SELECT
+ l_orderkey, count(DISTINCT CASE WHEN l_shipmode = 'AIR' THEN l_partkey ELSE NULL END) as count
+ FROM lineitem_hash
+ GROUP BY l_orderkey) sub
+ WHERE count > 0
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_orderkey | count
+---------------------------------------------------------------------
+ 12005 | 4
+ 5409 | 4
+ 4964 | 4
+ 14848 | 3
+ 14496 | 3
+ 13473 | 3
+ 13122 | 3
+ 12929 | 3
+ 12645 | 3
+ 12417 | 3
+(10 rows)
+
+-- text like operator is also supported
+SELECT *
+ FROM (
+ SELECT
+ l_orderkey, count(DISTINCT CASE WHEN l_shipmode like '%A%' THEN l_partkey ELSE NULL END) as count
+ FROM lineitem_hash
+ GROUP BY l_orderkey) sub
+ WHERE count > 0
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ l_orderkey | count
+---------------------------------------------------------------------
+ 14275 | 7
+ 14181 | 7
+ 13605 | 7
+ 12707 | 7
+ 12384 | 7
+ 11746 | 7
+ 10727 | 7
+ 10467 | 7
+ 5636 | 7
+ 4614 | 7
+(10 rows)
+
+-- count distinct is rejected if it does not reference any columns
+SELECT *
+ FROM (
+ SELECT
+ l_linenumber, count(DISTINCT 1)
+ FROM lineitem_hash
+ GROUP BY l_linenumber) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ERROR: cannot compute aggregate (distinct)
+DETAIL: aggregate (distinct) with no columns is unsupported
+HINT: You can load the hll extension from contrib packages and enable distinct approximations.
+-- count distinct is rejected if it does not reference any columns
+SELECT *
+ FROM (
+ SELECT
+ l_linenumber, count(DISTINCT (random() * 5)::int)
+ FROM lineitem_hash
+ GROUP BY l_linenumber) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ERROR: cannot compute aggregate (distinct)
+DETAIL: aggregate (distinct) with no columns is unsupported
+HINT: You can load the hll extension from contrib packages and enable distinct approximations.
+-- even non-const function calls are supported within count distinct
+SELECT *
+ FROM (
+ SELECT
+ l_orderkey, count(DISTINCT (random() * 5)::int = l_linenumber)
+ FROM lineitem_hash
+ GROUP BY l_orderkey) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 0;
+ l_orderkey | count
+---------------------------------------------------------------------
+(0 rows)
+
+-- multiple nested subquery
+SELECT
+ total,
+ avg(avg_count) as total_avg_count
+ FROM (
+ SELECT
+ number_sum,
+ count(DISTINCT l_suppkey) as total,
+ avg(total_count) avg_count
+ FROM (
+ SELECT
+ l_suppkey,
+ sum(l_linenumber) as number_sum,
+ count(DISTINCT l_shipmode) as total_count
+ FROM
+ lineitem_hash
+ WHERE
+ l_partkey > 100 and
+ l_quantity > 2 and
+ l_orderkey < 10000
+ GROUP BY
+ l_suppkey) as distributed_table
+ WHERE
+ number_sum >= 10
+ GROUP BY
+ number_sum) as distributed_table_2
+ GROUP BY
+ total
+ ORDER BY
+ total_avg_count DESC;
+ total | total_avg_count
+---------------------------------------------------------------------
+ 1 | 3.6000000000000000
+ 6 | 2.8333333333333333
+ 10 | 2.6000000000000000
+ 27 | 2.5555555555555556
+ 32 | 2.4687500000000000
+ 77 | 2.1948051948051948
+ 57 | 2.1754385964912281
+(7 rows)
+
+-- multiple cases query
+SELECT *
+ FROM (
+ SELECT
+ count(DISTINCT
+ CASE
+ WHEN l_shipmode = 'TRUCK' THEN l_partkey
+ WHEN l_shipmode = 'AIR' THEN l_quantity
+ WHEN l_shipmode = 'SHIP' THEN l_discount
+ ELSE l_suppkey
+ END) as count,
+ l_shipdate
+ FROM
+ lineitem_hash
+ GROUP BY
+ l_shipdate) sub
+ WHERE
+ count > 0
+ ORDER BY
+ 1 DESC, 2 DESC
+ LIMIT 10;
+ count | l_shipdate
+---------------------------------------------------------------------
+ 14 | 07-30-1997
+ 13 | 05-26-1998
+ 13 | 08-08-1997
+ 13 | 11-17-1995
+ 13 | 01-09-1993
+ 12 | 01-15-1998
+ 12 | 10-15-1997
+ 12 | 09-07-1997
+ 12 | 06-02-1997
+ 12 | 03-14-1997
+(10 rows)
+
+-- count DISTINCT expression
+SELECT *
+ FROM (
+ SELECT
+ l_quantity, count(DISTINCT ((l_orderkey / 1000) * 1000 )) as count
+ FROM
+ lineitem_hash
+ GROUP BY
+ l_quantity) sub
+ WHERE
+ count > 0
+ ORDER BY
+ 2 DESC, 1 DESC
+ LIMIT 10;
+ l_quantity | count
+---------------------------------------------------------------------
+ 48.00 | 13
+ 47.00 | 13
+ 37.00 | 13
+ 33.00 | 13
+ 26.00 | 13
+ 25.00 | 13
+ 23.00 | 13
+ 21.00 | 13
+ 15.00 | 13
+ 12.00 | 13
+(10 rows)
+
+-- count DISTINCT is part of an expression which includes another aggregate
+SELECT *
+ FROM (
+ SELECT
+ sum(((l_partkey * l_tax) / 100)) /
+ count(DISTINCT
+ CASE
+ WHEN l_shipmode = 'TRUCK' THEN l_partkey
+ ELSE l_suppkey
+ END) as avg,
+ l_shipmode
+ FROM
+ lineitem_hash
+ GROUP BY
+ l_shipmode) sub
+ ORDER BY
+ 1 DESC, 2 DESC
+ LIMIT 10;
+ avg | l_shipmode
+---------------------------------------------------------------------
+ 44.82904609027336300064 | MAIL
+ 44.80704536679536679537 | SHIP
+ 44.68891732736572890026 | AIR
+ 44.34106724470134874759 | REG AIR
+ 43.12739987269255251432 | FOB
+ 43.07299253636938646426 | RAIL
+ 40.50298377916903813318 | TRUCK
+(7 rows)
+
+-- count DISTINCT CASE WHEN expression
+SELECT *
+ FROM (
+ SELECT
+ count(DISTINCT
+ CASE
+ WHEN l_shipmode = 'TRUCK' THEN l_linenumber
+ WHEN l_shipmode = 'AIR' THEN l_linenumber + 10
+ ELSE 2
+ END) as avg
+ FROM
+ lineitem_hash
+ GROUP BY l_shipdate) sub
+ ORDER BY 1 DESC
+ LIMIT 10;
+ avg
+---------------------------------------------------------------------
+ 7
+ 6
+ 6
+ 6
+ 6
+ 6
+ 6
+ 6
+ 5
+ 5
+(10 rows)
+
+-- COUNT DISTINCT (c1, c2)
+SELECT *
+ FROM
+ (SELECT
+ l_shipmode,
+ count(DISTINCT (l_shipdate, l_tax))
+ FROM
+ lineitem_hash
+ GROUP BY
+ l_shipmode) t
+ ORDER BY
+ 2 DESC,1 DESC
+ LIMIT 10;
+ l_shipmode | count
+---------------------------------------------------------------------
+ TRUCK | 1689
+ MAIL | 1683
+ FOB | 1655
+ AIR | 1650
+ SHIP | 1644
+ RAIL | 1636
+ REG AIR | 1607
+(7 rows)
+
+-- distinct on non-var (type cast/field select) columns are also
+-- supported if grouped on distribution column
+-- random is added to prevent flattening by postgresql
+SELECT
+ l_orderkey, count(a::int), count(distinct a::int)
+ FROM (
+ SELECT l_orderkey, l_orderkey * 1.5 a, random() b
+ FROM lineitem_hash) sub
+ GROUP BY 1
+ ORDER BY 1 DESC
+ LIMIT 5;
+ l_orderkey | count | count
+---------------------------------------------------------------------
+ 14947 | 2 | 1
+ 14946 | 2 | 1
+ 14945 | 6 | 1
+ 14944 | 2 | 1
+ 14919 | 1 | 1
+(5 rows)
+
+SELECT user_id,
+ count(sub.a::int),
+ count(DISTINCT sub.a::int),
+ count(DISTINCT (sub).a)
+FROM
+ (SELECT user_id,
+ unnest(ARRAY[user_id * 1.5])a,
+ random() b
+ FROM users_table
+ ) sub
+GROUP BY 1
+ORDER BY 1 DESC
+LIMIT 5;
+ user_id | count | count | count
+---------------------------------------------------------------------
+ 6 | 11 | 1 | 1
+ 5 | 27 | 1 | 1
+ 4 | 24 | 1 | 1
+ 3 | 18 | 1 | 1
+ 2 | 19 | 1 | 1
+(5 rows)
+
+CREATE TYPE test_item AS
+(
+ id INTEGER,
+ duration INTEGER
+);
+CREATE TABLE test_count_distinct_array (key int, value int , value_arr test_item[]);
+SELECT create_distributed_table('test_count_distinct_array', 'key');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO test_count_distinct_array SELECT i, i, ARRAY[(i,i)::test_item] FROM generate_Series(0, 1000) i;
+SELECT
+ key,
+ count(DISTINCT value),
+ count(DISTINCT (item)."id"),
+ count(DISTINCT (item)."id" * 3)
+FROM
+ (
+ SELECT key, unnest(value_arr) as item, value FROM test_count_distinct_array
+ ) as sub
+GROUP BY 1
+ORDER BY 1 DESC
+LIMIT 5;
+ key | count | count | count
+---------------------------------------------------------------------
+ 1000 | 1 | 1 | 1
+ 999 | 1 | 1 | 1
+ 998 | 1 | 1 | 1
+ 997 | 1 | 1 | 1
+ 996 | 1 | 1 | 1
+(5 rows)
+
+DROP TABLE test_count_distinct_array;
+DROP TYPE test_item;
+-- other distinct aggregate are not supported
+SELECT *
+ FROM (
+ SELECT
+ l_linenumber, sum(DISTINCT l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_linenumber) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ERROR: cannot compute aggregate (distinct)
+DETAIL: table partitioning is unsuitable for aggregate (distinct)
+SELECT *
+ FROM (
+ SELECT
+ l_linenumber, avg(DISTINCT l_partkey)
+ FROM lineitem_hash
+ GROUP BY l_linenumber) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ERROR: cannot compute aggregate (distinct)
+DETAIL: table partitioning is unsuitable for aggregate (distinct)
+-- whole row references, oid, and ctid are not supported in count distinct
+-- test table does not have oid or ctid enabled, so tests for them are skipped
+SELECT *
+ FROM (
+ SELECT
+ l_linenumber, count(DISTINCT lineitem_hash)
+ FROM lineitem_hash
+ GROUP BY l_linenumber) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ERROR: cannot compute count (distinct)
+DETAIL: Non-column references are not supported yet
+SELECT *
+ FROM (
+ SELECT
+ l_linenumber, count(DISTINCT lineitem_hash.*)
+ FROM lineitem_hash
+ GROUP BY l_linenumber) sub
+ ORDER BY 2 DESC, 1 DESC
+ LIMIT 10;
+ERROR: cannot compute count (distinct)
+DETAIL: Non-column references are not supported yet
+-- count distinct pushdown is enabled
+SELECT *
+ FROM (
+ SELECT
+ l_shipdate,
+ count(DISTINCT
+ CASE
+ WHEN l_shipmode = 'TRUCK' THEN l_partkey
+ ELSE NULL
+ END) as distinct_part,
+ extract(year from l_shipdate) as year
+ FROM
+ lineitem_hash
+ GROUP BY l_shipdate, year) sub
+ WHERE year = 1995
+ ORDER BY 2 DESC, 1
+ LIMIT 10;
+ l_shipdate | distinct_part | year
+---------------------------------------------------------------------
+ 11-29-1995 | 5 | 1995
+ 03-24-1995 | 4 | 1995
+ 09-18-1995 | 4 | 1995
+ 01-17-1995 | 3 | 1995
+ 04-02-1995 | 3 | 1995
+ 05-23-1995 | 3 | 1995
+ 08-11-1995 | 3 | 1995
+ 09-27-1995 | 3 | 1995
+ 10-27-1995 | 3 | 1995
+ 10-30-1995 | 3 | 1995
+(10 rows)
+
+-- count distinct pushdown is enabled
+SELECT *
+ FROM (
+ SELECT
+ l_shipdate,
+ count(DISTINCT
+ CASE
+ WHEN l_shipmode = 'TRUCK' THEN l_partkey
+ ELSE NULL
+ END) as distinct_part,
+ extract(year from l_shipdate) as year
+ FROM
+ lineitem_hash
+ GROUP BY l_shipdate, year) sub
+ WHERE year = 1995
+ ORDER BY 2 DESC, 1
+ LIMIT 10;
+ l_shipdate | distinct_part | year
+---------------------------------------------------------------------
+ 11-29-1995 | 5 | 1995
+ 03-24-1995 | 4 | 1995
+ 09-18-1995 | 4 | 1995
+ 01-17-1995 | 3 | 1995
+ 04-02-1995 | 3 | 1995
+ 05-23-1995 | 3 | 1995
+ 08-11-1995 | 3 | 1995
+ 09-27-1995 | 3 | 1995
+ 10-27-1995 | 3 | 1995
+ 10-30-1995 | 3 | 1995
+(10 rows)
+
+SELECT *
+ FROM (
+ SELECT
+ l_shipdate,
+ count(DISTINCT
+ CASE
+ WHEN l_shipmode = 'TRUCK' THEN l_partkey
+ ELSE NULL
+ END) as distinct_part,
+ extract(year from l_shipdate) as year
+ FROM
+ lineitem_hash
+ GROUP BY l_shipdate) sub
+ WHERE year = 1995
+ ORDER BY 2 DESC, 1
+ LIMIT 10;
+ l_shipdate | distinct_part | year
+---------------------------------------------------------------------
+ 11-29-1995 | 5 | 1995
+ 03-24-1995 | 4 | 1995
+ 09-18-1995 | 4 | 1995
+ 01-17-1995 | 3 | 1995
+ 04-02-1995 | 3 | 1995
+ 05-23-1995 | 3 | 1995
+ 08-11-1995 | 3 | 1995
+ 09-27-1995 | 3 | 1995
+ 10-27-1995 | 3 | 1995
+ 10-30-1995 | 3 | 1995
+(10 rows)
+
+DROP TABLE lineitem_hash;
diff --git a/src/test/regress/expected/multi_create_fdw.out b/src/test/regress/expected/multi_create_fdw.out
index e15d17546..3379a82fa 100644
--- a/src/test/regress/expected/multi_create_fdw.out
+++ b/src/test/regress/expected/multi_create_fdw.out
@@ -3,7 +3,11 @@ SET citus.next_shard_id TO 390000;
-- get ready for the foreign data wrapper tests
-- ===================================================================
-- create fake fdw for use in tests
-CREATE FUNCTION fake_fdw_handler()
+SET client_min_messages TO WARNING;
+DROP SERVER IF EXISTS fake_fdw_server CASCADE;
+DROP FOREIGN DATA WRAPPER IF EXISTS fake_fdw CASCADE;
+RESET client_min_messages;
+CREATE OR REPLACE FUNCTION fake_fdw_handler()
RETURNS fdw_handler
AS 'citus'
LANGUAGE C STRICT;
diff --git a/src/test/regress/expected/multi_deparse_shard_query.out b/src/test/regress/expected/multi_deparse_shard_query.out
index b24c0e4cb..4657db10d 100644
--- a/src/test/regress/expected/multi_deparse_shard_query.out
+++ b/src/test/regress/expected/multi_deparse_shard_query.out
@@ -12,6 +12,8 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
t
(1 row)
+CREATE SCHEMA multi_deparse_shard_query;
+SET search_path TO multi_deparse_shard_query;
SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
CREATE FUNCTION deparse_shard_query_test(text)
@@ -74,7 +76,7 @@ SELECT deparse_shard_query_test('
INSERT INTO raw_events_1
SELECT * FROM raw_events_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_1, raw_events_1_1.value_2, raw_events_1_1.value_3, raw_events_1_1.value_4, raw_events_1_1.value_5, raw_events_1_1.value_6, raw_events_1_1.value_7, raw_events_1_1.event_at FROM public.raw_events_1 raw_events_1_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_1, raw_events_1_1.value_2, raw_events_1_1.value_3, raw_events_1_1.value_4, raw_events_1_1.value_5, raw_events_1_1.value_6, raw_events_1_1.value_7, raw_events_1_1.event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -87,7 +89,7 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -101,7 +103,7 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_1_1.tenant_id, (raw_events_1_1.value_5)::integer AS value_5, raw_events_1_1.value_4, (raw_events_1_1.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_1_1.tenant_id, (raw_events_1_1.value_5)::integer AS value_5, raw_events_1_1.value_4, (raw_events_1_1.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -115,7 +117,7 @@ SELECT
FROM
raw_events_2;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_2.tenant_id, (raw_events_2.value_5)::integer AS value_5, raw_events_2.value_4, (raw_events_2.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT raw_events_2.tenant_id, (raw_events_2.value_5)::integer AS value_5, raw_events_2.value_4, (raw_events_2.value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_2
deparse_shard_query_test
---------------------------------------------------------------------
@@ -131,7 +133,7 @@ FROM
GROUP BY
tenant_id, date_trunc(\'hour\', event_at)
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT raw_events_1.tenant_id, sum(raw_events_1.value_1) AS sum, avg(raw_events_1.value_3) AS avg, sum(raw_events_1.value_4) AS sum, avg(raw_events_1.value_6) AS avg, date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY raw_events_1.tenant_id, (date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone))
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT raw_events_1.tenant_id, sum(raw_events_1.value_1) AS sum, avg(raw_events_1.value_3) AS avg, sum(raw_events_1.value_4) AS sum, avg(raw_events_1.value_6) AS avg, date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone) AS date_trunc FROM multi_deparse_shard_query.raw_events_1 GROUP BY raw_events_1.tenant_id, (date_trunc('hour'::text, (raw_events_1.event_at)::timestamp with time zone))
deparse_shard_query_test
---------------------------------------------------------------------
@@ -148,7 +150,7 @@ FROM
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1, public.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
deparse_shard_query_test
---------------------------------------------------------------------
@@ -164,7 +166,7 @@ FROM
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1, public.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1_1.event_at
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1_1.event_at
deparse_shard_query_test
---------------------------------------------------------------------
@@ -184,7 +186,7 @@ GROUP BY
ORDER BY
r2.event_at DESC;
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, public.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM multi_deparse_shard_query.raw_events_1 r1, multi_deparse_shard_query.raw_events_2 r2, multi_deparse_shard_query.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
deparse_shard_query_test
---------------------------------------------------------------------
@@ -201,7 +203,7 @@ FROM
GROUP BY
event_at, tenant_id;
');
-INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum, raw_events_1.event_at FROM public.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
+INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum, raw_events_1.event_at FROM multi_deparse_shard_query.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
deparse_shard_query_test
---------------------------------------------------------------------
@@ -217,7 +219,7 @@ FROM
GROUP BY
event_at, tenant_id;
');
-INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
+INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, sum((raw_events_1.value_5)::integer) AS sum FROM multi_deparse_shard_query.raw_events_1 GROUP BY raw_events_1.event_at, raw_events_1.tenant_id
deparse_shard_query_test
---------------------------------------------------------------------
@@ -236,7 +238,7 @@ WITH RECURSIVE hierarchy as (
h.value_1 = re.value_6))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT hierarchy.tenant_id, hierarchy.value_1, hierarchy.level FROM hierarchy WHERE (hierarchy.level OPERATOR(pg_catalog.<=) 2)
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN multi_deparse_shard_query.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT hierarchy.tenant_id, hierarchy.value_1, hierarchy.level FROM hierarchy WHERE (hierarchy.level OPERATOR(pg_catalog.<=) 2)
deparse_shard_query_test
---------------------------------------------------------------------
@@ -249,7 +251,7 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT raw_events_1.value_1 FROM public.raw_events_1
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (sum_value_1) SELECT DISTINCT raw_events_1.value_1 FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -262,7 +264,7 @@ SELECT value_3, value_2, tenant_id
FROM raw_events_1
WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000);
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT raw_events_1.tenant_id, raw_events_1.value_2, raw_events_1.value_3 FROM public.raw_events_1 WHERE (((raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) AND ((raw_events_1.value_6 OPERATOR(pg_catalog.<) 3000) OR (raw_events_1.value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT raw_events_1.tenant_id, raw_events_1.value_2, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1 WHERE (((raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (raw_events_1.value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) AND ((raw_events_1.value_6 OPERATOR(pg_catalog.<) 3000) OR (raw_events_1.value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
deparse_shard_query_test
---------------------------------------------------------------------
@@ -274,7 +276,7 @@ SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id
FROM raw_events_1
WHERE event_at = now();
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, rank() OVER (PARTITION BY raw_events_1.tenant_id ORDER BY raw_events_1.value_6) AS rank FROM public.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now())
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT raw_events_1.tenant_id, rank() OVER (PARTITION BY raw_events_1.tenant_id ORDER BY raw_events_1.value_6) AS rank FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now())
deparse_shard_query_test
---------------------------------------------------------------------
@@ -287,7 +289,7 @@ SELECT random(), int4eq(1, max(value_1))::int, value_6
WHERE event_at = now()
GROUP BY event_at, value_7, value_6;
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(raw_events_1.value_1)))::integer AS int4eq, raw_events_1.value_6, random() AS random FROM public.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now()) GROUP BY raw_events_1.event_at, raw_events_1.value_7, raw_events_1.value_6
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(raw_events_1.value_1)))::integer AS int4eq, raw_events_1.value_6, random() AS random FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.event_at OPERATOR(pg_catalog.=) now()) GROUP BY raw_events_1.event_at, raw_events_1.value_7, raw_events_1.value_6
deparse_shard_query_test
---------------------------------------------------------------------
@@ -308,7 +310,7 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(raw_events_1.tenant_id) AS max, count(DISTINCT CASE WHEN (raw_events_1.value_1 OPERATOR(pg_catalog.>) 100) THEN raw_events_1.tenant_id ELSE (raw_events_1.value_6)::bigint END) AS c FROM public.raw_events_1
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1) SELECT max(raw_events_1.tenant_id) AS max, count(DISTINCT CASE WHEN (raw_events_1.value_1 OPERATOR(pg_catalog.>) 100) THEN raw_events_1.tenant_id ELSE (raw_events_1.value_6)::bigint END) AS c FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -325,7 +327,7 @@ FROM
raw_events_2
) as foo
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT foo.tenant_id, foo.value_1, 10 AS value_6, foo.value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT foo.tenant_id, foo.value_1, 10 AS value_6, foo.value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM multi_deparse_shard_query.raw_events_2) foo
deparse_shard_query_test
---------------------------------------------------------------------
@@ -346,7 +348,7 @@ FROM
GROUP BY
tenant_id, date_trunc(\'hour\', event_at)
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT foo.tenant_id, sum(foo.value_1) AS sum, sum((foo.value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY foo.tenant_id, (date_trunc('hour'::text, (foo.event_at)::timestamp with time zone))
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT foo.tenant_id, sum(foo.value_1) AS sum, sum((foo.value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM multi_deparse_shard_query.raw_events_2, multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY foo.tenant_id, (date_trunc('hour'::text, (foo.event_at)::timestamp with time zone))
deparse_shard_query_test
---------------------------------------------------------------------
@@ -363,7 +365,7 @@ FROM
raw_events_1
) as foo
');
-INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.tenant_id, foo.value_1, foo.value_2, foo.value_3, foo.value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.tenant_id, foo.value_1, foo.value_2, foo.value_3, foo.value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
deparse_shard_query_test
---------------------------------------------------------------------
@@ -380,7 +382,7 @@ FROM
raw_events_1
) as foo
');
-INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.value_2, foo.value_4, foo.value_1, foo.value_3, foo.tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT foo.value_2, foo.value_4, foo.value_1, foo.value_3, foo.tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
deparse_shard_query_test
---------------------------------------------------------------------
@@ -396,7 +398,7 @@ FROM
ORDER BY
value_2, value_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_7, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1 ORDER BY raw_events_1_1.value_2, raw_events_1_1.value_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_7, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1 ORDER BY raw_events_1_1.value_2, raw_events_1_1.value_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -411,9 +413,11 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM public.raw_events_1 raw_events_1_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT raw_events_1_1.tenant_id, raw_events_1_1.value_4, 10 AS value_6, raw_events_1_1.value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 raw_events_1_1
deparse_shard_query_test
---------------------------------------------------------------------
(1 row)
+SET client_min_messages TO ERROR;
+DROP SCHEMA multi_deparse_shard_query CASCADE;
diff --git a/src/test/regress/expected/multi_deparse_shard_query_0.out b/src/test/regress/expected/multi_deparse_shard_query_0.out
index 71742c589..4f2ca98b8 100644
--- a/src/test/regress/expected/multi_deparse_shard_query_0.out
+++ b/src/test/regress/expected/multi_deparse_shard_query_0.out
@@ -12,6 +12,8 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
f
(1 row)
+CREATE SCHEMA multi_deparse_shard_query;
+SET search_path TO multi_deparse_shard_query;
SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
CREATE FUNCTION deparse_shard_query_test(text)
@@ -74,7 +76,7 @@ SELECT deparse_shard_query_test('
INSERT INTO raw_events_1
SELECT * FROM raw_events_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM public.raw_events_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -87,7 +89,7 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -101,7 +103,7 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -115,7 +117,7 @@ SELECT
FROM
raw_events_2;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_2
deparse_shard_query_test
---------------------------------------------------------------------
@@ -131,7 +133,7 @@ FROM
GROUP BY
tenant_id, date_trunc(\'hour\', event_at)
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM multi_deparse_shard_query.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
deparse_shard_query_test
---------------------------------------------------------------------
@@ -148,7 +150,7 @@ FROM
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)
deparse_shard_query_test
---------------------------------------------------------------------
@@ -164,7 +166,7 @@ FROM
WHERE
raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1, multi_deparse_shard_query.raw_events_2 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id) GROUP BY raw_events_1.event_at
deparse_shard_query_test
---------------------------------------------------------------------
@@ -184,7 +186,7 @@ GROUP BY
ORDER BY
r2.event_at DESC;
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, public.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM multi_deparse_shard_query.raw_events_1 r1, multi_deparse_shard_query.raw_events_2 r2, multi_deparse_shard_query.raw_events_1 r3 WHERE ((r1.tenant_id OPERATOR(pg_catalog.=) r2.tenant_id) AND (r2.tenant_id OPERATOR(pg_catalog.=) r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC
deparse_shard_query_test
---------------------------------------------------------------------
@@ -201,7 +203,7 @@ FROM
GROUP BY
event_at, tenant_id;
');
-INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM public.raw_events_1 GROUP BY event_at, tenant_id
+INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM multi_deparse_shard_query.raw_events_1 GROUP BY event_at, tenant_id
deparse_shard_query_test
---------------------------------------------------------------------
@@ -217,7 +219,7 @@ FROM
GROUP BY
event_at, tenant_id;
');
-INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY event_at, tenant_id
+INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1) INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM multi_deparse_shard_query.raw_events_1 GROUP BY event_at, tenant_id
deparse_shard_query_test
---------------------------------------------------------------------
@@ -236,7 +238,7 @@ WITH RECURSIVE hierarchy as (
h.value_1 = re.value_6))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2)
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) 1) UNION SELECT re.value_2, (h.level OPERATOR(pg_catalog.+) 1), re.tenant_id FROM (hierarchy h JOIN multi_deparse_shard_query.raw_events_1 re ON (((h.tenant_id OPERATOR(pg_catalog.=) re.tenant_id) AND (h.value_1 OPERATOR(pg_catalog.=) re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level OPERATOR(pg_catalog.<=) 2)
deparse_shard_query_test
---------------------------------------------------------------------
@@ -249,7 +251,7 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM public.raw_events_1
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -262,7 +264,7 @@ SELECT value_3, value_2, tenant_id
FROM raw_events_1
WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000);
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM public.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM multi_deparse_shard_query.raw_events_1 WHERE (((value_5 OPERATOR(pg_catalog.~~) '%s'::text) OR (value_5 OPERATOR(pg_catalog.~~) '%a'::text)) AND (tenant_id OPERATOR(pg_catalog.=) 1) AND ((value_6 OPERATOR(pg_catalog.<) 3000) OR (value_3 OPERATOR(pg_catalog.>) (8000)::double precision)))
deparse_shard_query_test
---------------------------------------------------------------------
@@ -274,7 +276,7 @@ SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id
FROM raw_events_1
WHERE event_at = now();
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now())
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM multi_deparse_shard_query.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now())
deparse_shard_query_test
---------------------------------------------------------------------
@@ -287,7 +289,7 @@ SELECT random(), int4eq(1, max(value_1))::int, value_6
WHERE event_at = now()
GROUP BY event_at, value_7, value_6;
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM public.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM multi_deparse_shard_query.raw_events_1 WHERE (event_at OPERATOR(pg_catalog.=) now()) GROUP BY event_at, value_7, value_6
deparse_shard_query_test
---------------------------------------------------------------------
@@ -308,7 +310,7 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM public.raw_events_1
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 OPERATOR(pg_catalog.>) 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -325,7 +327,7 @@ FROM
raw_events_2
) as foo
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM multi_deparse_shard_query.raw_events_2) foo
deparse_shard_query_test
---------------------------------------------------------------------
@@ -341,12 +343,12 @@ FROM
FROM
raw_events_2, raw_events_1
WHERE
- raw_events_1.tenant_id = raw_events_2.tenant_id
+ raw_events_1.tenant_id = raw_events_2.tenant_id
) as foo
GROUP BY
tenant_id, date_trunc(\'hour\', event_at)
');
-INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
+INFO: query: INSERT INTO multi_deparse_shard_query.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM multi_deparse_shard_query.raw_events_2, multi_deparse_shard_query.raw_events_1 WHERE (raw_events_1.tenant_id OPERATOR(pg_catalog.=) raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone))
deparse_shard_query_test
---------------------------------------------------------------------
@@ -363,7 +365,7 @@ FROM
raw_events_1
) as foo
');
-INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
deparse_shard_query_test
---------------------------------------------------------------------
@@ -380,7 +382,7 @@ FROM
raw_events_1
) as foo
');
-INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() OPERATOR(pg_catalog.*) (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM multi_deparse_shard_query.raw_events_1) foo
deparse_shard_query_test
---------------------------------------------------------------------
@@ -396,7 +398,7 @@ FROM
ORDER BY
value_2, value_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 ORDER BY value_2, value_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1 ORDER BY value_2, value_1
deparse_shard_query_test
---------------------------------------------------------------------
@@ -411,9 +413,11 @@ SELECT
FROM
raw_events_1;
');
-INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1
+INFO: query: INSERT INTO multi_deparse_shard_query.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM multi_deparse_shard_query.raw_events_1
deparse_shard_query_test
---------------------------------------------------------------------
(1 row)
+SET client_min_messages TO ERROR;
+DROP SCHEMA multi_deparse_shard_query CASCADE;
diff --git a/src/test/regress/expected/multi_explain.out b/src/test/regress/expected/multi_explain.out
index b3e47474f..17b673607 100644
--- a/src/test/regress/expected/multi_explain.out
+++ b/src/test/regress/expected/multi_explain.out
@@ -1,6 +1,18 @@
--
-- MULTI_EXPLAIN
--
+-- This test file has an alternative output because of the following in PG16:
+-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
+-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
+-- The alternative output can be deleted when we drop support for PG15
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
+ server_version_ge_16
+---------------------------------------------------------------------
+ t
+(1 row)
+
SET citus.next_shard_id TO 570000;
\a\t
SET citus.explain_distributed_queries TO on;
@@ -651,7 +663,7 @@ Aggregate
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), events.event_time
-> Hash Join
Hash Cond: (users.composite_id = events.composite_id)
-> Seq Scan on users_1400289 users
@@ -737,7 +749,7 @@ HashAggregate
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
-> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone, events.event_time
-> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate
@@ -853,7 +865,7 @@ Sort
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0)
-> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay, events.event_time
-> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate
@@ -951,7 +963,7 @@ Limit
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort
- Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), events.event_time
-> Nested Loop Left Join
-> Limit
-> Sort
@@ -2381,11 +2393,16 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
- -> Hash Join (actual rows=10 loops=1)
- Hash Cond: (ref_table.a = intermediate_result.a)
- -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
- -> Hash (actual rows=10 loops=1)
+ -> Merge Join (actual rows=10 loops=1)
+ Merge Cond: (intermediate_result.a = ref_table.a)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: intermediate_result.a
+ Sort Method: quicksort Memory: 25kB
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: ref_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
EXPLAIN :default_analyze_flags
SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table;
Aggregate (actual rows=1 loops=1)
@@ -2442,9 +2459,12 @@ Aggregate (actual rows=1 loops=1)
-> Aggregate (actual rows=1 loops=1)
InitPlan 1 (returns $0)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
- -> Result (actual rows=4 loops=1)
- One-Time Filter: $0
- -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ -> Sort (actual rows=4 loops=1)
+ Sort Key: dist_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Result (actual rows=4 loops=1)
+ One-Time Filter: $0
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
BEGIN;
EXPLAIN :default_analyze_flags
WITH r AS (
@@ -2486,7 +2506,10 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
- -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: intermediate_result.a2
+ Sort Method: quicksort Memory: 25kB
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
ROLLBACK;
-- https://github.com/citusdata/citus/issues/4074
prepare ref_select(int) AS select * from ref_table where 1 = $1;
diff --git a/src/test/regress/expected/multi_explain_0.out b/src/test/regress/expected/multi_explain_0.out
new file mode 100644
index 000000000..9534cefb8
--- /dev/null
+++ b/src/test/regress/expected/multi_explain_0.out
@@ -0,0 +1,3219 @@
+--
+-- MULTI_EXPLAIN
+--
+-- This test file has an alternative output because of the following in PG16:
+-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
+-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
+-- The alternative output can be deleted when we drop support for PG15
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
+ server_version_ge_16
+---------------------------------------------------------------------
+ f
+(1 row)
+
+SET citus.next_shard_id TO 570000;
+\a\t
+SET citus.explain_distributed_queries TO on;
+SET citus.enable_repartition_joins to ON;
+-- Ensure tuple data in explain analyze output is the same on all PG versions
+SET citus.enable_binary_protocol = TRUE;
+-- Function that parses explain output as JSON
+CREATE OR REPLACE FUNCTION explain_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+CREATE OR REPLACE FUNCTION explain_analyze_json(query text)
+RETURNS jsonb
+AS $BODY$
+DECLARE
+ result jsonb;
+BEGIN
+ EXECUTE format('EXPLAIN (ANALYZE TRUE, FORMAT JSON) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that parses explain output as XML
+CREATE OR REPLACE FUNCTION explain_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- Function that parses explain output as XML
+CREATE OR REPLACE FUNCTION explain_analyze_xml(query text)
+RETURNS xml
+AS $BODY$
+DECLARE
+ result xml;
+BEGIN
+ EXECUTE format('EXPLAIN (ANALYZE true, FORMAT XML) %s', query) INTO result;
+ RETURN result;
+END;
+$BODY$ LANGUAGE plpgsql;
+-- VACUMM related tables to ensure test outputs are stable
+VACUUM ANALYZE lineitem;
+VACUUM ANALYZE orders;
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> HashAggregate
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem
+-- Test disable hash aggregate
+SET enable_hashagg TO off;
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> GroupAggregate
+ Group Key: remote_scan.l_quantity
+ -> Sort
+ Sort Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem
+SET enable_hashagg TO on;
+-- Test JSON format
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+[
+ {
+ "Plan": {
+ "Node Type": "Sort",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Sort Key": ["(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))", "remote_scan.l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Partial Mode": "Simple",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Group Key": ["remote_scan.l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Custom Scan",
+ "Parent Relationship": "Outer",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 2,
+ "Tasks Shown": "One of 2",
+ "Tasks": [
+ {
+ "Node": "host=localhost port=xxxxx dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Hashed",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Group Key": ["l_quantity"],
+ "Plans": [
+ {
+ "Node Type": "Seq Scan",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "lineitem_360000",
+ "Alias": "lineitem"
+ }
+ ]
+ }
+ }
+ ]
+
+ ]
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+]
+-- Validate JSON format
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+SELECT true AS valid FROM explain_analyze_json($$
+ WITH a AS (
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity LIMIT 10)
+ SELECT count(*) FROM a
+$$);
+t
+-- Test XML format
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Sort</Node-Type>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Sort-Key>
+        <Item>(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))</Item>
+        <Item>remote_scan.l_quantity</Item>
+      </Sort-Key>
+      <Plans>
+        <Plan>
+          <Node-Type>Aggregate</Node-Type>
+          <Strategy>Hashed</Strategy>
+          <Partial-Mode>Simple</Partial-Mode>
+          <Parent-Relationship>Outer</Parent-Relationship>
+          <Parallel-Aware>false</Parallel-Aware>
+          <Async-Capable>false</Async-Capable>
+          <Group-Key>
+            <Item>remote_scan.l_quantity</Item>
+          </Group-Key>
+          <Plans>
+            <Plan>
+              <Node-Type>Custom Scan</Node-Type>
+              <Parent-Relationship>Outer</Parent-Relationship>
+              <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+              <Parallel-Aware>false</Parallel-Aware>
+              <Async-Capable>false</Async-Capable>
+              <Distributed-Query>
+                <Job>
+                  <Task-Count>2</Task-Count>
+                  <Tasks-Shown>One of 2</Tasks-Shown>
+                  <Tasks>
+                    <Task>
+                      <Node>host=localhost port=xxxxx dbname=regression</Node>
+                      <Remote-Plan>
+                        <explain xmlns="http://www.postgresql.org/2009/explain">
+                          <Query>
+                            <Plan>
+                              <Node-Type>Aggregate</Node-Type>
+                              <Strategy>Hashed</Strategy>
+                              <Partial-Mode>Simple</Partial-Mode>
+                              <Parallel-Aware>false</Parallel-Aware>
+                              <Async-Capable>false</Async-Capable>
+                              <Group-Key>
+                                <Item>l_quantity</Item>
+                              </Group-Key>
+                              <Plans>
+                                <Plan>
+                                  <Node-Type>Seq Scan</Node-Type>
+                                  <Parent-Relationship>Outer</Parent-Relationship>
+                                  <Parallel-Aware>false</Parallel-Aware>
+                                  <Async-Capable>false</Async-Capable>
+                                  <Relation-Name>lineitem_360000</Relation-Name>
+                                  <Alias>lineitem</Alias>
+                                </Plan>
+                              </Plans>
+                            </Plan>
+                          </Query>
+                        </explain>
+                      </Remote-Plan>
+                    </Task>
+                  </Tasks>
+                </Job>
+              </Distributed-Query>
+            </Plan>
+          </Plans>
+        </Plan>
+      </Plans>
+    </Plan>
+  </Query>
+</explain>
+-- Validate XML format
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);
+t
+SELECT true AS valid FROM explain_analyze_xml($$
+ WITH a AS (
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity LIMIT 10)
+ SELECT count(*) FROM a
+$$);
+t
+-- Test YAML format
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+- Plan:
+ Node Type: "Sort"
+ Parallel Aware: false
+ Async Capable: false
+ Sort Key:
+ - "(COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))"
+ - "remote_scan.l_quantity"
+ Plans:
+ - Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Partial Mode: "Simple"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Group Key:
+ - "remote_scan.l_quantity"
+ Plans:
+ - Node Type: "Custom Scan"
+ Parent Relationship: "Outer"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Distributed Query:
+ Job:
+ Task Count: 2
+ Tasks Shown: "One of 2"
+ Tasks:
+ - Node: "host=localhost port=xxxxx dbname=regression"
+ Remote Plan:
+ - Plan:
+ Node Type: "Aggregate"
+ Strategy: "Hashed"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Async Capable: false
+ Group Key:
+ - "l_quantity"
+ Plans:
+ - Node Type: "Seq Scan"
+ Parent Relationship: "Outer"
+ Parallel Aware: false
+ Async Capable: false
+ Relation Name: "lineitem_360000"
+ Alias: "lineitem"
+
+-- Test Text format
+EXPLAIN (COSTS FALSE, FORMAT TEXT)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> HashAggregate
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem
+-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+SELECT public.plan_normalize_memory($Q$
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
+Sort (actual rows=50 loops=1)
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ Sort Method: quicksort Memory: xxx
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 1800 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 900 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1)
+-- EXPLAIN ANALYZE doesn't show worker tasks for repartition joins yet
+SET citus.shard_count TO 3;
+CREATE TABLE t1(a int, b int);
+CREATE TABLE t2(a int, b int);
+SELECT create_distributed_table('t1', 'a'), create_distributed_table('t2', 'a');
+|
+BEGIN;
+SET LOCAL citus.enable_repartition_joins TO true;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
+Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 6
+ Tuple data received from nodes: 48 bytes
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 6
+-- Confirm repartiton join in distributed subplan works
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
+WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
+SELECT count(*) from repartition;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 14 bytes
+ Result destination: Write locally
+ -> Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 6
+ Tuple data received from nodes: 48 bytes
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 3
+ Merge Task Count: 6
+ Task Count: 1
+ Tuple data received from nodes: 8 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+END;
+DROP TABLE t1, t2;
+-- Test query text output, with ANALYZE ON
+SELECT public.plan_normalize_memory($Q$
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
+Sort (actual rows=50 loops=1)
+ Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ Sort Method: quicksort Memory: xxx
+ -> HashAggregate (actual rows=50 loops=1)
+ Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
+ Output: remote_scan.l_quantity, remote_scan.count_quantity
+ Task Count: 2
+ Tuple data received from nodes: 1800 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT l_quantity, count(*) AS count_quantity FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity
+ Tuple data received from node: 900 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate (actual rows=50 loops=1)
+ Output: l_quantity, count(*)
+ Group Key: lineitem.l_quantity
+ -> Seq Scan on public.lineitem_360000 lineitem (actual rows=5894 loops=1)
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Test query text output, with ANALYZE OFF
+EXPLAIN (COSTS FALSE, ANALYZE FALSE, TIMING FALSE, SUMMARY FALSE, VERBOSE TRUE)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+Sort
+ Output: remote_scan.l_quantity, (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ -> HashAggregate
+ Output: remote_scan.l_quantity, COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_quantity, remote_scan.count_quantity
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT l_quantity, count(*) AS count_quantity FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_quantity, count(*)
+ Group Key: lineitem.l_quantity
+ -> Seq Scan on public.lineitem_360000 lineitem
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Test verbose
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
+Aggregate
+ Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2"
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity) FROM public.lineitem_360000 lineitem WHERE true
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ Output: sum(l_quantity), sum(l_quantity), count(l_quantity)
+ -> Seq Scan on public.lineitem_360000 lineitem
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Test join
+EXPLAIN (COSTS FALSE)
+ SELECT * FROM lineitem
+ JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0
+ ORDER BY l_quantity LIMIT 10;
+Limit
+ -> Sort
+ Sort Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Sort
+ Sort Key: lineitem.l_quantity
+ -> Hash Join
+ Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
+ -> Seq Scan on lineitem_360000 lineitem
+ Filter: (l_quantity < 5.0)
+ -> Hash
+ -> Seq Scan on orders_360002 orders
+-- Test insert
+EXPLAIN (COSTS FALSE)
+ INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0);
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on lineitem_360000 citus_table_alias
+ -> Values Scan on "*VALUES*"
+-- Test update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_360000 lineitem
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+BEGIN;
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
+ UPDATE lineitem
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_360000 lineitem (actual rows=0 loops=1)
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=0 loops=1)
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+ Rows Removed by Filter: 6
+ROLLBACk;
+-- Test delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem
+ WHERE l_orderkey = 1 AND l_partkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_360000 lineitem
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey = 1)
+ Filter: (l_partkey = 0)
+-- Test zero-shard update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 AND l_orderkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 0
+ Tasks Shown: All
+-- Test zero-shard delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem
+ WHERE l_orderkey = 1 AND l_orderkey = 0;
+Custom Scan (Citus Adaptive)
+ Task Count: 0
+ Tasks Shown: All
+-- Test single-shard SELECT
+EXPLAIN (COSTS FALSE)
+ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey = 5)
+SELECT true AS valid FROM explain_xml($$
+ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$);
+t
+-- Test CREATE TABLE ... AS
+EXPLAIN (COSTS FALSE)
+ CREATE TABLE explain_result AS
+ SELECT * FROM lineitem;
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on lineitem_360000 lineitem
+-- Test having
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem
+ HAVING sum(l_quantity) > 100;
+Aggregate
+ Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2")))
+ Filter: (sum(remote_scan.worker_column_4) > '100'::numeric)
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) AS worker_column_4 FROM public.lineitem_360000 lineitem WHERE true
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity)
+ -> Seq Scan on public.lineitem_360000 lineitem
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Test having without aggregate
+EXPLAIN (COSTS FALSE, VERBOSE TRUE)
+ SELECT l_quantity FROM lineitem
+ GROUP BY l_quantity
+ HAVING l_quantity > (100 * random());
+HashAggregate
+ Output: remote_scan.l_quantity
+ Group Key: remote_scan.l_quantity
+ Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random()))
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_quantity, remote_scan.worker_column_2
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Query: SELECT l_quantity, l_quantity AS worker_column_2 FROM public.lineitem_360000 lineitem WHERE true GROUP BY l_quantity
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_quantity, l_quantity
+ Group Key: lineitem.l_quantity
+ -> Seq Scan on public.lineitem_360000 lineitem
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+-- Subquery pushdown tests with explain
+EXPLAIN (COSTS OFF)
+SELECT
+ avg(array_length(events, 1)) AS event_average
+FROM
+ (SELECT
+ tenant_id,
+ user_id,
+ array_agg(event_type ORDER BY event_time) AS events
+ FROM
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ event_type,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type IN ('click', 'submit', 'pay')) AS subquery
+ GROUP BY
+ tenant_id,
+ user_id) AS subquery;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> GroupAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ -> Sort
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ -> Hash Join
+ Hash Cond: (users.composite_id = events.composite_id)
+ -> Seq Scan on users_1400289 users
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events
+ Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[]))
+-- Union and left join subquery pushdown
+EXPLAIN (COSTS OFF)
+SELECT
+ avg(array_length(events, 1)) AS event_average,
+ hasdone
+FROM
+ (SELECT
+ subquery_1.tenant_id,
+ subquery_1.user_id,
+ array_agg(event ORDER BY event_time) AS events,
+ COALESCE(hasdone, 'Has not done paying') AS hasdone
+ FROM
+ (
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ (users.composite_id) as composite_id,
+ 'action=>1'AS event,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'click')
+ UNION
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ (users.composite_id) as composite_id,
+ 'action=>2'AS event,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'submit')
+ ) AS subquery_1
+ LEFT JOIN
+ (SELECT
+ DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id,
+ (composite_id).tenant_id,
+ (composite_id).user_id,
+ 'Has done paying'::TEXT AS hasdone
+ FROM
+ events
+ WHERE
+ events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'pay') AS subquery_2
+ ON
+ subquery_1.composite_id = subquery_2.composite_id
+ GROUP BY
+ subquery_1.tenant_id,
+ subquery_1.user_id,
+ hasdone) AS subquery_top
+GROUP BY
+ hasdone;
+HashAggregate
+ Group Key: remote_scan.hasdone
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> GroupAggregate
+ Group Key: subquery_top.hasdone
+ -> Sort
+ Sort Key: subquery_top.hasdone
+ -> Subquery Scan on subquery_top
+ -> GroupAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
+ -> Sort
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
+ -> Hash Left Join
+ Hash Cond: (users.composite_id = subquery_2.composite_id)
+ -> HashAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time
+ -> Append
+ -> Hash Join
+ Hash Cond: (users.composite_id = events.composite_id)
+ -> Seq Scan on users_1400289 users
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events
+ Filter: ((event_type)::text = 'click'::text)
+ -> Hash Join
+ Hash Cond: (users_1.composite_id = events_1.composite_id)
+ -> Seq Scan on users_1400289 users_1
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events_1
+ Filter: ((event_type)::text = 'submit'::text)
+ -> Hash
+ -> Subquery Scan on subquery_2
+ -> Unique
+ -> Sort
+ Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id)
+ -> Seq Scan on events_1400285 events_2
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
+-- Union, left join and having subquery pushdown
+EXPLAIN (COSTS OFF)
+ SELECT
+ avg(array_length(events, 1)) AS event_average,
+ count_pay
+ FROM (
+ SELECT
+ subquery_1.tenant_id,
+ subquery_1.user_id,
+ array_agg(event ORDER BY event_time) AS events,
+ COALESCE(count_pay, 0) AS count_pay
+ FROM
+ (
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ (users.composite_id),
+ 'action=>1'AS event,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'click')
+ UNION
+ (SELECT
+ (users.composite_id).tenant_id,
+ (users.composite_id).user_id,
+ (users.composite_id),
+ 'action=>2'AS event,
+ events.event_time
+ FROM
+ users,
+ events
+ WHERE
+ (users.composite_id) = (events.composite_id) AND
+ users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'submit')
+ ) AS subquery_1
+ LEFT JOIN
+ (SELECT
+ (composite_id).tenant_id,
+ (composite_id).user_id,
+ composite_id,
+ COUNT(*) AS count_pay
+ FROM
+ events
+ WHERE
+ events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND
+ event_type = 'pay'
+ GROUP BY
+ composite_id
+ HAVING
+ COUNT(*) > 2) AS subquery_2
+ ON
+ subquery_1.composite_id = subquery_2.composite_id
+ GROUP BY
+ subquery_1.tenant_id,
+ subquery_1.user_id,
+ count_pay) AS subquery_top
+WHERE
+ array_ndims(events) > 0
+GROUP BY
+ count_pay
+ORDER BY
+ count_pay;
+Sort
+ Sort Key: remote_scan.count_pay
+ -> HashAggregate
+ Group Key: remote_scan.count_pay
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> GroupAggregate
+ Group Key: subquery_top.count_pay
+ -> Sort
+ Sort Key: subquery_top.count_pay
+ -> Subquery Scan on subquery_top
+ -> GroupAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
+ Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0)
+ -> Sort
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
+ -> Hash Left Join
+ Hash Cond: (users.composite_id = subquery_2.composite_id)
+ -> HashAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time
+ -> Append
+ -> Hash Join
+ Hash Cond: (users.composite_id = events.composite_id)
+ -> Seq Scan on users_1400289 users
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events
+ Filter: ((event_type)::text = 'click'::text)
+ -> Hash Join
+ Hash Cond: (users_1.composite_id = events_1.composite_id)
+ -> Seq Scan on users_1400289 users_1
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Hash
+ -> Seq Scan on events_1400285 events_1
+ Filter: ((event_type)::text = 'submit'::text)
+ -> Hash
+ -> Subquery Scan on subquery_2
+ -> GroupAggregate
+ Group Key: events_2.composite_id
+ Filter: (count(*) > 2)
+ -> Sort
+ Sort Key: events_2.composite_id
+ -> Seq Scan on events_1400285 events_2
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text))
+-- Lateral join subquery pushdown
+-- set subquery_pushdown due to limit in the query
+SET citus.subquery_pushdown to ON;
+NOTICE: Setting citus.subquery_pushdown flag is discouraged becuase it forces the planner to pushdown certain queries, skipping relevant correctness checks.
+DETAIL: When enabled, the planner skips many correctness checks for subqueries and pushes down the queries to shards as-is. It means that the queries are likely to return wrong results unless the user is absolutely sure that pushing down the subquery is safe. This GUC is maintained only for backward compatibility, no new users are supposed to use it. The planner is capable of pushing down as much computation as possible to the shards depending on the query.
+EXPLAIN (COSTS OFF)
+SELECT
+ tenant_id,
+ user_id,
+ user_lastseen,
+ event_array
+FROM
+ (SELECT
+ tenant_id,
+ user_id,
+ max(lastseen) as user_lastseen,
+ array_agg(event_type ORDER BY event_time) AS event_array
+ FROM
+ (SELECT
+ (composite_id).tenant_id,
+ (composite_id).user_id,
+ composite_id,
+ lastseen
+ FROM
+ users
+ WHERE
+ composite_id >= '(1, -9223372036854775808)'::user_composite_type AND
+ composite_id <= '(1, 9223372036854775807)'::user_composite_type
+ ORDER BY
+ lastseen DESC
+ LIMIT
+ 10
+ ) AS subquery_top
+ LEFT JOIN LATERAL
+ (SELECT
+ event_type,
+ event_time
+ FROM
+ events
+ WHERE
+ (composite_id) = subquery_top.composite_id
+ ORDER BY
+ event_time DESC
+ LIMIT
+ 99) AS subquery_lateral
+ ON
+ true
+ GROUP BY
+ tenant_id,
+ user_id
+ ) AS shard_union
+ORDER BY
+ user_lastseen DESC
+LIMIT
+ 10;
+Limit
+ -> Sort
+ Sort Key: remote_scan.user_lastseen DESC
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Sort
+ Sort Key: (max(users.lastseen)) DESC
+ -> GroupAggregate
+ Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ -> Sort
+ Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
+ -> Nested Loop Left Join
+ -> Limit
+ -> Sort
+ Sort Key: users.lastseen DESC
+ -> Seq Scan on users_1400289 users
+ Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type))
+ -> Limit
+ -> Sort
+ Sort Key: events.event_time DESC
+ -> Seq Scan on events_1400285 events
+ Filter: (composite_id = users.composite_id)
+RESET citus.subquery_pushdown;
+-- Test all tasks output
+SET citus.explain_all_tasks TO on;
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey > 9030)
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360001 on lineitem_360001 lineitem
+ Index Cond: (l_orderkey > 9030)
+SELECT true AS valid FROM explain_xml($$
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
+t
+SELECT true AS valid FROM explain_json($$
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$);
+t
+-- Test multi shard update
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_hash_part
+ SET l_suppkey = 12;
+Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360041 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360042 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360043 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360044 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_hash_part
+ SET l_suppkey = 12
+ WHERE l_orderkey = 1 OR l_orderkey = 3;
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360041 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360042 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
+ Filter: ((l_orderkey = 1) OR (l_orderkey = 3))
+-- Test multi shard delete
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem_hash_part;
+Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360041 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360042 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360042 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360043 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360043 lineitem_hash_part
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360044 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360044 lineitem_hash_part
+-- Test analyze (with TIMING FALSE and SUMMARY FALSE for consistent output)
+SELECT public.plan_normalize_memory($Q$
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE)
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity;
+$Q$);
+Sort (actual rows=50 loops=1)
+ Sort Key: (COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint)), remote_scan.l_quantity
+ Sort Method: quicksort Memory: xxx
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: remote_scan.l_quantity
+ -> Custom Scan (Citus Adaptive) (actual rows=100 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 1800 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 900 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1)
+ -> Task
+ Tuple data received from node: 900 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate (actual rows=50 loops=1)
+ Group Key: l_quantity
+ -> Seq Scan on lineitem_360001 lineitem (actual rows=6106 loops=1)
+SET citus.explain_all_tasks TO off;
+-- Test update with subquery
+EXPLAIN (COSTS FALSE)
+ UPDATE lineitem_hash_part
+ SET l_suppkey = 12
+ FROM orders_hash_part
+ WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
+Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_hash_part_360041 lineitem_hash_part
+ -> Hash Join
+ Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ -> Hash
+ -> Seq Scan on orders_hash_part_360045 orders_hash_part
+-- Test delete with subquery
+EXPLAIN (COSTS FALSE)
+ DELETE FROM lineitem_hash_part
+ USING orders_hash_part
+ WHERE orders_hash_part.o_orderkey = lineitem_hash_part.l_orderkey;
+Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360041 lineitem_hash_part
+ -> Hash Join
+ Hash Cond: (lineitem_hash_part.l_orderkey = orders_hash_part.o_orderkey)
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ -> Hash
+ -> Seq Scan on orders_hash_part_360045 orders_hash_part
+-- Test task tracker
+EXPLAIN (COSTS FALSE)
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey > 9030)
+-- Test re-partition join
+EXPLAIN (COSTS FALSE)
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 6
+ Tasks Shown: None, not supported for re-partition queries
+ -> MapMergeJob
+ Map Task Count: 6
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 2
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 1
+ Merge Task Count: 6
+ -> MapMergeJob
+ Map Task Count: 1
+ Merge Task Count: 6
+EXPLAIN (COSTS FALSE, FORMAT JSON)
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+[
+ {
+ "Plan": {
+ "Node Type": "Aggregate",
+ "Strategy": "Plain",
+ "Partial Mode": "Simple",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Plans": [
+ {
+ "Node Type": "Custom Scan",
+ "Parent Relationship": "Outer",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 6,
+ "Tasks Shown": "None, not supported for re-partition queries",
+ "Dependent Jobs": [
+ {
+ "Map Task Count": 6,
+ "Merge Task Count": 6,
+ "Dependent Jobs": [
+ {
+ "Map Task Count": 2,
+ "Merge Task Count": 6
+ },
+ {
+ "Map Task Count": 1,
+ "Merge Task Count": 6
+ }
+ ]
+ },
+ {
+ "Map Task Count": 1,
+ "Merge Task Count": 6
+ }
+ ]
+ }
+ }
+ }
+ ]
+ }
+ }
+]
+SELECT true AS valid FROM explain_json($$
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT XML)
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Aggregate</Node-Type>
+      <Strategy>Plain</Strategy>
+      <Partial-Mode>Simple</Partial-Mode>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Plans>
+        <Plan>
+          <Node-Type>Custom Scan</Node-Type>
+          <Parent-Relationship>Outer</Parent-Relationship>
+          <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+          <Parallel-Aware>false</Parallel-Aware>
+          <Async-Capable>false</Async-Capable>
+          <Distributed-Query>
+            <Job>
+              <Task-Count>6</Task-Count>
+              <Tasks-Shown>None, not supported for re-partition queries</Tasks-Shown>
+              <Dependent-Jobs>
+                <MapMergeJob>
+                  <Map-Task-Count>6</Map-Task-Count>
+                  <Merge-Task-Count>6</Merge-Task-Count>
+                  <Dependent-Jobs>
+                    <MapMergeJob>
+                      <Map-Task-Count>2</Map-Task-Count>
+                      <Merge-Task-Count>6</Merge-Task-Count>
+                    </MapMergeJob>
+                    <MapMergeJob>
+                      <Map-Task-Count>1</Map-Task-Count>
+                      <Merge-Task-Count>6</Merge-Task-Count>
+                    </MapMergeJob>
+                  </Dependent-Jobs>
+                </MapMergeJob>
+                <MapMergeJob>
+                  <Map-Task-Count>1</Map-Task-Count>
+                  <Merge-Task-Count>6</Merge-Task-Count>
+                </MapMergeJob>
+              </Dependent-Jobs>
+            </Job>
+          </Distributed-Query>
+        </Plan>
+      </Plans>
+    </Plan>
+  </Query>
+</explain>
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM lineitem, orders, customer_append, supplier
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey$$);
+t
+-- make sure that EXPLAIN works without
+-- problems for queries that involve only
+-- reference tables
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM nation
+ WHERE n_name = 'CHINA'$$);
+t
+SELECT true AS valid FROM explain_xml($$
+ SELECT count(*)
+ FROM nation, supplier
+ WHERE nation.n_nationkey = supplier.s_nationkey$$);
+t
+EXPLAIN (COSTS FALSE, FORMAT YAML)
+ SELECT count(*)
+ FROM lineitem, orders, customer, supplier_single_shard
+ WHERE l_orderkey = o_orderkey
+ AND o_custkey = c_custkey
+ AND l_suppkey = s_suppkey;
+- Plan:
+ Node Type: "Aggregate"
+ Strategy: "Plain"
+ Partial Mode: "Simple"
+ Parallel Aware: false
+ Async Capable: false
+ Plans:
+ - Node Type: "Custom Scan"
+ Parent Relationship: "Outer"
+ Custom Plan Provider: "Citus Adaptive"
+ Parallel Aware: false
+ Async Capable: false
+ Distributed Query:
+ Job:
+ Task Count: 6
+ Tasks Shown: "None, not supported for re-partition queries"
+ Dependent Jobs:
+ - Map Task Count: 2
+ Merge Task Count: 6
+ - Map Task Count: 1
+ Merge Task Count: 6
+-- ensure local plans display correctly
+CREATE TABLE lineitem_clone (LIKE lineitem);
+EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;
+Aggregate
+ -> Seq Scan on lineitem_clone
+DROP TABLE lineitem_clone;
+-- ensure distributed plans don't break
+EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Seq Scan on lineitem_360000 lineitem
+-- ensure EXPLAIN EXECUTE doesn't crash
+PREPARE task_tracker_query AS
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
+EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey > 9030)
+PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5;
+EXPLAIN EXECUTE router_executor_query;
+Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
+ Index Cond: (l_orderkey = 5)
+PREPARE real_time_executor_query AS
+ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
+EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query;
+Aggregate
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate
+ -> Index Only Scan using lineitem_pkey_360000 on lineitem_360000 lineitem
+ Index Cond: (l_orderkey > 9030)
+-- EXPLAIN EXECUTE of parametrized prepared statements is broken, but
+-- at least make sure it fails without crashing
+PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1;
+EXPLAIN EXECUTE router_executor_query_param(5);
+Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (cost=0.28..13.60 rows=4 width=5)
+ Index Cond: (l_orderkey = 5)
+EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE router_executor_query_param(5);
+Custom Scan (Citus Adaptive) (actual rows=3 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 30 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 30 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Index Scan using lineitem_pkey_360000 on lineitem_360000 lineitem (actual rows=3 loops=1)
+ Index Cond: (l_orderkey = 5)
+\set VERBOSITY TERSE
+PREPARE multi_shard_query_param(int) AS UPDATE lineitem SET l_quantity = $1;
+BEGIN;
+EXPLAIN (COSTS OFF) EXECUTE multi_shard_query_param(5);
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_360000 lineitem
+ -> Seq Scan on lineitem_360000 lineitem
+ROLLBACK;
+BEGIN;
+EXPLAIN (ANALYZE ON, COSTS OFF, TIMING OFF, SUMMARY OFF) EXECUTE multi_shard_query_param(5);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on lineitem_360000 lineitem (actual rows=0 loops=1)
+ -> Seq Scan on lineitem_360000 lineitem (actual rows=5894 loops=1)
+ROLLBACK;
+\set VERBOSITY DEFAULT
+-- test explain in a transaction with alter table to test we use right connections
+BEGIN;
+CREATE TABLE explain_table(id int);
+SELECT create_distributed_table('explain_table', 'id');
+
+ALTER TABLE explain_table ADD COLUMN value int;
+ROLLBACK;
+-- test explain with local INSERT ... SELECT
+EXPLAIN (COSTS OFF)
+INSERT INTO lineitem_hash_part
+SELECT o_orderkey FROM orders_hash_part LIMIT 3;
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Limit
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Seq Scan on orders_hash_part_360045 orders_hash_part
+SELECT true AS valid FROM explain_json($$
+ INSERT INTO lineitem_hash_part (l_orderkey)
+ SELECT o_orderkey FROM orders_hash_part LIMIT 3;
+$$);
+t
+EXPLAIN (COSTS OFF)
+INSERT INTO lineitem_hash_part (l_orderkey, l_quantity)
+SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3;
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Limit
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit
+ -> Seq Scan on orders_hash_part_360045 orders_hash_part
+EXPLAIN (COSTS OFF)
+INSERT INTO lineitem_hash_part (l_orderkey)
+SELECT s FROM generate_series(1,5) s;
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Function Scan on generate_series s
+-- WHERE EXISTS forces pg12 to materialize cte
+EXPLAIN (COSTS OFF)
+WITH cte1 AS (SELECT s FROM generate_series(1,10) s)
+INSERT INTO lineitem_hash_part
+WITH cte1 AS (SELECT * FROM cte1 WHERE EXISTS (SELECT * FROM cte1) LIMIT 5)
+SELECT s FROM cte1 WHERE EXISTS (SELECT * FROM cte1);
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Result
+ One-Time Filter: $3
+ CTE cte1
+ -> Function Scan on generate_series s
+ CTE cte1
+ -> Limit
+ InitPlan 2 (returns $1)
+ -> CTE Scan on cte1 cte1_1
+ -> Result
+ One-Time Filter: $1
+ -> CTE Scan on cte1 cte1_2
+ InitPlan 4 (returns $3)
+ -> CTE Scan on cte1 cte1_3
+ -> CTE Scan on cte1
+EXPLAIN (COSTS OFF)
+INSERT INTO lineitem_hash_part
+( SELECT s FROM generate_series(1,5) s) UNION
+( SELECT s FROM generate_series(5,10) s);
+Custom Scan (Citus INSERT ... SELECT)
+ INSERT/SELECT method: pull to coordinator
+ -> Subquery Scan on citus_insert_select_subquery
+ -> HashAggregate
+ Group Key: s.s
+ -> Append
+ -> Function Scan on generate_series s
+ -> Function Scan on generate_series s_1
+-- explain with recursive planning
+EXPLAIN (COSTS OFF, VERBOSE true)
+WITH keys AS MATERIALIZED (
+ SELECT DISTINCT l_orderkey FROM lineitem_hash_part
+),
+series AS MATERIALIZED (
+ SELECT s FROM generate_series(1,10) s
+)
+SELECT l_orderkey FROM series JOIN keys ON (s = l_orderkey)
+ORDER BY s;
+Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_orderkey
+ -> Distributed Subplan XXX_1
+ -> HashAggregate
+ Output: remote_scan.l_orderkey
+ Group Key: remote_scan.l_orderkey
+ -> Custom Scan (Citus Adaptive)
+ Output: remote_scan.l_orderkey
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Query: SELECT DISTINCT l_orderkey FROM public.lineitem_hash_part_360041 lineitem_hash_part WHERE true
+ Node: host=localhost port=xxxxx dbname=regression
+ -> HashAggregate
+ Output: l_orderkey
+ Group Key: lineitem_hash_part.l_orderkey
+ -> Seq Scan on public.lineitem_hash_part_360041 lineitem_hash_part
+ Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
+ -> Distributed Subplan XXX_2
+ -> Function Scan on pg_catalog.generate_series s
+ Output: s
+ Function Call: generate_series(1, 10)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Query: SELECT keys.l_orderkey FROM ((SELECT intermediate_result.s FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(s integer)) series JOIN (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) keys ON ((series.s OPERATOR(pg_catalog.=) keys.l_orderkey))) ORDER BY series.s
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Merge Join
+ Output: intermediate_result_1.l_orderkey, intermediate_result.s
+ Merge Cond: (intermediate_result.s = intermediate_result_1.l_orderkey)
+ -> Sort
+ Output: intermediate_result.s
+ Sort Key: intermediate_result.s
+ -> Function Scan on pg_catalog.read_intermediate_result intermediate_result
+ Output: intermediate_result.s
+ Function Call: read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format)
+ -> Sort
+ Output: intermediate_result_1.l_orderkey
+ Sort Key: intermediate_result_1.l_orderkey
+ -> Function Scan on pg_catalog.read_intermediate_result intermediate_result_1
+ Output: intermediate_result_1.l_orderkey
+ Function Call: read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format)
+SELECT true AS valid FROM explain_json($$
+ WITH result AS (
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity
+ ),
+ series AS (
+ SELECT s FROM generate_series(1,10) s
+ )
+ SELECT * FROM result JOIN series ON (s = count_quantity) JOIN orders_hash_part ON (s = o_orderkey)
+$$);
+t
+SELECT true AS valid FROM explain_xml($$
+ WITH result AS (
+ SELECT l_quantity, count(*) count_quantity FROM lineitem
+ GROUP BY l_quantity ORDER BY count_quantity, l_quantity
+ ),
+ series AS (
+ SELECT s FROM generate_series(1,10) s
+ )
+ SELECT * FROM result JOIN series ON (s = l_quantity) JOIN orders_hash_part ON (s = o_orderkey)
+$$);
+t
+--
+-- Test EXPLAIN ANALYZE udfs
+--
+\a\t
+\set default_opts '''{"costs": false, "timing": false, "summary": false}'''::jsonb
+CREATE TABLE explain_analyze_test(a int, b text);
+INSERT INTO explain_analyze_test VALUES (1, 'value 1'), (2, 'value 2'), (3, 'value 3'), (4, 'value 4');
+-- simple select
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', :default_opts) as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Result (actual rows=1 loops=1)+
+
+(1 row)
+
+END;
+-- insert into select
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$
+ INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i $Q$,
+ :default_opts) as (a int);
+ a
+---------------------------------------------------------------------
+(0 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Insert on explain_analyze_test (actual rows=0 loops=1) +
+ -> Function Scan on generate_series i (actual rows=5 loops=1)+
+
+(1 row)
+
+ROLLBACK;
+-- select from table
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$SELECT * FROM explain_analyze_test$Q$,
+ :default_opts) as (a int, b text);
+ a | b
+---------------------------------------------------------------------
+ 1 | value 1
+ 2 | value 2
+ 3 | value 3
+ 4 | value 4
+(4 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Seq Scan on explain_analyze_test (actual rows=4 loops=1)+
+
+(1 row)
+
+ROLLBACK;
+-- insert into with returning
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$
+ INSERT INTO explain_analyze_test SELECT i, i::text FROM generate_series(1, 5) i
+ RETURNING a, b$Q$,
+ :default_opts) as (a int, b text);
+ a | b
+---------------------------------------------------------------------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+(5 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Insert on explain_analyze_test (actual rows=5 loops=1) +
+ -> Function Scan on generate_series i (actual rows=5 loops=1)+
+
+(1 row)
+
+ROLLBACK;
+-- delete with returning
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$
+ DELETE FROM explain_analyze_test WHERE a % 2 = 0
+ RETURNING a, b$Q$,
+ :default_opts) as (a int, b text);
+ a | b
+---------------------------------------------------------------------
+ 2 | value 2
+ 4 | value 4
+(2 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Delete on explain_analyze_test (actual rows=2 loops=1) +
+ -> Seq Scan on explain_analyze_test (actual rows=2 loops=1)+
+ Filter: ((a % 2) = 0) +
+ Rows Removed by Filter: 2 +
+
+(1 row)
+
+ROLLBACK;
+-- delete without returning
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze($Q$
+ DELETE FROM explain_analyze_test WHERE a % 2 = 0$Q$,
+ :default_opts) as (a int);
+ a
+---------------------------------------------------------------------
+(0 rows)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Delete on explain_analyze_test (actual rows=0 loops=1) +
+ -> Seq Scan on explain_analyze_test (actual rows=2 loops=1)+
+ Filter: ((a % 2) = 0) +
+ Rows Removed by Filter: 2 +
+
+(1 row)
+
+ROLLBACK;
+-- multiple queries (should ERROR)
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1; SELECT 2', :default_opts) as (a int);
+ERROR: cannot EXPLAIN ANALYZE multiple queries
+-- error in query
+SELECT * FROM worker_save_query_explain_analyze('SELECT x', :default_opts) as (a int);
+ERROR: column "x" does not exist
+-- error in format string
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "invlaid_format"}') as (a int);
+ERROR: Invalid explain analyze format: "invlaid_format"
+-- test formats
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "text", "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ Result (actual rows=1 loops=1)+
+
+(1 row)
+
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "json", "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ [ +
+ { +
+ "Plan": { +
+ "Node Type": "Result", +
+ "Parallel Aware": false,+
+ "Async Capable": false, +
+ "Actual Rows": 1, +
+ "Actual Loops": 1 +
+ }, +
+ "Triggers": [ +
+ ] +
+ } +
+ ]
+(1 row)
+
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "xml", "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ <explain xmlns="http://www.postgresql.org/2009/explain">+
+   <Query>                                               +
+     <Plan>                                              +
+       <Node-Type>Result</Node-Type>                     +
+       <Parallel-Aware>false</Parallel-Aware>            +
+       <Async-Capable>false</Async-Capable>              +
+       <Actual-Rows>1</Actual-Rows>                      +
+       <Actual-Loops>1</Actual-Loops>                    +
+     </Plan>                                             +
+     <Triggers>                                          +
+     </Triggers>                                         +
+   </Query>                                              +
+ </explain>
+(1 row)
+
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"format": "yaml", "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output FROM worker_last_saved_explain_analyze();
+ explain_analyze_output
+---------------------------------------------------------------------
+ - Plan: +
+ Node Type: "Result" +
+ Parallel Aware: false+
+ Async Capable: false +
+ Actual Rows: 1 +
+ Actual Loops: 1 +
+ Triggers:
+(1 row)
+
+END;
+-- costs on, timing off
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": true}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+SELECT explain_analyze_output ~ 'Seq Scan.*\(cost=0.00.*\) \(actual rows.*\)' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- costs off, timing on
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": true, "costs": false}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+SELECT explain_analyze_output ~ 'Seq Scan on explain_analyze_test \(actual time=.* rows=.* loops=1\)' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- summary on
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{"timing": false, "costs": false, "summary": true}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT explain_analyze_output ~ 'Planning Time:.*Execution Time:.*' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- buffers on
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "buffers": true}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+SELECT explain_analyze_output ~ 'Buffers:' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- verbose on
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('SELECT * FROM explain_analyze_test', '{"timing": false, "costs": false, "verbose": true}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+SELECT explain_analyze_output ~ 'Output: a, b' FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+-- make sure the saved output is deleted at transaction end
+SELECT * FROM worker_save_query_explain_analyze('SELECT 1', '{}') as (a int);
+ a
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT count(*) FROM worker_last_saved_explain_analyze();
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+-- should be deleted at the end of PREPARE TRANSACTION
+BEGIN;
+SELECT * FROM worker_save_query_explain_analyze('UPDATE explain_analyze_test SET a=6 WHERE a=4', '{}') as (a int);
+ a
+---------------------------------------------------------------------
+(0 rows)
+
+SELECT count(*) FROM worker_last_saved_explain_analyze();
+ count
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+PREPARE TRANSACTION 'citus_0_1496350_7_0';
+SELECT count(*) FROM worker_last_saved_explain_analyze();
+ count
+---------------------------------------------------------------------
+ 0
+(1 row)
+
+COMMIT PREPARED 'citus_0_1496350_7_0';
+-- verify execution time makes sense
+BEGIN;
+SELECT count(*) FROM worker_save_query_explain_analyze('SELECT pg_sleep(0.05)', :default_opts) as (a int);
+ count
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT execution_duration BETWEEN 30 AND 200 FROM worker_last_saved_explain_analyze();
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+END;
+--
+-- verify we handle parametrized queries properly
+--
+CREATE TABLE t(a int);
+INSERT INTO t VALUES (1), (2), (3);
+-- simple case
+PREPARE save_explain AS
+SELECT $1, * FROM worker_save_query_explain_analyze('SELECT $1::int', :default_opts) as (a int);
+EXECUTE save_explain(1);
+ ?column? | a
+---------------------------------------------------------------------
+ 1 | 1
+(1 row)
+
+deallocate save_explain;
+-- Call a UDF first to make sure that we handle stacks of executorBoundParams properly.
+--
+-- The prepared statement will first call f(), which forces a new executor run with a new
+-- set of parameters. Then it will call worker_save_query_explain_analyze with a
+-- parametrized query. If we don't have the correct set of parameters here, it will fail.
+CREATE FUNCTION f() RETURNS INT
+AS $$
+PREPARE pp1 AS SELECT $1 WHERE $2 = $3;
+EXECUTE pp1(4, 5, 5);
+deallocate pp1;
+SELECT 1$$ LANGUAGE sql volatile;
+PREPARE save_explain AS
+ SELECT $1, CASE WHEN i < 2 THEN
+ f() = 1
+ ELSE
+ EXISTS(SELECT * FROM worker_save_query_explain_analyze('SELECT $1::int', :default_opts) as (a int)
+ WHERE a = 1)
+ END
+ FROM generate_series(1, 4) i;
+EXECUTE save_explain(1);
+ ?column? | exists
+---------------------------------------------------------------------
+ 1 | t
+ 1 | t
+ 1 | t
+ 1 | t
+(4 rows)
+
+deallocate save_explain;
+DROP FUNCTION f();
+DROP TABLE t;
+SELECT * FROM explain_analyze_test ORDER BY a;
+ a | b
+---------------------------------------------------------------------
+ 1 | value 1
+ 2 | value 2
+ 3 | value 3
+ 6 | value 4
+(4 rows)
+
+\a\t
+--
+-- Test different cases of EXPLAIN ANALYZE
+--
+SET citus.shard_count TO 4;
+SET client_min_messages TO WARNING;
+SELECT create_distributed_table('explain_analyze_test', 'a');
+
+\set default_analyze_flags '(ANALYZE on, COSTS off, TIMING off, SUMMARY off)'
+\set default_explain_flags '(ANALYZE off, COSTS off, TIMING off, SUMMARY off)'
+-- router SELECT
+EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 1;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 11 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 11 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a = 1)
+-- multi-shard SELECT
+EXPLAIN :default_analyze_flags SELECT count(*) FROM explain_analyze_test;
+Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 32 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+-- empty router SELECT
+EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE a = 10000;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
+ Filter: (a = 10000)
+ Rows Removed by Filter: 1
+-- empty multi-shard SELECT
+EXPLAIN :default_analyze_flags SELECT * FROM explain_analyze_test WHERE b = 'does not exist';
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ Filter: (b = 'does not exist'::text)
+ Rows Removed by Filter: 1
+-- router DML
+BEGIN;
+EXPLAIN :default_analyze_flags DELETE FROM explain_analyze_test WHERE a = 1;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a = 1)
+EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'b' WHERE a = 2;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a = 2)
+SELECT * FROM explain_analyze_test ORDER BY a;
+2|b
+3|value 3
+6|value 4
+ROLLBACK;
+-- multi-shard DML
+BEGIN;
+EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'b' WHERE a IN (1, 2);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a = ANY ('{1,2}'::integer[]))
+EXPLAIN :default_analyze_flags DELETE FROM explain_analyze_test;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+SELECT * FROM explain_analyze_test ORDER BY a;
+ROLLBACK;
+-- router DML with RETURNING with empty result
+EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'something' WHERE a = 10000 RETURNING *;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570012 explain_analyze_test (actual rows=0 loops=1)
+ Filter: (a = 10000)
+ Rows Removed by Filter: 1
+-- multi-shard DML with RETURNING with empty result
+EXPLAIN :default_analyze_flags UPDATE explain_analyze_test SET b = 'something' WHERE b = 'does not exist' RETURNING *;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=0 loops=1)
+ Filter: (b = 'does not exist'::text)
+ Rows Removed by Filter: 1
+-- single-row insert
+BEGIN;
+EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test VALUES (5, 'value 5');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on explain_analyze_test_570009 (actual rows=0 loops=1)
+ -> Result (actual rows=1 loops=1)
+ROLLBACK;
+-- multi-row insert
+BEGIN;
+EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test VALUES (5, 'value 5'), (6, 'value 6');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on explain_analyze_test_570009 citus_table_alias (actual rows=0 loops=1)
+ -> Result (actual rows=1 loops=1)
+ROLLBACK;
+-- distributed insert/select
+BEGIN;
+EXPLAIN :default_analyze_flags INSERT INTO explain_analyze_test SELECT * FROM explain_analyze_test;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on explain_analyze_test_570009 citus_table_alias (actual rows=0 loops=1)
+ -> Seq Scan on explain_analyze_test_570009 explain_analyze_test (actual rows=1 loops=1)
+ Filter: (a IS NOT NULL)
+ROLLBACK;
+DROP TABLE explain_analyze_test;
+-- test EXPLAIN ANALYZE works fine with primary keys
+CREATE TABLE explain_pk(a int primary key, b int);
+SELECT create_distributed_table('explain_pk', 'a');
+
+BEGIN;
+EXPLAIN :default_analyze_flags INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on explain_pk_570013 citus_table_alias (actual rows=0 loops=1)
+ -> Result (actual rows=1 loops=1)
+SELECT * FROM explain_pk ORDER BY 1;
+1|2
+2|3
+ROLLBACK;
+-- test EXPLAIN ANALYZE with non-text output formats
+BEGIN;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+[
+ {
+ "Plan": {
+ "Node Type": "Custom Scan",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Actual Rows": 0,
+ "Actual Loops": 1,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 2,
+ "Tasks Shown": "One of 2",
+ "Tasks": [
+ {
+ "Node": "host=localhost port=xxxxx dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "ModifyTable",
+ "Operation": "Insert",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "explain_pk_570013",
+ "Alias": "citus_table_alias",
+ "Actual Rows": 0,
+ "Actual Loops": 1,
+ "Plans": [
+ {
+ "Node Type": "Result",
+ "Parent Relationship": "Outer",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Actual Rows": 1,
+ "Actual Loops": 1
+ }
+ ]
+ },
+ "Triggers": [
+ ]
+ }
+ ]
+
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "Triggers": [
+ ]
+ }
+]
+ROLLBACK;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT JSON) SELECT * FROM explain_pk;
+[
+ {
+ "Plan": {
+ "Node Type": "Custom Scan",
+ "Custom Plan Provider": "Citus Adaptive",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Actual Rows": 0,
+ "Actual Loops": 1,
+ "Distributed Query": {
+ "Job": {
+ "Task Count": 4,
+ "Tuple data received from nodes": "0 bytes",
+ "Tasks Shown": "One of 4",
+ "Tasks": [
+ {
+ "Tuple data received from node": "0 bytes",
+ "Node": "host=localhost port=xxxxx dbname=regression",
+ "Remote Plan": [
+ [
+ {
+ "Plan": {
+ "Node Type": "Seq Scan",
+ "Parallel Aware": false,
+ "Async Capable": false,
+ "Relation Name": "explain_pk_570013",
+ "Alias": "explain_pk",
+ "Actual Rows": 0,
+ "Actual Loops": 1
+ },
+ "Triggers": [
+ ]
+ }
+ ]
+
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "Triggers": [
+ ]
+ }
+]
+BEGIN;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) INSERT INTO explain_pk VALUES (1, 2), (2, 3);
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Custom Scan</Node-Type>
+      <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Actual-Rows>0</Actual-Rows>
+      <Actual-Loops>1</Actual-Loops>
+      <Distributed-Query>
+        <Job>
+          <Task-Count>2</Task-Count>
+          <Tasks-Shown>One of 2</Tasks-Shown>
+          <Tasks>
+            <Task>
+              <Node>host=localhost port=xxxxx dbname=regression</Node>
+              <Remote-Plan>
+                <explain xmlns="http://www.postgresql.org/2009/explain">
+                  <Query>
+                    <Plan>
+                      <Node-Type>ModifyTable</Node-Type>
+                      <Operation>Insert</Operation>
+                      <Parallel-Aware>false</Parallel-Aware>
+                      <Async-Capable>false</Async-Capable>
+                      <Relation-Name>explain_pk_570013</Relation-Name>
+                      <Alias>citus_table_alias</Alias>
+                      <Actual-Rows>0</Actual-Rows>
+                      <Actual-Loops>1</Actual-Loops>
+                      <Plans>
+                        <Plan>
+                          <Node-Type>Result</Node-Type>
+                          <Parent-Relationship>Outer</Parent-Relationship>
+                          <Parallel-Aware>false</Parallel-Aware>
+                          <Async-Capable>false</Async-Capable>
+                          <Actual-Rows>1</Actual-Rows>
+                          <Actual-Loops>1</Actual-Loops>
+                        </Plan>
+                      </Plans>
+                    </Plan>
+                    <Triggers>
+                    </Triggers>
+                  </Query>
+                </explain>
+              </Remote-Plan>
+            </Task>
+          </Tasks>
+        </Job>
+      </Distributed-Query>
+    </Plan>
+    <Triggers>
+    </Triggers>
+  </Query>
+</explain>
+ROLLBACK;
+EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off, FORMAT XML) SELECT * FROM explain_pk;
+<explain xmlns="http://www.postgresql.org/2009/explain">
+  <Query>
+    <Plan>
+      <Node-Type>Custom Scan</Node-Type>
+      <Custom-Plan-Provider>Citus Adaptive</Custom-Plan-Provider>
+      <Parallel-Aware>false</Parallel-Aware>
+      <Async-Capable>false</Async-Capable>
+      <Actual-Rows>0</Actual-Rows>
+      <Actual-Loops>1</Actual-Loops>
+      <Distributed-Query>
+        <Job>
+          <Task-Count>4</Task-Count>
+          <Tuple-data-received-from-nodes>0 bytes</Tuple-data-received-from-nodes>
+          <Tasks-Shown>One of 4</Tasks-Shown>
+          <Tasks>
+            <Task>
+              <Tuple-data-received-from-node>0 bytes</Tuple-data-received-from-node>
+              <Node>host=localhost port=xxxxx dbname=regression</Node>
+              <Remote-Plan>
+                <explain xmlns="http://www.postgresql.org/2009/explain">
+                  <Query>
+                    <Plan>
+                      <Node-Type>Seq Scan</Node-Type>
+                      <Parallel-Aware>false</Parallel-Aware>
+                      <Async-Capable>false</Async-Capable>
+                      <Relation-Name>explain_pk_570013</Relation-Name>
+                      <Alias>explain_pk</Alias>
+                      <Actual-Rows>0</Actual-Rows>
+                      <Actual-Loops>1</Actual-Loops>
+                    </Plan>
+                    <Triggers>
+                    </Triggers>
+                  </Query>
+                </explain>
+              </Remote-Plan>
+            </Task>
+          </Tasks>
+        </Job>
+      </Distributed-Query>
+    </Plan>
+    <Triggers>
+    </Triggers>
+  </Query>
+</explain>
+DROP TABLE explain_pk;
+-- test EXPLAIN ANALYZE with CTEs and subqueries
+CREATE TABLE dist_table(a int, b int);
+SELECT create_distributed_table('dist_table', 'a');
+
+CREATE TABLE ref_table(a int);
+SELECT create_reference_table('ref_table');
+
+INSERT INTO dist_table SELECT i, i*i FROM generate_series(1, 10) i;
+INSERT INTO ref_table SELECT i FROM generate_series(1, 10) i;
+EXPLAIN :default_analyze_flags
+WITH r AS (
+ SELECT GREATEST(random(), 2) r, a FROM dist_table
+)
+SELECT count(distinct a) from r NATURAL JOIN ref_table;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 220 bytes
+ Result destination: Send to 3 nodes
+ -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 120 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 48 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 8 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Hash Join (actual rows=10 loops=1)
+ Hash Cond: (ref_table.a = intermediate_result.a)
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+ -> Hash (actual rows=10 loops=1)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+EXPLAIN :default_analyze_flags
+SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table;
+Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 32 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Merge Join (actual rows=4 loops=1)
+ Merge Cond: (t.a = ref_table.a)
+ -> Sort (actual rows=4 loops=1)
+ Sort Key: t.a
+ Sort Method: quicksort Memory: 25kB
+ -> Subquery Scan on t (actual rows=4 loops=1)
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: ref_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+EXPLAIN :default_analyze_flags
+SELECT count(distinct a) FROM dist_table
+WHERE EXISTS(SELECT random() < 2 FROM dist_table NATURAL JOIN ref_table);
+Aggregate (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=4 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 70 bytes
+ Result destination: Send to 2 nodes
+ -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 10 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Merge Join (actual rows=4 loops=1)
+ Merge Cond: (dist_table.a = ref_table.a)
+ -> Sort (actual rows=4 loops=1)
+ Sort Key: dist_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+ -> Sort (actual rows=10 loops=1)
+ Sort Key: ref_table.a
+ Sort Method: quicksort Memory: 25kB
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 32 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ InitPlan 1 (returns $0)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+ -> Result (actual rows=4 loops=1)
+ One-Time Filter: $0
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
+BEGIN;
+EXPLAIN :default_analyze_flags
+WITH r AS (
+ INSERT INTO dist_table SELECT a, a * a FROM dist_table
+ RETURNING a
+), s AS (
+ SELECT random() < 2, a * a a2 FROM r
+)
+SELECT count(distinct a2) FROM s;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 100 bytes
+ Result destination: Write locally
+ -> Custom Scan (Citus Adaptive) (actual rows=20 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 160 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 64 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on dist_table_570017 citus_table_alias (actual rows=8 loops=1)
+ -> Seq Scan on dist_table_570017 dist_table (actual rows=8 loops=1)
+ Filter: (a IS NOT NULL)
+ -> Distributed Subplan XXX_2
+ Intermediate Data Size: 150 bytes
+ Result destination: Write locally
+ -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 50 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 50 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 8 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 8 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Aggregate (actual rows=1 loops=1)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
+ROLLBACK;
+-- https://github.com/citusdata/citus/issues/4074
+prepare ref_select(int) AS select * from ref_table where 1 = $1;
+explain :default_analyze_flags execute ref_select(1);
+Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 40 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 40 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Result (actual rows=10 loops=1)
+ One-Time Filter: (1 = $1)
+ -> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
+deallocate ref_select;
+DROP TABLE ref_table, dist_table;
+-- test EXPLAIN ANALYZE with different replication factors
+SET citus.shard_count = 2;
+SET citus.shard_replication_factor = 1;
+CREATE TABLE dist_table_rep1(a int);
+SELECT create_distributed_table('dist_table_rep1', 'a');
+
+SET citus.shard_replication_factor = 2;
+CREATE TABLE dist_table_rep2(a int);
+SELECT create_distributed_table('dist_table_rep2', 'a');
+
+EXPLAIN :default_analyze_flags INSERT INTO dist_table_rep1 VALUES(1), (2), (3), (4), (10), (100) RETURNING *;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on dist_table_rep1_570022 citus_table_alias (actual rows=4 loops=1)
+ -> Values Scan on "*VALUES*" (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags SELECT * from dist_table_rep1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags INSERT INTO dist_table_rep2 VALUES(1), (2), (3), (4), (10), (100) RETURNING *;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 48 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 32 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on dist_table_rep2_570024 citus_table_alias (actual rows=4 loops=1)
+ -> Values Scan on "*VALUES*" (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags SELECT * from dist_table_rep2;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep2_570024 dist_table_rep2 (actual rows=4 loops=1)
+prepare p1 as SELECT * FROM dist_table_rep1;
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+EXPLAIN :default_analyze_flags EXECUTE p1;
+Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 24 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=4 loops=1)
+prepare p2 AS SELECT * FROM dist_table_rep1 WHERE a = $1;
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(1);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(10);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 10)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p2(100);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570023 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 100)
+ Rows Removed by Filter: 1
+prepare p3 AS SELECT * FROM dist_table_rep1 WHERE a = 1;
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+EXPLAIN :default_analyze_flags EXECUTE p3;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on dist_table_rep1_570022 dist_table_rep1 (actual rows=1 loops=1)
+ Filter: (a = 1)
+ Rows Removed by Filter: 3
+DROP TABLE dist_table_rep1, dist_table_rep2;
+-- https://github.com/citusdata/citus/issues/2009
+CREATE TABLE simple (id integer, name text);
+SELECT create_distributed_table('simple', 'id');
+
+PREPARE simple_router AS SELECT *, $1 FROM simple WHERE id = 1;
+EXPLAIN :default_explain_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+EXPLAIN :default_analyze_flags EXECUTE simple_router(1);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (id = 1)
+deallocate simple_router;
+-- prepared multi-row insert
+PREPARE insert_query AS INSERT INTO simple VALUES ($1, 2), (2, $2);
+EXPLAIN :default_explain_flags EXECUTE insert_query(3, 4);
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on simple_570026 citus_table_alias
+ -> Result
+EXPLAIN :default_analyze_flags EXECUTE insert_query(3, 4);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on simple_570026 citus_table_alias (actual rows=0 loops=1)
+ -> Result (actual rows=1 loops=1)
+deallocate insert_query;
+-- prepared updates
+PREPARE update_query AS UPDATE simple SET name=$1 WHERE name=$2;
+EXPLAIN :default_explain_flags EXECUTE update_query('x', 'y');
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on simple_570026 simple
+ -> Seq Scan on simple_570026 simple
+ Filter: (name = 'y'::text)
+EXPLAIN :default_analyze_flags EXECUTE update_query('x', 'y');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Update on simple_570026 simple (actual rows=0 loops=1)
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: (name = $2)
+ Rows Removed by Filter: 1
+deallocate update_query;
+-- prepared deletes
+PREPARE delete_query AS DELETE FROM simple WHERE name=$1 OR name=$2;
+EXPLAIN (COSTS OFF) EXECUTE delete_query('x', 'y');
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on simple_570026 simple
+ -> Seq Scan on simple_570026 simple
+ Filter: ((name = 'x'::text) OR (name = 'y'::text))
+EXPLAIN :default_analyze_flags EXECUTE delete_query('x', 'y');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on simple_570026 simple (actual rows=0 loops=1)
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: ((name = $1) OR (name = $2))
+ Rows Removed by Filter: 1
+deallocate delete_query;
+-- prepared distributed insert/select
+-- we don't support EXPLAIN for prepared insert/selects of other types.
+PREPARE distributed_insert_select AS INSERT INTO simple SELECT * FROM simple WHERE name IN ($1, $2);
+EXPLAIN :default_explain_flags EXECUTE distributed_insert_select('x', 'y');
+Custom Scan (Citus Adaptive)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on simple_570026 citus_table_alias
+ -> Seq Scan on simple_570026 simple
+ Filter: ((id IS NOT NULL) AND (name = ANY ('{x,y}'::text[])))
+EXPLAIN :default_analyze_flags EXECUTE distributed_insert_select('x', 'y');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on simple_570026 citus_table_alias (actual rows=0 loops=1)
+ -> Seq Scan on simple_570026 simple (actual rows=0 loops=1)
+ Filter: ((id IS NOT NULL) AND (name = ANY (ARRAY[$1, $2])))
+ Rows Removed by Filter: 1
+deallocate distributed_insert_select;
+DROP TABLE simple;
+-- prepared cte
+BEGIN;
+PREPARE cte_query AS
+WITH keys AS (
+ SELECT count(*) FROM
+ (SELECT DISTINCT l_orderkey, GREATEST(random(), 2) FROM lineitem_hash_part WHERE l_quantity > $1) t
+),
+series AS (
+ SELECT s FROM generate_series(1, $2) s
+),
+delete_result AS (
+ DELETE FROM lineitem_hash_part WHERE l_quantity < $3 RETURNING *
+)
+SELECT s FROM series;
+EXPLAIN :default_explain_flags EXECUTE cte_query(2, 10, -1);
+Custom Scan (Citus Adaptive)
+ -> Distributed Subplan XXX_1
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+ Tasks Shown: One of 4
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360041 lineitem_hash_part
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
+ Filter: (l_quantity < '-1'::numeric)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Function Scan on generate_series s
+EXPLAIN :default_analyze_flags EXECUTE cte_query(2, 10, -1);
+Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 0 bytes
+ Result destination: Send to 0 nodes
+ -> Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 4
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: One of 4
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Delete on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
+ -> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part (actual rows=0 loops=1)
+ Filter: (l_quantity < '-1'::numeric)
+ Rows Removed by Filter: 2885
+ Task Count: 1
+ Tuple data received from nodes: 40 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 40 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Function Scan on generate_series s (actual rows=10 loops=1)
+ROLLBACK;
+-- https://github.com/citusdata/citus/issues/2009#issuecomment-653036502
+CREATE TABLE users_table_2 (user_id int primary key, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint);
+SELECT create_reference_table('users_table_2');
+
+PREPARE p4 (int, int) AS insert into users_table_2 ( value_1, user_id) select value_1, user_id + $2 FROM users_table_2 ON CONFLICT (user_id) DO UPDATE SET value_2 = EXCLUDED.value_1 + $1;
+EXPLAIN :default_explain_flags execute p4(20,20);
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on users_table_2_570028 citus_table_alias
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: users_table_2_pkey_570028
+ -> Seq Scan on users_table_2_570028 users_table_2
+EXPLAIN :default_analyze_flags execute p4(20,20);
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Insert on users_table_2_570028 citus_table_alias (actual rows=0 loops=1)
+ Conflict Resolution: UPDATE
+ Conflict Arbiter Indexes: users_table_2_pkey_570028
+ Tuples Inserted: 0
+ Conflicting Tuples: 0
+ -> Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
+-- simple test to confirm we can fetch long (>4KB) plans
+EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM users_table_2 WHERE value_1::text = '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X';
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on users_table_2_570028 users_table_2 (actual rows=0 loops=1)
+ Filter: ((value_1)::text = '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000X'::text)
+DROP TABLE users_table_2;
+-- sorted explain analyze output
+CREATE TABLE explain_analyze_execution_time (a int);
+INSERT INTO explain_analyze_execution_time VALUES (2);
+SELECT create_distributed_table('explain_analyze_execution_time', 'a');
+
+-- show that we can sort the output wrt execution time
+-- we use the following hack to keep the test output
+-- consistent: first ingest a single row, then add a
+-- pg_sleep() call to the query. Postgres will only
+-- sleep for the shard that holds the single row, so that
+-- task will definitely be slower
+set citus.explain_analyze_sort_method to "taskId";
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_execution_time_570029 explain_analyze_execution_time (actual rows=0 loops=1)
+set citus.explain_analyze_sort_method to "execution-time";
+EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE) select a, CASE WHEN pg_sleep(0.4) IS NULL THEN 'x' END from explain_analyze_execution_time;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 4 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 4 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on explain_analyze_execution_time_570030 explain_analyze_execution_time (actual rows=1 loops=1)
+-- reset back
+reset citus.explain_analyze_sort_method;
+DROP TABLE explain_analyze_execution_time;
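For context on why the pg_sleep() hack above is deterministic: the call sits in the target list, so it is evaluated once per row a scan emits. The shard with no rows never sleeps, while the shard holding the single ingested row pays the full delay, which pins the per-task execution-time ordering. A minimal plain-Postgres illustration (the temporary table name is made up for this sketch):

    CREATE TEMP TABLE sleep_demo (a int);
    INSERT INTO sleep_demo VALUES (2);  -- exactly one row, hence exactly one sleep
    EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF)
    SELECT a, CASE WHEN pg_sleep(0.1) IS NULL THEN 'x' END FROM sleep_demo;
    DROP TABLE sleep_demo;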
+CREATE SCHEMA multi_explain;
+SET search_path TO multi_explain;
+-- test EXPLAIN ANALYZE when original query returns no columns
+CREATE TABLE reference_table(a int);
+SELECT create_reference_table('reference_table');
+
+INSERT INTO reference_table VALUES (1);
+EXPLAIN :default_analyze_flags SELECT FROM reference_table;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on reference_table_570031 reference_table (actual rows=1 loops=1)
+CREATE TABLE distributed_table_1(a int, b int);
+SELECT create_distributed_table('distributed_table_1','a');
+
+INSERT INTO distributed_table_1 values (1,1);
+EXPLAIN :default_analyze_flags SELECT row_number() OVER() AS r FROM distributed_table_1;
+WindowAgg (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
+CREATE TABLE distributed_table_2(a int, b int);
+SELECT create_distributed_table('distributed_table_2','a');
+
+INSERT INTO distributed_table_2 VALUES (1,1);
+EXPLAIN :default_analyze_flags
+WITH r AS (SELECT row_number() OVER () AS r FROM distributed_table_1)
+SELECT * FROM distributed_table_2
+JOIN r ON (r = distributed_table_2.b)
+LIMIT 3;
+Limit (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ -> Distributed Subplan XXX_1
+ Intermediate Data Size: 14 bytes
+ Result destination: Send to 2 nodes
+ -> WindowAgg (actual rows=1 loops=1)
+ -> Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 16 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 16 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Limit (actual rows=1 loops=1)
+ -> Nested Loop (actual rows=1 loops=1)
+ Join Filter: (distributed_table_2.b = intermediate_result.r)
+ -> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
+ -> Seq Scan on distributed_table_2_570034 distributed_table_2 (actual rows=1 loops=1)
+EXPLAIN :default_analyze_flags SELECT FROM (SELECT * FROM reference_table) subquery;
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on reference_table_570031 reference_table (actual rows=1 loops=1)
+PREPARE dummy_prep_stmt(int) AS SELECT FROM distributed_table_1;
+EXPLAIN :default_analyze_flags EXECUTE dummy_prep_stmt(50);
+Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
+ Task Count: 2
+ Tasks Shown: One of 2
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on distributed_table_1_570032 distributed_table_1 (actual rows=1 loops=1)
+CREATE TYPE multi_explain.int_wrapper_type AS (int_field int);
+CREATE TABLE tbl (a int, b multi_explain.int_wrapper_type);
+SELECT create_distributed_table('tbl', 'a');
+
+EXPLAIN :default_analyze_flags SELECT * FROM tbl;
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 2
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: One of 2
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
+PREPARE q1(int_wrapper_type) AS WITH a AS (SELECT * FROM tbl WHERE b = $1 AND a = 1 OFFSET 0) SELECT * FROM a;
+EXPLAIN (COSTS false) EXECUTE q1('(1)');
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on tbl_570036 tbl
+ Filter: ((b = '(1)'::multi_explain.int_wrapper_type) AND (a = 1))
+EXPLAIN :default_analyze_flags EXECUTE q1('(1)');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
+ Filter: ((b = $1) AND (a = 1))
+PREPARE q2(int_wrapper_type) AS WITH a AS (UPDATE tbl SET b = $1 WHERE a = 1 RETURNING *) SELECT * FROM a;
+EXPLAIN (COSTS false) EXECUTE q2('(1)');
+Custom Scan (Citus Adaptive)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> CTE Scan on a
+ CTE a
+ -> Update on tbl_570036 tbl
+ -> Seq Scan on tbl_570036 tbl
+ Filter: (a = 1)
+EXPLAIN :default_analyze_flags EXECUTE q2('(1)');
+Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
+ Task Count: 1
+ Tuple data received from nodes: 0 bytes
+ Tasks Shown: All
+ -> Task
+ Tuple data received from node: 0 bytes
+ Node: host=localhost port=xxxxx dbname=regression
+ -> CTE Scan on a (actual rows=0 loops=1)
+ CTE a
+ -> Update on tbl_570036 tbl (actual rows=0 loops=1)
+ -> Seq Scan on tbl_570036 tbl (actual rows=0 loops=1)
+ Filter: (a = 1)
+-- check when auto explain + analyze is enabled, we do not allow local execution.
+CREATE SCHEMA test_auto_explain;
+SET search_path TO 'test_auto_explain';
+CREATE TABLE test_ref_table (key int PRIMARY KEY);
+SELECT create_reference_table('test_ref_table');
+
+LOAD 'auto_explain';
+SET auto_explain.log_min_duration = 0;
+set auto_explain.log_analyze to true;
+-- the following should not be locally executed since explain analyze is on
+select * from test_ref_table;
+DROP SCHEMA test_auto_explain CASCADE;
+SET client_min_messages TO ERROR;
+DROP SCHEMA multi_explain CASCADE;
diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out
index ca0c0514a..295b10c76 100644
--- a/src/test/regress/expected/multi_extension.out
+++ b/src/test/regress/expected/multi_extension.out
@@ -59,7 +59,9 @@ BEGIN
SELECT p.description previous_object, c.description current_object
FROM current_objects c FULL JOIN prev_objects p
ON p.description = c.description
- WHERE p.description is null OR c.description is null;
+ WHERE (p.description is null OR c.description is null)
+ AND c.description IS DISTINCT FROM 'function any_value(anyelement) anyelement'
+ AND c.description IS DISTINCT FROM 'function any_value_agg(anyelement,anyelement) anyelement';
DROP TABLE prev_objects;
ALTER TABLE current_objects RENAME TO prev_objects;
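For context: PostgreSQL 16 ships a built-in any_value() aggregate, so the Citus-provided any_value/any_value_agg functions no longer appear in the extension's object list on that version (the same two entries are dropped from the expected snapshot below), and filtering them here presumably keeps one expected file valid across all tested PostgreSQL versions. Using IS DISTINCT FROM rather than <> matters because the FULL JOIN yields NULL descriptions for objects present on only one side, and NULL <> 'x' evaluates to NULL, which would silently hide those rows. A minimal illustration of that difference, outside the test:

    SELECT NULL::text IS DISTINCT FROM 'function any_value(anyelement) anyelement';  -- true, row kept
    SELECT NULL::text <> 'function any_value(anyelement) anyelement';                -- NULL, row filtered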
@@ -132,8 +134,6 @@ SELECT * FROM multi_extension.print_extension_changes();
---------------------------------------------------------------------
| event trigger citus_cascade_to_partition
| function alter_role_if_exists(text,text) boolean
- | function any_value(anyelement) anyelement
- | function any_value_agg(anyelement,anyelement) anyelement
| function array_cat_agg(anycompatiblearray) anycompatiblearray
| function assign_distributed_transaction_id(integer,bigint,timestamp with time zone) void
| function authinfo_valid(text) boolean
@@ -318,7 +318,7 @@ SELECT * FROM multi_extension.print_extension_changes();
| view citus_stat_statements
| view citus_worker_stat_activity
| view pg_dist_shard_placement
-(188 rows)
+(186 rows)
-- Test downgrade to 9.2-2 from 9.2-4
ALTER EXTENSION citus UPDATE TO '9.2-4';
@@ -986,7 +986,7 @@ DELETE FROM pg_dist_shard WHERE shardid = 1;
CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id);
CREATE TABLE orders_2020_07_01
PARTITION OF e_transactions FOR VALUES IN (1,2,3);
-INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
+INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
SELECT
(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
(metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null
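The partition-key change above reflects a node-format difference: PostgreSQL 16 added a varnullingrels field to Var nodes, so the serialized key stored in pg_dist_partition.partkey now includes ":varnullingrels (b)". The stored string can be inspected directly, assuming the e_transactions table from this test exists:

    SELECT logicalrelid, partkey
    FROM pg_dist_partition
    WHERE logicalrelid = 'e_transactions'::regclass;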
@@ -1331,7 +1331,23 @@ SELECT * FROM multi_extension.print_extension_changes();
| view citus_stat_tenants_local
(11 rows)
--- Test downgrade to 11.3-1 from 12.0-1
+-- Test downgrade to 11.3-1 from 11.3-2
+ALTER EXTENSION citus UPDATE TO '11.3-2';
+ALTER EXTENSION citus UPDATE TO '11.3-1';
+-- Should be empty result since upgrade+downgrade should be a no-op
+SELECT * FROM multi_extension.print_extension_changes();
+ previous_object | current_object
+---------------------------------------------------------------------
+(0 rows)
+
+-- Snapshot of state at 11.3-2
+ALTER EXTENSION citus UPDATE TO '11.3-2';
+SELECT * FROM multi_extension.print_extension_changes();
+ previous_object | current_object
+---------------------------------------------------------------------
+(0 rows)
+
+-- Test downgrade to 11.3-2 from 12.0-1
ALTER EXTENSION citus UPDATE TO '12.0-1';
CREATE TABLE null_shard_key (x int, y int);
SET citus.shard_replication_factor TO 1;
@@ -1341,15 +1357,15 @@ SELECT create_distributed_table('null_shard_key', null);
(1 row)
--- Show that we cannot downgrade to 11.3-1 because the cluster has a
+-- Show that we cannot downgrade to 11.3-2 because the cluster has a
-- distributed table with single-shard.
-ALTER EXTENSION citus UPDATE TO '11.3-1';
+ALTER EXTENSION citus UPDATE TO '11.3-2';
ERROR: cannot downgrade Citus because there are distributed tables without a shard key.
DETAIL: To downgrade Citus to an older version, you should first convert those tables to Postgres tables by executing SELECT undistribute_table("%s").
HINT: You can find the distributed tables without a shard key in the cluster by using the following query: "SELECT * FROM citus_tables WHERE distribution_column = '' AND colocation_id > 0".
CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE
DROP TABLE null_shard_key;
-ALTER EXTENSION citus UPDATE TO '11.3-1';
+ALTER EXTENSION citus UPDATE TO '11.3-2';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
@@ -1359,24 +1375,61 @@ SELECT * FROM multi_extension.print_extension_changes();
-- Snapshot of state at 12.0-1
ALTER EXTENSION citus UPDATE TO '12.0-1';
SELECT * FROM multi_extension.print_extension_changes();
- previous_object | current_object
+ previous_object | current_object
---------------------------------------------------------------------
| function citus_internal_add_tenant_schema(oid,integer) void
| function citus_internal_delete_tenant_schema(oid) void
| function citus_internal_unregister_tenant_schema_globally(oid,text) void
| function citus_schema_distribute(regnamespace) void
| function citus_schema_undistribute(regnamespace) void
- | function citus_stat_tenants_local_internal(boolean) SETOF record
+ | function citus_stat_tenants_local_internal(boolean) SETOF record
| table pg_dist_schema
| view public.citus_schemas
(8 rows)
+-- Test downgrade to 12.0-1 from 12.1-1
+ALTER EXTENSION citus UPDATE TO '12.1-1';
+ALTER EXTENSION citus UPDATE TO '12.0-1';
+-- Should be empty result since upgrade+downgrade should be a no-op
+SELECT * FROM multi_extension.print_extension_changes();
+ previous_object | current_object
+---------------------------------------------------------------------
+(0 rows)
+
+-- Snapshot of state at 12.1-1
+ALTER EXTENSION citus UPDATE TO '12.1-1';
+SELECT * FROM multi_extension.print_extension_changes();
+ previous_object | current_object
+---------------------------------------------------------------------
+ | function citus_internal_delete_placement_metadata(bigint) void
+ | function citus_internal_update_none_dist_table_metadata(oid,"char",bigint,boolean) void
+ | function citus_pause_node_within_txn(integer,boolean,integer) void
+ | function citus_schema_move(regnamespace,integer,citus.shard_transfer_mode) void
+ | function citus_schema_move(regnamespace,text,integer,citus.shard_transfer_mode) void
+(5 rows)
+
+-- Test downgrade to 12.1-1 from 12.2-1
+ALTER EXTENSION citus UPDATE TO '12.2-1';
+ALTER EXTENSION citus UPDATE TO '12.1-1';
+-- Should be empty result since upgrade+downgrade should be a no-op
+SELECT * FROM multi_extension.print_extension_changes();
+ previous_object | current_object
+---------------------------------------------------------------------
+(0 rows)
+
+-- Snapshot of state at 12.2-1
+ALTER EXTENSION citus UPDATE TO '12.2-1';
+SELECT * FROM multi_extension.print_extension_changes();
+ previous_object | current_object
+---------------------------------------------------------------------
+(0 rows)
+
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
SHOW citus.version;
citus.version
---------------------------------------------------------------------
- 12.0devel
+ 12.2devel
(1 row)
-- ensure no unexpected objects were created outside pg_catalog
@@ -1410,7 +1463,7 @@ DROP EXTENSION citus;
DROP EXTENSION citus_columnar;
CREATE EXTENSION citus VERSION '8.0-1';
ERROR: specified version incompatible with loaded Citus library
-DETAIL: Loaded library requires 12.0, but 8.0-1 was specified.
+DETAIL: Loaded library requires 12.2, but 8.0-1 was specified.
HINT: If a newer library is present, restart the database and try the command again.
-- Test non-distributed queries work even in version mismatch
SET citus.enable_version_checks TO 'false';
@@ -1455,7 +1508,7 @@ ORDER BY 1;
-- We should not distribute table in version mismatch
SELECT create_distributed_table('version_mismatch_table', 'column1');
ERROR: loaded Citus library version differs from installed extension version
-DETAIL: Loaded library requires 12.0, but the installed extension version is 8.1-1.
+DETAIL: Loaded library requires 12.2, but the installed extension version is 8.1-1.
HINT: Run ALTER EXTENSION citus UPDATE and try again.
-- This function will cause fail in next ALTER EXTENSION
CREATE OR REPLACE FUNCTION pg_catalog.relation_is_a_known_shard(regclass)
diff --git a/src/test/regress/expected/multi_foreign_key.out b/src/test/regress/expected/multi_foreign_key.out
index 832be2740..e206a6fb6 100644
--- a/src/test/regress/expected/multi_foreign_key.out
+++ b/src/test/regress/expected/multi_foreign_key.out
@@ -856,7 +856,7 @@ SELECT create_reference_table('reference_table_second');
CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int);
DROP TABLE reference_table CASCADE;
NOTICE: drop cascades to constraint reference_table_second_referencing_column_fkey on table reference_table_second
-NOTICE: drop cascades to constraint reference_table_second_referencing_column_fkey_1350654 on table public.reference_table_second_1350654
+NOTICE: drop cascades to constraint reference_table_second_referencing_column_fkey_1350653 on table public.reference_table_second_1350653
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_local_table(id));
@@ -917,7 +917,7 @@ DROP TABLE reference_table CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to constraint fk on table references_to_reference_table
drop cascades to constraint fk on table reference_table_second
-NOTICE: drop cascades to constraint fk_1350663 on table public.reference_table_second_1350663
+NOTICE: drop cascades to constraint fk_1350662 on table public.reference_table_second_1350662
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int);
@@ -1277,6 +1277,6 @@ ERROR: cannot create foreign key constraint since Citus does not support ON DEL
-- we no longer need those tables
DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2,
set_on_default_test_referenced, set_on_default_test_referencing;
-NOTICE: drop cascades to constraint fk_1350664 on table public.reference_table_1350664
+NOTICE: drop cascades to constraint fk_1350663 on table public.reference_table_1350663
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
diff --git a/src/test/regress/expected/multi_hash_pruning.out b/src/test/regress/expected/multi_hash_pruning.out
index 0a113c5f8..09b1ccd87 100644
--- a/src/test/regress/expected/multi_hash_pruning.out
+++ b/src/test/regress/expected/multi_hash_pruning.out
@@ -1232,31 +1232,20 @@ WHERE o_orderkey IN (1, 2)
-> Seq Scan on lineitem_hash_partitioned_630004 lineitem_hash_partitioned
(13 rows)
+SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
SELECT count(*)
FROM orders_hash_partitioned
FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
WHERE o_orderkey IN (1, 2)
AND l_orderkey IN (2, 3);
- QUERY PLAN
+$Q$);
+ coordinator_plan
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 3
- Tasks Shown: One of 3
- -> Task
- Node: host=localhost port=xxxxx dbname=regression
- -> Aggregate
- -> Nested Loop
- Join Filter: (orders_hash_partitioned.o_orderkey = lineitem_hash_partitioned.l_orderkey)
- -> Seq Scan on orders_hash_partitioned_630000 orders_hash_partitioned
- Filter: (o_orderkey = ANY ('{1,2}'::integer[]))
- -> Materialize
- -> Bitmap Heap Scan on lineitem_hash_partitioned_630004 lineitem_hash_partitioned
- Recheck Cond: (l_orderkey = ANY ('{2,3}'::integer[]))
- -> Bitmap Index Scan on lineitem_hash_partitioned_pkey_630004
- Index Cond: (l_orderkey = ANY ('{2,3}'::integer[]))
-(16 rows)
+(3 rows)
SET citus.task_executor_type TO DEFAULT;
DROP TABLE lineitem_hash_partitioned;
diff --git a/src/test/regress/expected/multi_having_pushdown.out b/src/test/regress/expected/multi_having_pushdown.out
index d2051a55c..a1ef9f52f 100644
--- a/src/test/regress/expected/multi_having_pushdown.out
+++ b/src/test/regress/expected/multi_having_pushdown.out
@@ -120,7 +120,7 @@ EXPLAIN (COSTS FALSE)
SELECT sum(l_extendedprice * l_discount) as revenue
FROM lineitem_hash, orders_hash
WHERE o_orderkey = l_orderkey
- GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24
+ GROUP BY l_orderkey, l_shipmode HAVING sum(l_quantity) > 24
ORDER BY 1 DESC LIMIT 3;
QUERY PLAN
---------------------------------------------------------------------
@@ -136,7 +136,7 @@ EXPLAIN (COSTS FALSE)
-> Sort
Sort Key: (sum((lineitem_hash.l_extendedprice * lineitem_hash.l_discount))) DESC
-> HashAggregate
- Group Key: lineitem_hash.l_orderkey, orders_hash.o_orderkey, lineitem_hash.l_shipmode
+ Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_shipmode
Filter: (sum(lineitem_hash.l_quantity) > '24'::numeric)
-> Hash Join
Hash Cond: (orders_hash.o_orderkey = lineitem_hash.l_orderkey)
diff --git a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out
index ac339a620..26a7dfcf5 100644
--- a/src/test/regress/expected/multi_insert_select.out
+++ b/src/test/regress/expected/multi_insert_select.out
@@ -164,6 +164,7 @@ INSERT INTO raw_events_first (user_id, time) VALUES
-- try a single shard query
SET client_min_messages TO DEBUG2;
INSERT INTO raw_events_second (user_id, time) SELECT user_id, time FROM raw_events_first WHERE user_id = 7;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_second_13300005 AS citus_table_alias (user_id, "time") SELECT raw_events_first.user_id, raw_events_first."time" FROM multi_insert_select.raw_events_first_13300001 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) 7) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -181,6 +182,7 @@ FROM
raw_events_first
WHERE
user_id = 8;
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_second_13300004 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_first.user_id, raw_events_first."time", raw_events_first.value_1, raw_events_first.value_2, raw_events_first.value_3, raw_events_first.value_4 FROM multi_insert_select.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) 8) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -193,6 +195,7 @@ FROM
raw_events_first
WHERE
false;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -205,6 +208,7 @@ FROM
raw_events_first
WHERE
0 != 0;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -381,6 +385,7 @@ FROM raw_events_first
WHERE user_id IN (SELECT user_id
FROM raw_events_second
WHERE user_id = 2);
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -403,6 +408,7 @@ SELECT user_id
FROM raw_events_first
WHERE user_id IN (SELECT user_id
FROM raw_events_second WHERE false);
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -741,6 +747,18 @@ FROM
((SELECT user_id FROM raw_events_first WHERE user_id = 15) EXCEPT
(SELECT user_id FROM raw_events_second where user_id = 17)) as foo;
DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Distributed planning for a fast-path router query
+DEBUG: Creating router plan
+DEBUG: query has a single distribution column value: 15
+DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM multi_insert_select.raw_events_first WHERE (user_id OPERATOR(pg_catalog.=) 15)
+DEBUG: Distributed planning for a fast-path router query
+DEBUG: Creating router plan
+DEBUG: query has a single distribution column value: 17
+DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM multi_insert_select.raw_events_second WHERE (user_id OPERATOR(pg_catalog.=) 17)
+DEBUG: Creating router plan
+DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) EXCEPT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)
+DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- some supported LEFT joins
@@ -774,6 +792,7 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
FROM
raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id
WHERE raw_events_first.user_id = 10;
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO multi_insert_select.agg_events_13300008 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (multi_insert_select.raw_events_first_13300000 raw_events_first LEFT JOIN multi_insert_select.raw_events_second_13300004 raw_events_second ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) 10) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
@@ -794,6 +813,7 @@ DEBUG: distributed statement: INSERT INTO multi_insert_select.agg_events_133000
FROM
raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id
WHERE raw_events_first.user_id = 10 AND raw_events_first.user_id = 20;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300008 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
@@ -804,6 +824,7 @@ DEBUG: Skipping target shard interval 13300011 since SELECT query for it pruned
FROM
raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id
WHERE raw_events_first.user_id = 10 AND raw_events_second.user_id = 20;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300008 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
@@ -1253,6 +1274,7 @@ SELECT
user_id
FROM
reference_table;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Distributed planning for a fast-path router query
@@ -1650,6 +1672,7 @@ FROM raw_events_first
WHERE user_id IN (SELECT raw_events_second.user_id
FROM raw_events_second, raw_events_first
WHERE raw_events_second.user_id = raw_events_first.user_id AND raw_events_first.user_id = 200);
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM multi_insert_select.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM multi_insert_select.raw_events_second_13300004 raw_events_second, multi_insert_select.raw_events_first_13300000 raw_events_first_1 WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first_1.user_id) AND (raw_events_first_1.user_id OPERATOR(pg_catalog.=) 200)))) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -2022,6 +2045,7 @@ truncate raw_events_first;
SET client_min_messages TO DEBUG2;
-- now show that it works for a single shard query as well
INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5;
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT raw_events_second.user_id, raw_events_second."time", raw_events_second.value_1, raw_events_second.value_2, raw_events_second.value_3, raw_events_second.value_4 FROM multi_insert_select.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) 5) AND (raw_events_second.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300001 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300002 since SELECT query for it pruned away
@@ -2372,6 +2396,7 @@ SELECT
s, nextval('insert_select_test_seq'), (random()*10)::int
FROM
generate_series(1, 5) s;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
@@ -2389,6 +2414,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO raw_events_first (user_id, value_1)
SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s
ON CONFLICT DO NOTHING;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_first_13300000 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300000'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
@@ -2399,6 +2425,7 @@ DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_first_
INSERT INTO raw_events_first (user_id, value_1)
SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s
RETURNING *;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_first_13300000 AS citus_table_alias (user_id, value_1) SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('insert_select_XXX_13300000'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
@@ -3449,17 +3476,20 @@ ERROR: INSERT ... SELECT into an append-distributed table is not supported
-- verify that CTEs at top level of INSERT SELECT, that can normally be inlined, would not be inlined by INSERT SELECT pushdown planner
-- and handled by pull to coordinator.
SELECT coordinator_plan($$
- EXPLAIN (COSTS FALSE) WITH cte_1 AS (SELECT id FROM dist_table_5 WHERE id = 5)
+ EXPLAIN (COSTS FALSE) WITH cte_1 AS (SELECT id FROM dist_table_5 WHERE id > 5)
INSERT INTO dist_table_5
- SELECT id FROM dist_table_5 JOIN cte_1 USING(id);
+ SELECT id FROM dist_table_5 JOIN cte_1 USING(id) OFFSET 5;
$$);
- coordinator_plan
+ coordinator_plan
---------------------------------------------------------------------
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 1
-(4 rows)
+ -> Distributed Subplan XXX_1
+ -> Limit
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+(7 rows)
-- verify that CTEs at top level of SELECT part, would be inlined by Postgres and pushed down by INSERT SELECT planner.
SELECT coordinator_plan($$
diff --git a/src/test/regress/expected/multi_insert_select_0.out b/src/test/regress/expected/multi_insert_select_0.out
index a4988bceb..193c869b1 100644
--- a/src/test/regress/expected/multi_insert_select_0.out
+++ b/src/test/regress/expected/multi_insert_select_0.out
@@ -164,6 +164,7 @@ INSERT INTO raw_events_first (user_id, time) VALUES
-- try a single shard query
SET client_min_messages TO DEBUG2;
INSERT INTO raw_events_second (user_id, time) SELECT user_id, time FROM raw_events_first WHERE user_id = 7;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_second_13300005 AS citus_table_alias (user_id, "time") SELECT user_id, "time" FROM multi_insert_select.raw_events_first_13300001 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) 7) AND (user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -181,6 +182,7 @@ FROM
raw_events_first
WHERE
user_id = 8;
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_second_13300004 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM multi_insert_select.raw_events_first_13300000 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) 8) AND (user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -193,6 +195,7 @@ FROM
raw_events_first
WHERE
false;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -205,6 +208,7 @@ FROM
raw_events_first
WHERE
0 != 0;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -381,6 +385,7 @@ FROM raw_events_first
WHERE user_id IN (SELECT user_id
FROM raw_events_second
WHERE user_id = 2);
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -403,6 +408,7 @@ SELECT user_id
FROM raw_events_first
WHERE user_id IN (SELECT user_id
FROM raw_events_second WHERE false);
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -741,6 +747,18 @@ FROM
((SELECT user_id FROM raw_events_first WHERE user_id = 15) EXCEPT
(SELECT user_id FROM raw_events_second where user_id = 17)) as foo;
DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Distributed planning for a fast-path router query
+DEBUG: Creating router plan
+DEBUG: query has a single distribution column value: 15
+DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM multi_insert_select.raw_events_first WHERE (user_id OPERATOR(pg_catalog.=) 15)
+DEBUG: Distributed planning for a fast-path router query
+DEBUG: Creating router plan
+DEBUG: query has a single distribution column value: 17
+DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM multi_insert_select.raw_events_second WHERE (user_id OPERATOR(pg_catalog.=) 17)
+DEBUG: Creating router plan
+DEBUG: generating subplan XXX_3 for subquery SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) EXCEPT SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)
+DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
-- some supported LEFT joins
@@ -774,6 +792,7 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
FROM
raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id
WHERE raw_events_first.user_id = 10;
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO multi_insert_select.agg_events_13300008 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (multi_insert_select.raw_events_first_13300000 raw_events_first LEFT JOIN multi_insert_select.raw_events_second_13300004 raw_events_second ON ((raw_events_first.user_id OPERATOR(pg_catalog.=) raw_events_second.user_id))) WHERE ((raw_events_first.user_id OPERATOR(pg_catalog.=) 10) AND (raw_events_first.user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
@@ -794,6 +813,7 @@ DEBUG: distributed statement: INSERT INTO multi_insert_select.agg_events_133000
FROM
raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id
WHERE raw_events_first.user_id = 10 AND raw_events_first.user_id = 20;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300008 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
@@ -804,6 +824,7 @@ DEBUG: Skipping target shard interval 13300011 since SELECT query for it pruned
FROM
raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id
WHERE raw_events_first.user_id = 10 AND raw_events_second.user_id = 20;
+DEBUG: Creating router plan
DEBUG: Skipping target shard interval 13300008 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away
@@ -1253,6 +1274,7 @@ SELECT
user_id
FROM
reference_table;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Distributed planning for a fast-path router query
@@ -1650,6 +1672,7 @@ FROM raw_events_first
WHERE user_id IN (SELECT raw_events_second.user_id
FROM raw_events_second, raw_events_first
WHERE raw_events_second.user_id = raw_events_first.user_id AND raw_events_first.user_id = 200);
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM multi_insert_select.raw_events_first_13300000 raw_events_first WHERE ((user_id OPERATOR(pg_catalog.=) ANY (SELECT raw_events_second.user_id FROM multi_insert_select.raw_events_second_13300004 raw_events_second, multi_insert_select.raw_events_first_13300000 raw_events_first_1 WHERE ((raw_events_second.user_id OPERATOR(pg_catalog.=) raw_events_first_1.user_id) AND (raw_events_first_1.user_id OPERATOR(pg_catalog.=) 200)))) AND (user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away
@@ -2022,6 +2045,7 @@ truncate raw_events_first;
SET client_min_messages TO DEBUG2;
-- now show that it works for a single shard query as well
INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5;
+DEBUG: Creating router plan
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM multi_insert_select.raw_events_second_13300004 raw_events_second WHERE ((user_id OPERATOR(pg_catalog.=) 5) AND (user_id IS NOT NULL))
DEBUG: Skipping target shard interval 13300001 since SELECT query for it pruned away
DEBUG: Skipping target shard interval 13300002 since SELECT query for it pruned away
@@ -2372,6 +2396,7 @@ SELECT
s, nextval('insert_select_test_seq'), (random()*10)::int
FROM
generate_series(1, 5) s;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1;
@@ -2389,6 +2414,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
INSERT INTO raw_events_first (user_id, value_1)
SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s
ON CONFLICT DO NOTHING;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_first_13300000 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300000'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) ON CONFLICT DO NOTHING
@@ -2399,6 +2425,7 @@ DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_first_
INSERT INTO raw_events_first (user_id, value_1)
SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s
RETURNING *;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: distributed statement: INSERT INTO multi_insert_select.raw_events_first_13300000 AS citus_table_alias (user_id, value_1) SELECT user_id, value_1 FROM read_intermediate_result('insert_select_XXX_13300000'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4
@@ -3449,17 +3476,20 @@ ERROR: INSERT ... SELECT into an append-distributed table is not supported
-- verify that CTEs at top level of INSERT SELECT, that can normally be inlined, would not be inlined by INSERT SELECT pushdown planner
-- and handled by pull to coordinator.
SELECT coordinator_plan($$
- EXPLAIN (COSTS FALSE) WITH cte_1 AS (SELECT id FROM dist_table_5 WHERE id = 5)
+ EXPLAIN (COSTS FALSE) WITH cte_1 AS (SELECT id FROM dist_table_5 WHERE id > 5)
INSERT INTO dist_table_5
- SELECT id FROM dist_table_5 JOIN cte_1 USING(id);
+ SELECT id FROM dist_table_5 JOIN cte_1 USING(id) OFFSET 5;
$$);
- coordinator_plan
+ coordinator_plan
---------------------------------------------------------------------
Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive)
- Task Count: 1
-(4 rows)
+ -> Distributed Subplan XXX_1
+ -> Limit
+ -> Custom Scan (Citus Adaptive)
+ Task Count: 4
+(7 rows)
-- verify that CTEs at top level of SELECT part, would be inlined by Postgres and pushed down by INSERT SELECT planner.
SELECT coordinator_plan($$
diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out
index e85253031..cb8f0c0e1 100644
--- a/src/test/regress/expected/multi_metadata_sync.out
+++ b/src/test/regress/expected/multi_metadata_sync.out
@@ -533,7 +533,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
+ mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
(1 row)
SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
@@ -672,7 +672,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
+ mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
(1 row)
SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out
index 6e1ba6525..c81462e6f 100644
--- a/src/test/regress/expected/multi_metadata_sync_0.out
+++ b/src/test/regress/expected/multi_metadata_sync_0.out
@@ -533,7 +533,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
+ mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
(1 row)
SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
@@ -672,7 +672,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
+ mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
(1 row)
SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
diff --git a/src/test/regress/expected/multi_move_mx.out b/src/test/regress/expected/multi_move_mx.out
index b6cc5d0d7..b5aeec8ca 100644
--- a/src/test/regress/expected/multi_move_mx.out
+++ b/src/test/regress/expected/multi_move_mx.out
@@ -148,7 +148,7 @@ SELECT pg_reload_conf();
CREATE SUBSCRIPTION subs_01 CONNECTION 'host=''localhost'' port=57637'
PUBLICATION pub_01 WITH (citus_use_authinfo=true);
ERROR: could not connect to the publisher: root certificate file "/non/existing/certificate.crt" does not exist
-Either provide the file or change sslmode to disable server certificate verification.
+Either provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode to disable server certificate verification.
ALTER SYSTEM RESET citus.node_conninfo;
SELECT pg_reload_conf();
pg_reload_conf
diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out
index 6bdef048a..ac7f90826 100644
--- a/src/test/regress/expected/multi_mx_create_table.out
+++ b/src/test/regress/expected/multi_mx_create_table.out
@@ -53,7 +53,16 @@ CREATE OPERATOR citus_mx_test_schema.=== (
HASHES, MERGES
);
SET search_path TO public;
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+-- In PG16, read-only server settings lc_collate and lc_ctype are removed
+-- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
+SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
+\else
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
+\endif
CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale);
CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
CREATE TYPE order_side_mx AS ENUM ('buy', 'sell');
diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out
index 01d9736f2..116269a4e 100644
--- a/src/test/regress/expected/multi_mx_hide_shard_names.out
+++ b/src/test/regress/expected/multi_mx_hide_shard_names.out
@@ -425,9 +425,25 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
test_table_2_1130000
(4 rows)
+-- PG16 added one more backend type B_STANDALONE_BACKEND
+-- and also alphabetized the backend types, hence the orders changed
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+SELECT 4 AS client_backend \gset
+SELECT 5 AS bgworker \gset
+SELECT 12 AS walsender \gset
+\else
+SELECT 3 AS client_backend \gset
+SELECT 4 AS bgworker \gset
+SELECT 9 AS walsender \gset
+\endif
-- say, we set it to bgworker
-- the shards and indexes do not show up
-SELECT set_backend_type(4);
+SELECT set_backend_type(:bgworker);
NOTICE: backend type switched to: background worker
set_backend_type
---------------------------------------------------------------------
@@ -445,7 +461,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
-- or, we set it to walsender
-- the shards and indexes do not show up
-SELECT set_backend_type(9);
+SELECT set_backend_type(:walsender);
NOTICE: backend type switched to: walsender
set_backend_type
---------------------------------------------------------------------
@@ -480,7 +496,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
RESET application_name;
-- but, client backends to see the shards
-SELECT set_backend_type(3);
+SELECT set_backend_type(:client_backend);
NOTICE: backend type switched to: client backend
set_backend_type
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out
index cba5b8181..e7855a898 100644
--- a/src/test/regress/expected/multi_mx_router_planner.out
+++ b/src/test/regress/expected/multi_mx_router_planner.out
@@ -275,7 +275,7 @@ id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2)
SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
DEBUG: CTE id_author is going to be inlined via distributed planning
DEBUG: CTE id_title is going to be inlined via distributed planning
-DEBUG: cannot run command which targets multiple shards
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: Creating router plan
DEBUG: query has a single distribution column value: 2
DEBUG: generating subplan XXX_1 for subquery SELECT id, title FROM public.articles_hash_mx WHERE (author_id OPERATOR(pg_catalog.=) 2)
@@ -385,7 +385,7 @@ WITH RECURSIVE hierarchy as (
h.company_id = ce.company_id AND
ce.company_id = 2))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
-DEBUG: cannot run command which targets multiple shards
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
ERROR: recursive CTEs are not supported in distributed queries
-- grouping sets are supported on single shard
SELECT
diff --git a/src/test/regress/expected/multi_orderby_limit_pushdown.out b/src/test/regress/expected/multi_orderby_limit_pushdown.out
index 9d67c9810..0a625e47f 100644
--- a/src/test/regress/expected/multi_orderby_limit_pushdown.out
+++ b/src/test/regress/expected/multi_orderby_limit_pushdown.out
@@ -374,17 +374,17 @@ LIMIT 2;
(2 rows)
EXPLAIN (COSTS OFF)
-SELECT ut.user_id, count(DISTINCT ut.value_2)
+SELECT ut.user_id, avg(ut.value_2)
FROM users_table ut, events_table et
WHERE ut.user_id = et.user_id and et.value_2 < 5
GROUP BY ut.user_id
ORDER BY 2, AVG(ut.value_1), 1 DESC
LIMIT 5;
- QUERY PLAN
+ QUERY PLAN
---------------------------------------------------------------------
Limit
-> Sort
- Sort Key: remote_scan.count, remote_scan.worker_column_3, remote_scan.user_id DESC
+ Sort Key: remote_scan.avg, remote_scan.worker_column_3, remote_scan.user_id DESC
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
@@ -392,16 +392,14 @@ LIMIT 5;
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
- Sort Key: (count(DISTINCT ut.value_2)), (avg(ut.value_1)), ut.user_id DESC
- -> GroupAggregate
+ Sort Key: (avg(ut.value_2)), (avg(ut.value_1)), ut.user_id DESC
+ -> HashAggregate
Group Key: ut.user_id
- -> Sort
- Sort Key: ut.user_id DESC
- -> Hash Join
- Hash Cond: (ut.user_id = et.user_id)
- -> Seq Scan on users_table_1400256 ut
- -> Hash
- -> Seq Scan on events_table_1400260 et
- Filter: (value_2 < 5)
-(21 rows)
+ -> Hash Join
+ Hash Cond: (ut.user_id = et.user_id)
+ -> Seq Scan on users_table_1400256 ut
+ -> Hash
+ -> Seq Scan on events_table_1400260 et
+ Filter: (value_2 < 5)
+(19 rows)
diff --git a/src/test/regress/expected/multi_prune_shard_list.out b/src/test/regress/expected/multi_prune_shard_list.out
index 7bbfaeb88..3762bd05d 100644
--- a/src/test/regress/expected/multi_prune_shard_list.out
+++ b/src/test/regress/expected/multi_prune_shard_list.out
@@ -86,7 +86,7 @@ SELECT prune_using_both_values('pruning', 'tomato', 'rose');
SELECT debug_equality_expression('pruning');
debug_equality_expression
---------------------------------------------------------------------
- {OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1}
+ {OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1}
(1 row)
-- print the initial ordering of shard intervals
diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out
index 3d8d8a787..4b72a439b 100644
--- a/src/test/regress/expected/multi_replicate_reference_table.out
+++ b/src/test/regress/expected/multi_replicate_reference_table.out
@@ -793,7 +793,7 @@ WHERE
ORDER BY 1,4,5;
shardid | shardstate | shardlength | nodename | nodeport
---------------------------------------------------------------------
- 1370021 | 1 | 0 | localhost | 57637
+ 1370019 | 1 | 0 | localhost | 57637
(1 row)
-- we should see the two shard placements after activation
@@ -818,7 +818,7 @@ WHERE
ORDER BY 1,4,5;
shardid | shardstate | shardlength | nodename | nodeport
---------------------------------------------------------------------
- 1370021 | 1 | 0 | localhost | 57637
+ 1370019 | 1 | 0 | localhost | 57637
(1 row)
SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out
index edfc728db..c6d46ccc9 100644
--- a/src/test/regress/expected/multi_router_planner.out
+++ b/src/test/regress/expected/multi_router_planner.out
@@ -357,7 +357,7 @@ DEBUG: Creating router plan
WITH id_author AS MATERIALIZED ( SELECT id, author_id FROM articles_hash WHERE author_id = 1),
id_title AS MATERIALIZED (SELECT id, title from articles_hash WHERE author_id = 2)
SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
-DEBUG: cannot run command which targets multiple shards
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: generating subplan XXX_1 for CTE id_author: SELECT id, author_id FROM multi_router_planner.articles_hash WHERE (author_id OPERATOR(pg_catalog.=) 1)
DEBUG: Creating router plan
DEBUG: query has a single distribution column value: 1
@@ -450,7 +450,7 @@ WITH RECURSIVE hierarchy as MATERIALIZED (
h.company_id = ce.company_id AND
ce.company_id = 2))
SELECT * FROM hierarchy WHERE LEVEL <= 2;
-DEBUG: cannot run command which targets multiple shards
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
ERROR: recursive CTEs are not supported in distributed queries
-- Test router modifying CTEs
WITH new_article AS MATERIALIZED(
@@ -1505,7 +1505,7 @@ SET citus.enable_non_colocated_router_query_pushdown TO OFF;
SELECT a.author_id as first_author, b.word_count as second_word_count
FROM articles_hash a, articles_single_shard_hash b
WHERE a.author_id = 10 and a.author_id = b.author_id and false;
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Creating router plan
first_author | second_word_count
---------------------------------------------------------------------
(0 rows)
@@ -1599,7 +1599,7 @@ HINT: Set citus.enable_repartition_joins to on to enable repartitioning
SELECT a.author_id as first_author, b.word_count as second_word_count
FROM articles_hash a, articles_single_shard_hash b
WHERE a.author_id = 10 and a.author_id = b.author_id and int4eq(1, 2);
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Creating router plan
first_author | second_word_count
---------------------------------------------------------------------
(0 rows)
@@ -1637,7 +1637,7 @@ SELECT a.author_id as first_author, b.word_count as second_word_count
FROM articles_hash a, articles_single_shard_hash b
WHERE a.author_id = 10 and a.author_id = b.author_id and
date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp);
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Creating router plan
first_author | second_word_count
---------------------------------------------------------------------
(0 rows)
diff --git a/src/test/regress/expected/multi_router_planner_fast_path.out b/src/test/regress/expected/multi_router_planner_fast_path.out
index 474d4a107..25cc8a1a7 100644
--- a/src/test/regress/expected/multi_router_planner_fast_path.out
+++ b/src/test/regress/expected/multi_router_planner_fast_path.out
@@ -220,7 +220,7 @@ id_title AS (SELECT id, title from articles_hash WHERE author_id = 2)
SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
DEBUG: CTE id_author is going to be inlined via distributed planning
DEBUG: CTE id_title is going to be inlined via distributed planning
-DEBUG: cannot run command which targets multiple shards
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: query has a single distribution column value: 2
@@ -1874,40 +1874,52 @@ DEBUG: Creating router plan
-- use fast-path queries
PREPARE insert_sel(int, int) AS
INSERT INTO articles_hash
- SELECT * FROM articles_hash WHERE author_id = $2 AND word_count = $1 OFFSET 0;
+ SELECT max(id), max(author_id), title, word_count FROM articles_hash WHERE author_id = $2 AND word_count = $1 GROUP BY title, word_count;
EXECUTE insert_sel(1,1);
-DEBUG: cannot push down this subquery
-DETAIL: Offset clause is currently unsupported when a subquery references a column from another query
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: Subquery contains an aggregation in the same position as the target table's partition column.
+HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Deferred pruning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
EXECUTE insert_sel(1,1);
-DEBUG: cannot push down this subquery
-DETAIL: Offset clause is currently unsupported when a subquery references a column from another query
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: Subquery contains an aggregation in the same position as the target table's partition column.
+HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Deferred pruning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
EXECUTE insert_sel(1,1);
-DEBUG: cannot push down this subquery
-DETAIL: Offset clause is currently unsupported when a subquery references a column from another query
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: Subquery contains an aggregation in the same position as the target table's partition column.
+HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Deferred pruning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
EXECUTE insert_sel(1,1);
-DEBUG: cannot push down this subquery
-DETAIL: Offset clause is currently unsupported when a subquery references a column from another query
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: Subquery contains an aggregation in the same position as the target table's partition column.
+HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Deferred pruning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
EXECUTE insert_sel(1,1);
-DEBUG: cannot push down this subquery
-DETAIL: Offset clause is currently unsupported when a subquery references a column from another query
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: Subquery contains an aggregation in the same position as the target table's partition column.
+HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Deferred pruning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
EXECUTE insert_sel(1,1);
-DEBUG: cannot push down this subquery
-DETAIL: Offset clause is currently unsupported when a subquery references a column from another query
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: Subquery contains an aggregation in the same position as the target table's partition column.
+HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Deferred pruning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
diff --git a/src/test/regress/expected/multi_row_router_insert.out b/src/test/regress/expected/multi_row_router_insert.out
index d5af7e467..b765cf370 100644
--- a/src/test/regress/expected/multi_row_router_insert.out
+++ b/src/test/regress/expected/multi_row_router_insert.out
@@ -89,9 +89,9 @@ NOTICE: executing the command locally: INSERT INTO multi_row_router_insert.citu
INSERT INTO citus_local_table (a) VALUES (12), (13);
NOTICE: executing the command locally: INSERT INTO multi_row_router_insert.citus_local_table_1511001 AS citus_table_alias (a, b) VALUES (12,100), (13,100)
ALTER TABLE citus_local_table ADD COLUMN c INT DEFAULT to_number('5', '91');
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511001, 'multi_row_router_insert', 'ALTER TABLE citus_local_table ADD COLUMN c INT DEFAULT to_number(''5'', ''91'');')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511001, 'multi_row_router_insert', 'ALTER TABLE citus_local_table ADD COLUMN c integer DEFAULT to_number(''5''::text, ''91''::text);')
ALTER TABLE citus_local_table ADD COLUMN d INT;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511001, 'multi_row_router_insert', 'ALTER TABLE citus_local_table ADD COLUMN d INT;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1511001, 'multi_row_router_insert', 'ALTER TABLE citus_local_table ADD COLUMN d integer;')
INSERT INTO citus_local_table (d, a, b) VALUES (13, 14, 15), (16, 17, 18), (19, 20, 21);
NOTICE: executing the command locally: INSERT INTO multi_row_router_insert.citus_local_table_1511001 AS citus_table_alias (a, b, c, d) VALUES (14,15,5,13), (17,18,5,16), (20,21,5,19)
SELECT * FROM citus_local_table ORDER BY 1,2,3,4;
diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out
index f39f5f2b1..2de95266b 100644
--- a/src/test/regress/expected/multi_schema_support.out
+++ b/src/test/regress/expected/multi_schema_support.out
@@ -347,7 +347,16 @@ SELECT * FROM nation_hash ORDER BY 1,2,3,4;
--test COLLATION with schema
SET search_path TO public;
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+-- In PG16, read-only server settings lc_collate and lc_ctype are removed
+-- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
+SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
+\else
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
+\endif
CREATE COLLATION test_schema_support.english (LOCALE = :current_locale);
\c - - - :master_port
SET citus.shard_replication_factor TO 2;
@@ -1384,6 +1393,284 @@ BEGIN;
ALTER SCHEMA bar RENAME TO foo;
ROLLBACK;
+-- below tests are to verify dependency propagation with nested sub-transactions
+-- TEST1
+BEGIN;
+ CREATE SCHEMA sc1;
+ CREATE SEQUENCE sc1.seq;
+ CREATE TABLE sc1.s1(id int default(nextval('sc1.seq')));
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to sequence sc1.seq
+drop cascades to table sc1.s1
+-- TEST2
+CREATE SCHEMA sc1;
+BEGIN;
+ CREATE SEQUENCE sc1.seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('sc1.seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to sequence sc1.seq1
+drop cascades to table sc1.s1
+-- TEST3
+SET citus.enable_metadata_sync TO off;
+CREATE SCHEMA sc1;
+SET citus.enable_metadata_sync TO on;
+BEGIN;
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+-- TEST4
+BEGIN;
+ SAVEPOINT sp1;
+ CREATE SCHEMA sc1;
+ ROLLBACK TO SAVEPOINT sp1;
+ SET LOCAL citus.enable_metadata_sync TO off;
+ CREATE SCHEMA sc1;
+ SET LOCAL citus.enable_metadata_sync TO on;
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+-- TEST5
+BEGIN;
+ SAVEPOINT sp1;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp1;
+ CREATE SEQUENCE seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+DROP SEQUENCE seq1;
+-- TEST6
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc1;
+ ROLLBACK TO SAVEPOINT sp2;
+ RELEASE SAVEPOINT sp1;
+ SET LOCAL citus.enable_metadata_sync TO off;
+ CREATE SCHEMA sc1;
+ SET LOCAL citus.enable_metadata_sync TO on;
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+-- TEST7
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp2;
+ RELEASE SAVEPOINT sp1;
+ CREATE SEQUENCE seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+DROP SEQUENCE seq1;
+-- TEST8
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp2;
+ ROLLBACK TO SAVEPOINT sp1;
+ SET LOCAL citus.enable_metadata_sync TO off;
+ CREATE SCHEMA sc1;
+ SET LOCAL citus.enable_metadata_sync TO on;
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+-- TEST9
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc2;
+ ROLLBACK TO SAVEPOINT sp2;
+ SAVEPOINT sp3;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp3;
+ RELEASE SAVEPOINT sp1;
+ CREATE SEQUENCE seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+DROP SEQUENCE seq1;
+-- TEST10
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc2;
+ RELEASE SAVEPOINT sp2;
+ SAVEPOINT sp3;
+ CREATE SCHEMA sc3;
+ SAVEPOINT sp4;
+ CREATE SCHEMA sc1;
+ ROLLBACK TO SAVEPOINT sp4;
+ RELEASE SAVEPOINT sp3;
+ RELEASE SAVEPOINT sp1;
+ SET LOCAL citus.enable_metadata_sync TO off;
+ CREATE SCHEMA sc1;
+ SET LOCAL citus.enable_metadata_sync TO on;
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+DROP SCHEMA sc2 CASCADE;
+DROP SCHEMA sc3 CASCADE;
+-- TEST11
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc2;
+ RELEASE SAVEPOINT sp2;
+ SAVEPOINT sp3;
+ CREATE SCHEMA sc3;
+ SAVEPOINT sp4;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp4;
+ RELEASE SAVEPOINT sp3;
+ RELEASE SAVEPOINT sp1;
+ CREATE SEQUENCE seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+DROP SCHEMA sc2 CASCADE;
+DROP SCHEMA sc3 CASCADE;
+DROP SEQUENCE seq1;
+-- TEST12
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc2;
+ RELEASE SAVEPOINT sp2;
+ SAVEPOINT sp3;
+ CREATE SCHEMA sc3;
+ SAVEPOINT sp4;
+ CREATE SEQUENCE seq1;
+ CREATE SCHEMA sc1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ RELEASE SAVEPOINT sp4;
+ RELEASE SAVEPOINT sp3;
+ RELEASE SAVEPOINT sp1;
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+NOTICE: drop cascades to table sc1.s1
+DROP SCHEMA sc2 CASCADE;
+DROP SCHEMA sc3 CASCADE;
+DROP SEQUENCE seq1;
+-- issue-6614
+CREATE FUNCTION create_schema_test() RETURNS void AS $$
+BEGIN
+ SET citus.create_object_propagation = 'deferred';
+ CREATE SCHEMA test_1;
+ CREATE TABLE test_1.test (
+ id bigserial constraint test_pk primary key,
+ creation_date timestamp constraint test_creation_date_df default timezone('UTC'::text, CURRENT_TIMESTAMP) not null
+ );
+ PERFORM create_reference_table('test_1.test');
+ RETURN;
+END;
+$$ LANGUAGE plpgsql;
+SELECT create_schema_test();
+ create_schema_test
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT result FROM run_command_on_all_nodes($$ SELECT COUNT(*) = 1 FROM pg_dist_partition WHERE logicalrelid = 'test_1.test'::regclass $$);
+ result
+---------------------------------------------------------------------
+ t
+ t
+ t
+(3 rows)
+
+DROP FUNCTION create_schema_test;
+DROP SCHEMA test_1 CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to table test_1.test
+drop cascades to table test_1.test_1197064
-- Clean up the created schema
SET client_min_messages TO WARNING;
SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM pg_catalog.pg_dist_object
diff --git a/src/test/regress/expected/multi_select_distinct.out b/src/test/regress/expected/multi_select_distinct.out
index 75d47026b..689adcc8a 100644
--- a/src/test/regress/expected/multi_select_distinct.out
+++ b/src/test/regress/expected/multi_select_distinct.out
@@ -813,7 +813,7 @@ SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
EXPLAIN (COSTS FALSE)
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
FROM lineitem_hash_part
- GROUP BY l_orderkey
+ GROUP BY l_orderkey, l_partkey, l_shipmode
ORDER BY 1,2;
QUERY PLAN
---------------------------------------------------------------------
@@ -827,9 +827,9 @@ EXPLAIN (COSTS FALSE)
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
- Group Key: l_orderkey
+ Group Key: l_orderkey, l_partkey, l_shipmode
-> Sort
- Sort Key: l_orderkey
+ Sort Key: l_orderkey, l_partkey, l_shipmode
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(14 rows)
@@ -839,7 +839,7 @@ SET enable_hashagg TO off;
EXPLAIN (COSTS FALSE)
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
FROM lineitem_hash_part
- GROUP BY l_orderkey
+ GROUP BY l_orderkey, l_partkey, l_shipmode
ORDER BY 1,2;
QUERY PLAN
---------------------------------------------------------------------
@@ -852,9 +852,9 @@ EXPLAIN (COSTS FALSE)
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
- Group Key: l_orderkey
+ Group Key: l_orderkey, l_partkey, l_shipmode
-> Sort
- Sort Key: l_orderkey
+ Sort Key: l_orderkey, l_partkey, l_shipmode
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(13 rows)
diff --git a/src/test/regress/expected/multi_sequence_default.out b/src/test/regress/expected/multi_sequence_default.out
index 1cf2e806d..8a984f884 100644
--- a/src/test/regress/expected/multi_sequence_default.out
+++ b/src/test/regress/expected/multi_sequence_default.out
@@ -143,6 +143,8 @@ SELECT create_distributed_table('seq_test_4','x');
CREATE SEQUENCE seq_4;
ALTER TABLE seq_test_4 ADD COLUMN a bigint DEFAULT nextval('seq_4');
+ALTER TABLE seq_test_4 ADD COLUMN IF NOT EXISTS a bigint DEFAULT nextval('seq_4');
+NOTICE: column "a" of relation "seq_test_4" already exists, skipping
DROP SEQUENCE seq_4 CASCADE;
NOTICE: drop cascades to default value for column a of table seq_test_4
TRUNCATE seq_test_4;
@@ -879,7 +881,7 @@ ROLLBACK;
-- Show that existing sequence has been renamed and a new sequence with the same name
-- created for another type
\c - - - :worker_1_port
-SELECT seqrelid::regclass, seqtypid::regtype, seqmax, seqmin FROM pg_sequence WHERE seqrelid::regclass::text like '%sequence_rollback%' ORDER BY 1,2;
+SELECT seqrelid::regclass, seqtypid::regtype, seqmax, seqmin FROM pg_sequence WHERE seqrelid::regclass::text in ('sequence_rollback', '"sequence_rollback(citus_backup_0)"') ORDER BY 1,2;
seqrelid | seqtypid | seqmax | seqmin
---------------------------------------------------------------------
"sequence_rollback(citus_backup_0)" | integer | 2147483647 | 1
diff --git a/src/test/regress/expected/multi_subquery.out b/src/test/regress/expected/multi_subquery.out
index f4c4ccc21..60f978f5e 100644
--- a/src/test/regress/expected/multi_subquery.out
+++ b/src/test/regress/expected/multi_subquery.out
@@ -1062,7 +1062,7 @@ SELECT count(*) FROM keyval1 GROUP BY key HAVING sum(value) > (SELECT sum(value)
(26 rows)
EXPLAIN (COSTS OFF)
-SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
+SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 ORDER BY 1 DESC LIMIT 1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
@@ -1070,20 +1070,18 @@ SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value)
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
- -> GroupAggregate
- Group Key: k1.key
+ -> Aggregate
Filter: (sum(k1.value) > $0)
InitPlan 1 (returns $0)
-> Limit
-> Sort
Sort Key: (sum(k2.value)) DESC
- -> GroupAggregate
- Group Key: k2.key
+ -> Aggregate
-> Seq Scan on keyval2_xxxxxxx k2
Filter: (key = 2)
-> Seq Scan on keyval1_xxxxxxx k1
Filter: (key = 2)
-(18 rows)
+(16 rows)
-- Simple join subquery pushdown
SELECT
diff --git a/src/test/regress/expected/multi_subquery_in_where_reference_clause.out b/src/test/regress/expected/multi_subquery_in_where_reference_clause.out
index 52cbe3917..0f656ee0b 100644
--- a/src/test/regress/expected/multi_subquery_in_where_reference_clause.out
+++ b/src/test/regress/expected/multi_subquery_in_where_reference_clause.out
@@ -152,7 +152,7 @@ SELECT
FROM
users_table RIGHT JOIN users_reference_table USING (user_id)
WHERE
- users_table.value_2 IN
+ users_reference_table.value_2 IN
(SELECT
value_2
FROM
diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out
index 55f286d1f..b8758e561 100644
--- a/src/test/regress/expected/multi_test_helpers.out
+++ b/src/test/regress/expected/multi_test_helpers.out
@@ -167,3 +167,362 @@ BEGIN
EXECUTE 'SELECT COUNT(*) FROM pg_catalog.pg_dist_cleanup' INTO record_count;
END LOOP;
END$$ LANGUAGE plpgsql;
+-- Returns the foreign keys where the referencing relation's name starts with
+-- the given prefix.
+--
+-- Foreign keys are grouped by their configurations, and then the constraint name,
+-- referencing table, and referenced table for each distinct configuration are
+-- aggregated into arrays.
+CREATE OR REPLACE FUNCTION get_grouped_fkey_constraints(referencing_relname_prefix text)
+RETURNS jsonb AS $func$
+ DECLARE
+ confdelsetcols_column_ref text;
+ get_grouped_fkey_constraints_query text;
+ result jsonb;
+ BEGIN
+ -- Read confdelsetcols as null if no such column exists.
+ -- This can only be the case for PG versions < 15.
+ IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_constraint'::regclass AND attname='confdelsetcols')
+ THEN
+ confdelsetcols_column_ref := '(SELECT array_agg(attname ORDER BY attnum) FROM pg_attribute WHERE attrelid = conrelid AND attnum = ANY(confdelsetcols))';
+ ELSE
+ confdelsetcols_column_ref := '(SELECT null::smallint[])';
+ END IF;
+
+ EXECUTE format(
+ $$
+ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.constraint_names) AS fkeys_with_different_config FROM (
+ SELECT array_agg(constraint_name ORDER BY constraint_oid) AS constraint_names,
+ array_agg(referencing_table::regclass::text ORDER BY constraint_oid) AS referencing_tables,
+ array_agg(referenced_table::regclass::text ORDER BY constraint_oid) AS referenced_tables,
+ referencing_columns, referenced_columns, deferable, deferred, on_update, on_delete, match_type, referencing_columns_set_null_or_default
+ FROM (
+ SELECT
+ oid AS constraint_oid,
+ conname AS constraint_name,
+ conrelid AS referencing_table,
+ (SELECT array_agg(attname ORDER BY attnum) FROM pg_attribute WHERE attrelid = conrelid AND attnum = ANY(conkey)) AS referencing_columns,
+ confrelid AS referenced_table,
+ (SELECT array_agg(attname ORDER BY attnum) FROM pg_attribute WHERE attrelid = confrelid AND attnum = ANY(confkey)) AS referenced_columns,
+ condeferrable AS deferable,
+ condeferred AS deferred,
+ confupdtype AS on_update,
+ confdeltype AS on_delete,
+ confmatchtype AS match_type,
+ %2$s AS referencing_columns_set_null_or_default
+ FROM pg_constraint WHERE starts_with(conrelid::regclass::text, '%1$s') AND contype = 'f'
+ ) q2
+ GROUP BY referencing_columns, referenced_columns, deferable, deferred, on_update, on_delete, match_type, referencing_columns_set_null_or_default
+ ) q1
+ $$,
+ referencing_relname_prefix,
+ confdelsetcols_column_ref
+ ) INTO result;
+ RETURN result;
+ END;
+$func$ LANGUAGE plpgsql;
+CREATE OR REPLACE FUNCTION get_index_defs(schemaname text, tablename text)
+RETURNS jsonb AS $func$
+ DECLARE
+ result jsonb;
+ indnullsnotdistinct_column_ref text;
+ BEGIN
+ -- Not use indnullsnotdistinct in group by clause if no such column exists.
+ -- This can only be the case for PG versions < 15.
+ IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_index'::regclass AND attname='indnullsnotdistinct')
+ THEN
+ indnullsnotdistinct_column_ref := ',indnullsnotdistinct';
+ ELSE
+ indnullsnotdistinct_column_ref := '';
+ END IF;
+
+ EXECUTE format(
+ $$
+ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.indexnames) AS index_defs FROM (
+ SELECT array_agg(indexname ORDER BY indexrelid) AS indexnames,
+ array_agg(indexdef ORDER BY indexrelid) AS indexdefs
+ FROM pg_indexes
+ JOIN pg_index
+ ON (indexrelid = (schemaname || '.' || indexname)::regclass)
+ WHERE schemaname = '%1$s' AND starts_with(tablename, '%2$s')
+ GROUP BY indnatts, indnkeyatts, indisunique, indisprimary, indisexclusion,
+ indimmediate, indisclustered, indisvalid, indisready, indislive,
+ indisreplident, indkey, indcollation, indclass, indoption, indexprs,
+ indpred %3$s
+ ) q1
+ $$,
+ schemaname, tablename, indnullsnotdistinct_column_ref) INTO result;
+ RETURN result;
+ END;
+$func$ LANGUAGE plpgsql;
+CREATE OR REPLACE FUNCTION get_column_defaults(schemaname text, tablename text)
+RETURNS jsonb AS $func$
+ DECLARE
+ result jsonb;
+ BEGIN
+ EXECUTE format(
+ $$
+ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.column_name) AS column_defs FROM (
+ SELECT column_name, column_default::text, generation_expression::text
+ FROM information_schema.columns
+ WHERE table_schema = '%1$s' AND table_name = '%2$s' AND
+ column_default IS NOT NULL OR generation_expression IS NOT NULL
+ ) q1
+ $$,
+ schemaname, tablename) INTO result;
+ RETURN result;
+ END;
+$func$ LANGUAGE plpgsql;
+CREATE OR REPLACE FUNCTION get_column_attrs(relname_prefix text)
+RETURNS jsonb AS $func$
+ DECLARE
+ result jsonb;
+ BEGIN
+ EXECUTE format(
+ $$
+ SELECT to_jsonb(q2.*) FROM (
+ SELECT relnames, jsonb_agg(to_jsonb(q1.*) - 'relnames' ORDER BY q1.column_name) AS column_attrs FROM (
+ SELECT array_agg(attrelid::regclass::text ORDER BY attrelid) AS relnames,
+ attname AS column_name, typname AS type_name, collname AS collation_name, attcompression AS compression_method, attnotnull AS not_null
+ FROM pg_attribute pa
+ LEFT JOIN pg_type pt ON (pa.atttypid = pt.oid)
+ LEFT JOIN pg_collation pc1 ON (pa.attcollation = pc1.oid)
+ JOIN pg_class pc2 ON (pa.attrelid = pc2.oid)
+ WHERE starts_with(attrelid::regclass::text, '%1$s') AND
+ attnum > 0 AND NOT attisdropped AND relkind = 'r'
+ GROUP BY column_name, type_name, collation_name, compression_method, not_null
+ ) q1
+ GROUP BY relnames
+ ) q2
+ $$,
+ relname_prefix) INTO result;
+ RETURN result;
+ END;
+$func$ LANGUAGE plpgsql;
+-- Returns true if all shard placements of the given table have the given number of indexes.
+CREATE OR REPLACE FUNCTION verify_index_count_on_shard_placements(
+ qualified_table_name text,
+ n_expected_indexes int)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ v_result boolean;
+BEGIN
+ SELECT n_expected_indexes = ALL(
+ SELECT result::int INTO v_result
+ FROM run_command_on_placements(
+ qualified_table_name,
+ $$SELECT COUNT(*) FROM pg_index WHERE indrelid::regclass = '%s'::regclass$$
+ )
+ );
+ RETURN v_result;
+END;
+$func$ LANGUAGE plpgsql;
+-- Returns the names of the foreign keys that the shards of the given table are involved in
+-- (as either the referencing or the referenced relation).
+CREATE OR REPLACE FUNCTION get_fkey_names_on_placements(
+ qualified_table_name text)
+RETURNS TABLE (
+ on_node text,
+ shard_id bigint,
+ fkey_names text[]
+)
+AS $func$
+BEGIN
+ RETURN QUERY SELECT
+ CASE WHEN groupid = 0 THEN 'on_coordinator' ELSE 'on_worker' END AS on_node_col,
+ shardid,
+ (CASE WHEN result = '' THEN '{}' ELSE result END)::text[] AS fkey_names_col
+ FROM run_command_on_placements(
+ qualified_table_name,
+ $$SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE '%s'::regclass IN (conrelid, confrelid) AND contype = 'f'$$
+ )
+ JOIN pg_dist_node USING (nodename, nodeport);
+END;
+$func$ LANGUAGE plpgsql;
+-- Returns true if all shard placements of the given table have the given number of partitions.
+CREATE OR REPLACE FUNCTION verify_partition_count_on_placements(
+ qualified_table_name text,
+ n_expected_partitions int)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ v_result boolean;
+BEGIN
+ SELECT n_expected_partitions = ALL(
+ SELECT result::int INTO v_result
+ FROM run_command_on_placements(
+ qualified_table_name,
+ $$SELECT COUNT(*) FROM pg_inherits WHERE inhparent = '%s'::regclass;$$
+ )
+ );
+ RETURN v_result;
+END;
+$func$ LANGUAGE plpgsql;
+-- This function checks pg_dist_placement on all nodes and returns true if the following holds:
+-- The shard is placed either on the coordinator or on a primary worker node, and this matches the expectation.
+-- The given shardid is used for the shard placement of the table.
+-- Placement metadata is correct on all nodes.
+CREATE OR REPLACE FUNCTION verify_shard_placement_for_single_shard_table(
+ qualified_table_name text,
+ expected_shard_id bigint,
+ expect_placement_on_coord boolean)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ verify_workers_query text;
+ nodename_nodeport_groupid record;
+ result boolean;
+BEGIN
+ SELECT nodename, nodeport, groupid INTO nodename_nodeport_groupid
+ FROM pg_dist_shard
+ JOIN pg_dist_placement USING (shardid)
+ JOIN pg_dist_node USING (groupid)
+ WHERE noderole = 'primary' AND shouldhaveshards AND isactive AND
+ logicalrelid = qualified_table_name::regclass AND shardid = expected_shard_id;
+
+ IF nodename_nodeport_groupid IS NULL
+ THEN
+ RAISE NOTICE 'Shard placement is not on a primary worker node';
+ RETURN false;
+ END IF;
+
+ IF (nodename_nodeport_groupid.groupid = 0) != expect_placement_on_coord
+ THEN
+ RAISE NOTICE 'Shard placement is on an unexpected node';
+ RETURN false;
+ END IF;
+
+ -- verify that metadata on workers is correct too
+ SELECT format(
+ 'SELECT true = ALL(
+ SELECT result::boolean FROM run_command_on_workers($$
+ SELECT COUNT(*) = 1
+ FROM pg_dist_shard
+ JOIN pg_dist_placement USING (shardid)
+ JOIN pg_dist_node USING (groupid)
+ WHERE logicalrelid = ''%s''::regclass AND
+ shardid = %s AND
+ nodename = ''%s'' AND
+ nodeport = %s AND
+ groupid = %s
+ $$)
+ );',
+ qualified_table_name, expected_shard_id,
+ nodename_nodeport_groupid.nodename,
+ nodename_nodeport_groupid.nodeport,
+ nodename_nodeport_groupid.groupid
+ )
+ INTO verify_workers_query;
+
+ EXECUTE verify_workers_query INTO result;
+ RETURN result;
+END;
+$func$ LANGUAGE plpgsql;
+-- This function checks pg_dist_placement on all nodes and returns true if the following holds:
+-- Shard placements exist on the coordinator and on all primary worker nodes.
+-- The given shardid is used for the shard placements of the table.
+-- The given placementid is used for the coordinator shard placement.
+-- Placement metadata is correct on all nodes.
+CREATE OR REPLACE FUNCTION verify_shard_placements_for_reference_table(
+ qualified_table_name text,
+ expected_shard_id bigint,
+ expected_coord_placement_id bigint)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ verify_workers_query text;
+ result boolean;
+BEGIN
+ SELECT format(
+ 'SELECT true = ALL(
+ SELECT result::boolean FROM run_command_on_all_nodes($$
+ SELECT
+ (SELECT COUNT(*) FROM pg_dist_node WHERE noderole = ''primary'' AND isactive) =
+ (SELECT COUNT(*)
+ FROM pg_dist_shard
+ JOIN pg_dist_placement USING (shardid)
+ JOIN pg_dist_node USING (groupid)
+ WHERE noderole = ''primary'' AND isactive AND
+ logicalrelid = ''%s''::regclass AND shardid = %s)
+ AND
+ (SELECT COUNT(*) = 1
+ FROM pg_dist_shard
+ JOIN pg_dist_placement USING (shardid)
+ JOIN pg_dist_node USING (groupid)
+ WHERE noderole = ''primary'' AND isactive AND
+ logicalrelid = ''%s''::regclass AND shardid = %s AND
+ placementid = %s AND groupid = 0)
+
+ $$)
+ );',
+ qualified_table_name, expected_shard_id,
+ qualified_table_name, expected_shard_id,
+ expected_coord_placement_id
+ )
+ INTO verify_workers_query;
+
+ EXECUTE verify_workers_query INTO result;
+ RETURN result;
+END;
+$func$ LANGUAGE plpgsql;
+-- This function checks pg_dist_partition on all nodes and returns true if the metadata
+-- record for the given single-shard table is correct.
+CREATE OR REPLACE FUNCTION verify_pg_dist_partition_for_single_shard_table(
+ qualified_table_name text)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ verify_workers_query text;
+ result boolean;
+BEGIN
+ SELECT format(
+ 'SELECT true = ALL(
+ SELECT result::boolean FROM run_command_on_all_nodes($$
+ SELECT COUNT(*) = 1
+ FROM pg_dist_partition
+ WHERE logicalrelid = ''%s''::regclass AND
+ partmethod = ''n'' AND
+ partkey IS NULL AND
+ colocationid > 0 AND
+ repmodel = ''s'' AND
+ autoconverted = false
+ $$)
+ );',
+ qualified_table_name)
+ INTO verify_workers_query;
+
+ EXECUTE verify_workers_query INTO result;
+ RETURN result;
+END;
+$func$ LANGUAGE plpgsql;
+-- This function checks pg_dist_partition on all nodes and returns true if the metadata
+-- record for the given reference table is correct.
+CREATE OR REPLACE FUNCTION verify_pg_dist_partition_for_reference_table(
+ qualified_table_name text)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ verify_workers_query text;
+ result boolean;
+BEGIN
+ SELECT format(
+ 'SELECT true = ALL(
+ SELECT result::boolean FROM run_command_on_all_nodes($$
+ SELECT COUNT(*) = 1
+ FROM pg_dist_partition
+ WHERE logicalrelid = ''%s''::regclass AND
+ partmethod = ''n'' AND
+ partkey IS NULL AND
+ colocationid > 0 AND
+ repmodel = ''t'' AND
+ autoconverted = false
+ $$)
+ );',
+ qualified_table_name)
+ INTO verify_workers_query;
+
+ EXECUTE verify_workers_query INTO result;
+ RETURN result;
+END;
+$func$ LANGUAGE plpgsql;
diff --git a/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out b/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out
new file mode 100644
index 000000000..2e396da7d
--- /dev/null
+++ b/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out
@@ -0,0 +1,364 @@
+ALTER SYSTEM SET citus.recover_2pc_interval TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT $definition$
+CREATE OR REPLACE FUNCTION test.maintenance_worker()
+ RETURNS pg_stat_activity
+ LANGUAGE plpgsql
+AS $$
+DECLARE
+ activity record;
+BEGIN
+ DO 'BEGIN END'; -- Force maintenance daemon to start
+ -- we don't want to wait forever; loop will exit after 20 seconds
+ FOR i IN 1 .. 200 LOOP
+ PERFORM pg_stat_clear_snapshot();
+ SELECT * INTO activity FROM pg_stat_activity
+ WHERE application_name = 'Citus Maintenance Daemon' AND datname = current_database();
+ IF activity.pid IS NOT NULL THEN
+ RETURN activity;
+ ELSE
+ PERFORM pg_sleep(0.1);
+ END IF ;
+ END LOOP;
+ -- fail if we reach the end of this loop
+ raise 'Waited too long for maintenance daemon to start';
+END;
+$$;
+$definition$ create_function_test_maintenance_worker
+\gset
+CREATE DATABASE db1;
+NOTICE: Citus partially supports CREATE DATABASE for distributed databases
+DETAIL: Citus does not propagate CREATE DATABASE command to workers
+HINT: You can manually create a database and its extensions on workers.
+SELECT oid AS db1_oid
+FROM pg_database
+WHERE datname = 'db1'
+\gset
+\c - - - :worker_1_port
+CREATE DATABASE db1;
+NOTICE: Citus partially supports CREATE DATABASE for distributed databases
+DETAIL: Citus does not propagate CREATE DATABASE command to workers
+HINT: You can manually create a database and its extensions on workers.
+\c - - - :worker_2_port
+CREATE DATABASE db1;
+NOTICE: Citus partially supports CREATE DATABASE for distributed databases
+DETAIL: Citus does not propagate CREATE DATABASE command to workers
+HINT: You can manually create a database and its extensions on workers.
+\c db1 - - :worker_1_port
+CREATE EXTENSION citus;
+\c db1 - - :worker_2_port
+CREATE EXTENSION citus;
+\c db1 - - :master_port
+CREATE EXTENSION citus;
+SELECT citus_add_node('localhost', :worker_1_port);
+ citus_add_node
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT citus_add_node('localhost', :worker_2_port);
+ citus_add_node
+---------------------------------------------------------------------
+ 2
+(1 row)
+
+SELECT current_database();
+ current_database
+---------------------------------------------------------------------
+ db1
+(1 row)
+
+CREATE SCHEMA test;
+:create_function_test_maintenance_worker
+-- check maintenance daemon is started
+SELECT datname, current_database(),
+ usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
+FROM test.maintenance_worker();
+ datname | current_database | usename | extowner
+---------------------------------------------------------------------
+ db1 | db1 | postgres | postgres
+(1 row)
+
+SELECT *
+FROM pg_dist_node;
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
+---------------------------------------------------------------------
+ 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
+ 2 | 2 | localhost | 57638 | default | t | t | primary | default | t | t
+(2 rows)
+
+CREATE DATABASE db2;
+NOTICE: Citus partially supports CREATE DATABASE for distributed databases
+DETAIL: Citus does not propagate CREATE DATABASE command to workers
+HINT: You can manually create a database and its extensions on workers.
+SELECT oid AS db2_oid
+FROM pg_database
+WHERE datname = 'db2'
+\gset
+\c - - - :worker_1_port
+CREATE DATABASE db2;
+NOTICE: Citus partially supports CREATE DATABASE for distributed databases
+DETAIL: Citus does not propagate CREATE DATABASE command to workers
+HINT: You can manually create a database and its extensions on workers.
+\c - - - :worker_2_port
+CREATE DATABASE db2;
+NOTICE: Citus partially supports CREATE DATABASE for distributed databases
+DETAIL: Citus does not propagate CREATE DATABASE command to workers
+HINT: You can manually create a database and its extensions on workers.
+\c db2 - - :worker_1_port
+CREATE EXTENSION citus;
+\c db2 - - :worker_2_port
+CREATE EXTENSION citus;
+\c db2 - - :master_port
+CREATE EXTENSION citus;
+SELECT citus_add_node('localhost', :worker_1_port);
+ citus_add_node
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT citus_add_node('localhost', :worker_2_port);
+ citus_add_node
+---------------------------------------------------------------------
+ 2
+(1 row)
+
+SELECT current_database();
+ current_database
+---------------------------------------------------------------------
+ db2
+(1 row)
+
+CREATE SCHEMA test;
+:create_function_test_maintenance_worker
+-- check maintenance daemon is started
+SELECT datname, current_database(),
+ usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
+FROM test.maintenance_worker();
+ datname | current_database | usename | extowner
+---------------------------------------------------------------------
+ db2 | db2 | postgres | postgres
+(1 row)
+
+SELECT *
+FROM pg_dist_node;
+ nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
+---------------------------------------------------------------------
+ 1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
+ 2 | 2 | localhost | 57638 | default | t | t | primary | default | t | t
+(2 rows)
+
+SELECT groupid AS worker_1_group_id
+FROM pg_dist_node
+WHERE nodeport = :worker_1_port;
+ worker_1_group_id
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+\gset
+SELECT groupid AS worker_2_group_id
+FROM pg_dist_node
+WHERE nodeport = :worker_2_port;
+ worker_2_group_id
+---------------------------------------------------------------------
+ 2
+(1 row)
+
+\gset
+-- Prepare transactions on first database
+\c db1 - - :worker_1_port
+BEGIN;
+CREATE TABLE should_abort
+(
+ value int
+);
+SELECT 'citus_0_1234_0_0_' || :'db1_oid' AS transaction_1_worker_1_db_1_name
+\gset
+PREPARE TRANSACTION :'transaction_1_worker_1_db_1_name';
+BEGIN;
+CREATE TABLE should_commit
+(
+ value int
+);
+SELECT 'citus_0_1234_1_0_' || :'db1_oid' AS transaction_2_worker_1_db_1_name
+\gset
+PREPARE TRANSACTION :'transaction_2_worker_1_db_1_name';
+\c db1 - - :worker_2_port
+BEGIN;
+CREATE TABLE should_abort
+(
+ value int
+);
+SELECT 'citus_0_1234_0_0_' || :'db1_oid' AS transaction_1_worker_2_db_1_name
+\gset
+PREPARE TRANSACTION :'transaction_1_worker_2_db_1_name';
+BEGIN;
+CREATE TABLE should_commit
+(
+ value int
+);
+SELECT 'citus_0_1234_1_0_' || :'db1_oid' AS transaction_2_worker_2_db_1_name
+\gset
+PREPARE TRANSACTION :'transaction_2_worker_2_db_1_name';
+-- Prepare transactions on second database
+\c db2 - - :worker_1_port
+BEGIN;
+CREATE TABLE should_abort
+(
+ value int
+);
+SELECT 'citus_0_1234_3_0_' || :'db2_oid' AS transaction_1_worker_1_db_2_name
+\gset
+PREPARE TRANSACTION :'transaction_1_worker_1_db_2_name';
+BEGIN;
+CREATE TABLE should_commit
+(
+ value int
+);
+SELECT 'citus_0_1234_4_0_' || :'db2_oid' AS transaction_2_worker_1_db_2_name
+\gset
+PREPARE TRANSACTION :'transaction_2_worker_1_db_2_name';
+\c db2 - - :worker_2_port
+BEGIN;
+CREATE TABLE should_abort
+(
+ value int
+);
+SELECT 'citus_0_1234_3_0_' || :'db2_oid' AS transaction_1_worker_2_db_2_name
+\gset
+PREPARE TRANSACTION :'transaction_1_worker_2_db_2_name';
+BEGIN;
+CREATE TABLE should_commit
+(
+ value int
+);
+SELECT 'citus_0_1234_4_0_' || :'db2_oid' AS transaction_2_worker_2_db_2_name
+\gset
+PREPARE TRANSACTION :'transaction_2_worker_2_db_2_name';
+\c db1 - - :master_port
+INSERT INTO pg_dist_transaction
+VALUES (:worker_1_group_id, :'transaction_2_worker_1_db_1_name'),
+ (:worker_2_group_id, :'transaction_2_worker_2_db_1_name');
+INSERT INTO pg_dist_transaction
+VALUES (:worker_1_group_id, 'citus_0_should_be_forgotten_' || :'db1_oid'),
+ (:worker_2_group_id, 'citus_0_should_be_forgotten_' || :'db1_oid');
+\c db2 - - :master_port
+INSERT INTO pg_dist_transaction
+VALUES (:worker_1_group_id, :'transaction_2_worker_1_db_2_name'),
+ (:worker_2_group_id, :'transaction_2_worker_2_db_2_name');
+INSERT INTO pg_dist_transaction
+VALUES (:worker_1_group_id, 'citus_0_should_be_forgotten_' || :'db2_oid'),
+ (:worker_2_group_id, 'citus_0_should_be_forgotten_' || :'db2_oid');
+\c db1 - - :master_port
+SELECT count(*) != 0
+FROM pg_dist_transaction;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT recover_prepared_transactions() > 0;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT count(*) = 0
+FROM pg_dist_transaction;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c db2 - - :master_port
+SELECT count(*) != 0
+FROM pg_dist_transaction;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT recover_prepared_transactions() > 0;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT count(*) = 0
+FROM pg_dist_transaction;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+\c regression - - :master_port
+SELECT count(pg_terminate_backend(pid)) > 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db1' ;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+DROP DATABASE db1;
+SELECT count(pg_terminate_backend(pid)) > 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db2' ;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+DROP DATABASE db2;
+\c - - - :worker_1_port
+SELECT count(pg_terminate_backend(pid)) > 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db1' ;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+DROP DATABASE db1;
+SELECT count(pg_terminate_backend(pid)) > 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db2' ;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+DROP DATABASE db2;
+\c - - - :worker_2_port
+-- Count of terminated sessions is not important for the test,
+-- it is just to make output predictable
+SELECT count(pg_terminate_backend(pid)) >= 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db1' ;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+DROP DATABASE db1;
+SELECT count(pg_terminate_backend(pid)) >= 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db2' ;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+DROP DATABASE db2;
diff --git a/src/test/regress/expected/multi_view.out b/src/test/regress/expected/multi_view.out
index 11f78ea34..3445f442a 100644
--- a/src/test/regress/expected/multi_view.out
+++ b/src/test/regress/expected/multi_view.out
@@ -92,7 +92,7 @@ SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1
326 | 7
(5 rows)
-CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR';
+CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part table_name_for_view WHERE l_shipmode = 'AIR';
-- join between view and table
SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey);
count
@@ -179,7 +179,7 @@ SELECT o_orderkey, l_linenumber FROM priority_orders left join air_shipped_linei
-- it passes planning, fails at execution stage
SET client_min_messages TO DEBUG1;
SELECT * FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey) ORDER BY o_orderkey DESC, o_custkey DESC, o_orderpriority DESC LIMIT 5;
-DEBUG: generating subplan XXX_1 for subquery SELECT lineitem_hash_part.l_orderkey, lineitem_hash_part.l_partkey, lineitem_hash_part.l_suppkey, lineitem_hash_part.l_linenumber, lineitem_hash_part.l_quantity, lineitem_hash_part.l_extendedprice, lineitem_hash_part.l_discount, lineitem_hash_part.l_tax, lineitem_hash_part.l_returnflag, lineitem_hash_part.l_linestatus, lineitem_hash_part.l_shipdate, lineitem_hash_part.l_commitdate, lineitem_hash_part.l_receiptdate, lineitem_hash_part.l_shipinstruct, lineitem_hash_part.l_shipmode, lineitem_hash_part.l_comment FROM public.lineitem_hash_part WHERE (lineitem_hash_part.l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar)
+DEBUG: generating subplan XXX_1 for subquery SELECT l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment FROM public.lineitem_hash_part table_name_for_view WHERE (l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT priority_orders.o_orderkey, priority_orders.o_custkey, priority_orders.o_orderstatus, priority_orders.o_totalprice, priority_orders.o_orderdate, priority_orders.o_orderpriority, priority_orders.o_clerk, priority_orders.o_shippriority, priority_orders.o_comment, air_shipped_lineitems.l_orderkey, air_shipped_lineitems.l_partkey, air_shipped_lineitems.l_suppkey, air_shipped_lineitems.l_linenumber, air_shipped_lineitems.l_quantity, air_shipped_lineitems.l_extendedprice, air_shipped_lineitems.l_discount, air_shipped_lineitems.l_tax, air_shipped_lineitems.l_returnflag, air_shipped_lineitems.l_linestatus, air_shipped_lineitems.l_shipdate, air_shipped_lineitems.l_commitdate, air_shipped_lineitems.l_receiptdate, air_shipped_lineitems.l_shipinstruct, air_shipped_lineitems.l_shipmode, air_shipped_lineitems.l_comment FROM ((SELECT orders_hash_part.o_orderkey, orders_hash_part.o_custkey, orders_hash_part.o_orderstatus, orders_hash_part.o_totalprice, orders_hash_part.o_orderdate, orders_hash_part.o_orderpriority, orders_hash_part.o_clerk, orders_hash_part.o_shippriority, orders_hash_part.o_comment FROM public.orders_hash_part WHERE (orders_hash_part.o_orderpriority OPERATOR(pg_catalog.<) '3-MEDIUM'::bpchar)) priority_orders JOIN (SELECT intermediate_result.l_orderkey, intermediate_result.l_partkey, intermediate_result.l_suppkey, intermediate_result.l_linenumber, intermediate_result.l_quantity, intermediate_result.l_extendedprice, intermediate_result.l_discount, intermediate_result.l_tax, intermediate_result.l_returnflag, intermediate_result.l_linestatus, intermediate_result.l_shipdate, intermediate_result.l_commitdate, intermediate_result.l_receiptdate, intermediate_result.l_shipinstruct, intermediate_result.l_shipmode, intermediate_result.l_comment FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint, l_partkey integer, l_suppkey integer, l_linenumber integer, l_quantity numeric(15,2), l_extendedprice numeric(15,2), l_discount numeric(15,2), l_tax numeric(15,2), l_returnflag character(1), l_linestatus character(1), l_shipdate date, l_commitdate date, l_receiptdate date, l_shipinstruct character(25), l_shipmode character(10), l_comment character varying(44))) air_shipped_lineitems ON ((priority_orders.o_custkey OPERATOR(pg_catalog.=) air_shipped_lineitems.l_suppkey))) ORDER BY priority_orders.o_orderkey DESC, priority_orders.o_custkey DESC, priority_orders.o_orderpriority DESC LIMIT 5
DEBUG: push down of limit count: 5
o_orderkey | o_custkey | o_orderstatus | o_totalprice | o_orderdate | o_orderpriority | o_clerk | o_shippriority | o_comment | l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment
diff --git a/src/test/regress/expected/non_colocated_subquery_joins.out b/src/test/regress/expected/non_colocated_subquery_joins.out
index 1c1a7d935..bcfe06fba 100644
--- a/src/test/regress/expected/non_colocated_subquery_joins.out
+++ b/src/test/regress/expected/non_colocated_subquery_joins.out
@@ -1079,19 +1079,19 @@ SELECT create_distributed_table('table1','tenant_id');
(1 row)
-CREATE VIEW table1_view AS SELECT * from table1 where id < 100;
+CREATE VIEW table1_view AS SELECT * from table1 table_name_for_view where id < 100;
-- all of the above queries are non-colocated subquery joins
-- because the views are replaced with subqueries
UPDATE table2 SET id=20 FROM table1_view WHERE table1_view.id=table2.id;
DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
DEBUG: Router planner cannot handle multi-shard select queries
-DEBUG: generating subplan XXX_1 for subquery SELECT table1.id, table1.tenant_id FROM non_colocated_subquery.table1 WHERE (table1.id OPERATOR(pg_catalog.<) 100)
+DEBUG: generating subplan XXX_1 for subquery SELECT id, tenant_id FROM non_colocated_subquery.table1 table_name_for_view WHERE (id OPERATOR(pg_catalog.<) 100)
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE non_colocated_subquery.table2 SET id = 20 FROM (SELECT intermediate_result.id, intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, tenant_id integer)) table1_view WHERE (table1_view.id OPERATOR(pg_catalog.=) table2.id)
DEBUG: Creating router plan
UPDATE table2_p1 SET id=20 FROM table1_view WHERE table1_view.id=table2_p1.id;
DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
DEBUG: Router planner cannot handle multi-shard select queries
-DEBUG: generating subplan XXX_1 for subquery SELECT table1.id, table1.tenant_id FROM non_colocated_subquery.table1 WHERE (table1.id OPERATOR(pg_catalog.<) 100)
+DEBUG: generating subplan XXX_1 for subquery SELECT id, tenant_id FROM non_colocated_subquery.table1 table_name_for_view WHERE (id OPERATOR(pg_catalog.<) 100)
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE non_colocated_subquery.table2_p1 SET id = 20 FROM (SELECT intermediate_result.id, intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, tenant_id integer)) table1_view WHERE (table1_view.id OPERATOR(pg_catalog.=) table2_p1.id)
DEBUG: Creating router plan
RESET client_min_messages;
diff --git a/src/test/regress/expected/pg12.out b/src/test/regress/expected/pg12.out
index 8999038ec..acc0c3f63 100644
--- a/src/test/regress/expected/pg12.out
+++ b/src/test/regress/expected/pg12.out
@@ -370,11 +370,13 @@ SELECT DISTINCT y FROM test;
(1 row)
-- non deterministic collations
+SET client_min_messages TO WARNING;
CREATE COLLATION test_pg12.case_insensitive (
provider = icu,
locale = '@colStrength=secondary',
deterministic = false
);
+RESET client_min_messages;
CREATE TABLE col_test (
id int,
val text collate case_insensitive
diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out
index 8483a2891..1b1d80df2 100644
--- a/src/test/regress/expected/pg14.out
+++ b/src/test/regress/expected/pg14.out
@@ -18,21 +18,21 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST) t1;
-NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
+NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
+NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST true) t1;
-NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
+NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
+NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST false) t1;
ERROR: PROCESS_TOAST required with VACUUM FULL
VACUUM (PROCESS_TOAST false) t1;
-NOTICE: issuing VACUUM pg14.t1_980000
+NOTICE: issuing VACUUM (PROCESS_TOAST FALSE) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing VACUUM pg14.t1_980001
+NOTICE: issuing VACUUM (PROCESS_TOAST FALSE) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP AUTO) t1;
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000
@@ -62,14 +62,14 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP "AUTOX") t1;
ERROR: index_cleanup requires a Boolean value
VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1;
-NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980000
+NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980001
+NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1;
-NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
+NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
+NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
CREATE TABLE local_vacuum_table(name text);
@@ -1089,9 +1089,10 @@ SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one' ORDER BY
1 | 4 | one | -1
(1 row)
+\set VERBOSITY terse
SELECT * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t = 'one' ORDER BY 1,2,3,4; -- error
-ERROR: invalid reference to FROM-clause entry for table "j1_tbl"
-HINT: There is an entry for table "j1_tbl", but it cannot be referenced from this part of the query.
+ERROR: invalid reference to FROM-clause entry for table "j1_tbl" at character 57
+\set VERBOSITY default
SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.i = 1 ORDER BY 1,2,3,4; -- ok
i | j | t | k
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/pg15.out b/src/test/regress/expected/pg15.out
index 667305225..fcbb0cd12 100644
--- a/src/test/regress/expected/pg15.out
+++ b/src/test/regress/expected/pg15.out
@@ -406,7 +406,7 @@ SELECT create_distributed_table('tbl2', 'x');
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
-ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from either a non-equi-join or a mismatch in the datatypes of the columns being joined.
+ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from a non-equi-join.
DETAIL: Without a equi-join condition on the target's distribution column, the source rows cannot be efficiently redistributed, and the NOT-MATCHED condition cannot be evaluated unambiguously. This can result in incorrect or unexpected results when attempting to merge tables in a distributed setting
-- also, inside subqueries & ctes
WITH targq AS (
@@ -414,7 +414,7 @@ WITH targq AS (
)
MERGE INTO tbl1 USING targq ON (true)
WHEN MATCHED THEN DELETE;
-ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from either a non-equi-join or a mismatch in the datatypes of the columns being joined.
+ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from a non-equi-join.
DETAIL: Without a equi-join condition on the target's distribution column, the source rows cannot be efficiently redistributed, and the NOT-MATCHED condition cannot be evaluated unambiguously. This can result in incorrect or unexpected results when attempting to merge tables in a distributed setting
WITH foo AS (
MERGE INTO tbl1 USING tbl2 ON (true)
@@ -431,7 +431,7 @@ USING tbl2
ON (true)
WHEN MATCHED THEN
DO NOTHING;
-ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from either a non-equi-join or a mismatch in the datatypes of the columns being joined.
+ERROR: The required join operation is missing between the target's distribution column and any expression originating from the source. The issue may arise from a non-equi-join.
DETAIL: Without a equi-join condition on the target's distribution column, the source rows cannot be efficiently redistributed, and the NOT-MATCHED condition cannot be evaluated unambiguously. This can result in incorrect or unexpected results when attempting to merge tables in a distributed setting
MERGE INTO tbl1 t
USING tbl2
@@ -868,7 +868,7 @@ SELECT create_reference_table('FKTABLE');
(1 row)
-- show that the definition is expected
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
+SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY 1;
pg_get_constraintdef
---------------------------------------------------------------------
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
@@ -881,8 +881,8 @@ SET search_path TO pg15;
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
pg_get_constraintdef
---------------------------------------------------------------------
- FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
+ FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
(2 rows)
-- also, make sure that it works as expected
@@ -1473,6 +1473,63 @@ SELECT run_command_on_workers($$DROP ACCESS METHOD heap2$$);
(localhost,57638,t,"DROP ACCESS METHOD")
(2 rows)
+CREATE TABLE referenced (int_col integer PRIMARY KEY);
+CREATE TABLE referencing (text_col text);
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('referenced', null);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_distributed_table('referencing', null);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+RESET citus.shard_replication_factor;
+CREATE OR REPLACE FUNCTION my_random(numeric)
+ RETURNS numeric AS
+$$
+BEGIN
+ RETURN 7 * $1;
+END;
+$$
+LANGUAGE plpgsql IMMUTABLE;
+ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE NULLS DISTINCT REFERENCES referenced(int_col);
+ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (text_col::int * my_random(1)) STORED UNIQUE NULLS NOT DISTINCT;
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_grouped_fkey_constraints FROM get_grouped_fkey_constraints('pg15.referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+ is_coordinator | result
+---------------------------------------------------------------------
+ t | [{"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "s", "constraint_names": ["referencing__fkey"], "referenced_tables": ["pg15.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["pg15.referencing"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}]
+ f | [{"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "s", "constraint_names": ["referencing__fkey", "referencing__fkey_960207"], "referenced_tables": ["pg15.referenced", "pg15.referenced_960206"], "referenced_columns": ["int_col"], "referencing_tables": ["pg15.referencing", "pg15.referencing_960207"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}]
+ f | [{"deferred": false, "deferable": false, "on_delete": "a", "on_update": "a", "match_type": "s", "constraint_names": ["referencing__fkey"], "referenced_tables": ["pg15.referenced"], "referenced_columns": ["int_col"], "referencing_tables": ["pg15.referencing"], "referencing_columns": ["test_2"], "referencing_columns_set_null_or_default": null}]
+(3 rows)
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_index_defs FROM get_index_defs('pg15', 'referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+ is_coordinator | result
+---------------------------------------------------------------------
+ t | [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON pg15.referencing USING btree (test_2)"], "indexnames": ["referencing__key"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON pg15.referencing USING btree (test_3) NULLS NOT DISTINCT"], "indexnames": ["referencing__key1"]}]
+ f | [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON pg15.referencing USING btree (test_2)", "CREATE UNIQUE INDEX referencing__key_960207 ON pg15.referencing_960207 USING btree (test_2)"], "indexnames": ["referencing__key", "referencing__key_960207"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON pg15.referencing USING btree (test_3) NULLS NOT DISTINCT", "CREATE UNIQUE INDEX referencing__key1_960207 ON pg15.referencing_960207 USING btree (test_3) NULLS NOT DISTINCT"], "indexnames": ["referencing__key1", "referencing__key1_960207"]}]
+ f | [{"indexdefs": ["CREATE UNIQUE INDEX referencing__key ON pg15.referencing USING btree (test_2)"], "indexnames": ["referencing__key"]}, {"indexdefs": ["CREATE UNIQUE INDEX referencing__key1 ON pg15.referencing USING btree (test_3) NULLS NOT DISTINCT"], "indexnames": ["referencing__key1"]}]
+(3 rows)
+
+set citus.log_remote_commands = true;
+set citus.grep_remote_commands = '%ALTER DATABASE%';
+alter database regression REFRESH COLLATION VERSION;
+NOTICE: version has not changed
+NOTICE: issuing ALTER DATABASE regression REFRESH COLLATION VERSION;
+NOTICE: issuing ALTER DATABASE regression REFRESH COLLATION VERSION;
+set citus.log_remote_commands = false;
-- Clean up
\set VERBOSITY terse
SET client_min_messages TO ERROR;
diff --git a/src/test/regress/expected/pg16.out b/src/test/regress/expected/pg16.out
new file mode 100644
index 000000000..8d47b6f1b
--- /dev/null
+++ b/src/test/regress/expected/pg16.out
@@ -0,0 +1,1104 @@
+--
+-- PG16
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+\else
+\q
+\endif
+CREATE SCHEMA pg16;
+SET search_path TO pg16;
+SET citus.next_shard_id TO 950000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1400000;
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 1;
+-- test the new vacuum and analyze options
+-- Relevant PG commits:
+-- https://github.com/postgres/postgres/commit/1cbbee03385763b066ae3961fc61f2cd01a0d0d7
+-- https://github.com/postgres/postgres/commit/4211fbd8413b26e0abedbe4338aa7cda2cd469b4
+-- https://github.com/postgres/postgres/commit/a46a7011b27188af526047a111969f257aaf4db8
+CREATE TABLE t1 (a int);
+SELECT create_distributed_table('t1','a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SET citus.log_remote_commands TO ON;
+VACUUM (PROCESS_MAIN FALSE) t1;
+NOTICE: issuing VACUUM (PROCESS_MAIN FALSE) pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+VACUUM (PROCESS_MAIN FALSE, PROCESS_TOAST FALSE) t1;
+NOTICE: issuing VACUUM (PROCESS_TOAST FALSE,PROCESS_MAIN FALSE) pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+VACUUM (PROCESS_MAIN TRUE) t1;
+NOTICE: issuing VACUUM pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+VACUUM (PROCESS_MAIN FALSE, FULL) t1;
+NOTICE: issuing VACUUM (FULL,PROCESS_MAIN FALSE) pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+VACUUM (SKIP_DATABASE_STATS) t1;
+NOTICE: issuing VACUUM (SKIP_DATABASE_STATS) pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+VACUUM (ONLY_DATABASE_STATS) t1;
+ERROR: ONLY_DATABASE_STATS cannot be specified with a list of tables
+VACUUM (BUFFER_USAGE_LIMIT '512 kB') t1;
+NOTICE: issuing VACUUM (BUFFER_USAGE_LIMIT 512) pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+VACUUM (BUFFER_USAGE_LIMIT 0) t1;
+NOTICE: issuing VACUUM (BUFFER_USAGE_LIMIT 0) pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+VACUUM (BUFFER_USAGE_LIMIT 16777220) t1;
+ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB
+VACUUM (BUFFER_USAGE_LIMIT -1) t1;
+ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB
+VACUUM (BUFFER_USAGE_LIMIT 'test') t1;
+ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB
+ANALYZE (BUFFER_USAGE_LIMIT '512 kB') t1;
+NOTICE: issuing ANALYZE (BUFFER_USAGE_LIMIT 512) pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+ANALYZE (BUFFER_USAGE_LIMIT 0) t1;
+NOTICE: issuing ANALYZE (BUFFER_USAGE_LIMIT 0) pg16.t1_950000
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+SET citus.log_remote_commands TO OFF;
+-- only verify that this works; we don't log the remote
+-- commands because the output can be flaky
+VACUUM (ONLY_DATABASE_STATS);
+-- New GENERIC_PLAN option in EXPLAIN
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/3c05284
+CREATE TABLE tenk1 (
+ unique1 int4,
+ unique2 int4,
+ thousand int4
+);
+SELECT create_distributed_table('tenk1', 'unique1');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SET citus.log_remote_commands TO on;
+EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing SAVEPOINT citus_explain_savepoint
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing EXPLAIN (ANALYZE FALSE, VERBOSE FALSE, COSTS TRUE, BUFFERS FALSE, WAL FALSE, GENERIC_PLAN TRUE, TIMING FALSE, SUMMARY FALSE, FORMAT TEXT) SELECT unique1 FROM pg16.tenk1_950001 tenk1 WHERE (thousand OPERATOR(pg_catalog.=) 1000)
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing ROLLBACK TO SAVEPOINT citus_explain_savepoint
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing COMMIT
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+ QUERY PLAN
+---------------------------------------------------------------------
+ Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
+ Task Count: 1
+ Tasks Shown: All
+ -> Task
+ Node: host=localhost port=xxxxx dbname=regression
+ -> Seq Scan on tenk1_950001 tenk1 (cost=0.00..35.50 rows=10 width=4)
+ Filter: (thousand = 1000)
+(7 rows)
+
+EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
+ERROR: EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together
+SET citus.log_remote_commands TO off;
+-- Proper error when creating statistics without a name on a Citus table
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/624aa2a13bd02dd584bb0995c883b5b93b2152df
+CREATE TABLE test_stats (
+ a int,
+ b int
+);
+SELECT create_distributed_table('test_stats', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE STATISTICS (dependencies) ON a, b FROM test_stats;
+ERROR: cannot create statistics without a name on a Citus table
+HINT: Consider specifying a name for the statistics
+CREATE STATISTICS (ndistinct, dependencies) on a, b from test_stats;
+ERROR: cannot create statistics without a name on a Citus table
+HINT: Consider specifying a name for the statistics
+CREATE STATISTICS (ndistinct, dependencies, mcv) on a, b from test_stats;
+ERROR: cannot create statistics without a name on a Citus table
+HINT: Consider specifying a name for the statistics
+-- STORAGE option in CREATE is already propagated by Citus
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/784cedd
+CREATE TABLE test_storage (a text, c text STORAGE plain);
+SELECT create_distributed_table('test_storage', 'a', shard_count := 2);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT result FROM run_command_on_all_nodes
+($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
+ WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
+ result
+---------------------------------------------------------------------
+ {"(a,x)","(c,p)"}
+ {"(a,x)","(c,p)"}
+ {"(a,x)","(c,p)"}
+(3 rows)
+
+SELECT alter_distributed_table('test_storage', shard_count := 4);
+NOTICE: creating a new table for pg16.test_storage
+NOTICE: moving the data of pg16.test_storage
+NOTICE: dropping the old pg16.test_storage
+NOTICE: renaming the new table to pg16.test_storage
+ alter_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT result FROM run_command_on_all_nodes
+($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
+ WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
+ result
+---------------------------------------------------------------------
+ {"(a,x)","(c,p)"}
+ {"(a,x)","(c,p)"}
+ {"(a,x)","(c,p)"}
+(3 rows)
+
+SELECT undistribute_table('test_storage');
+NOTICE: creating a new table for pg16.test_storage
+NOTICE: moving the data of pg16.test_storage
+NOTICE: dropping the old pg16.test_storage
+NOTICE: renaming the new table to pg16.test_storage
+ undistribute_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT result FROM run_command_on_all_nodes
+($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
+ WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
+ result
+---------------------------------------------------------------------
+
+
+ {"(a,x)","(c,p)"}
+(3 rows)
+
+-- New option to change storage to DEFAULT in PG16
+-- ALTER TABLE .. ALTER COLUMN .. SET STORAGE is already
+-- not supported by Citus, so this is also not supported
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/b9424d0
+SELECT create_distributed_table('test_storage', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ALTER TABLE test_storage ALTER a SET STORAGE default;
+ERROR: alter table command is currently unsupported
+DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ENABLE|DISABLE|NO FORCE|FORCE ROW LEVEL SECURITY, ATTACH|DETACH PARTITION and TYPE subcommands are supported.
+-- New ICU_RULES option added to CREATE DATABASE
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/30a53b7
+CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0';
+NOTICE: Citus partially supports CREATE DATABASE for distributed databases
+DETAIL: Citus does not propagate CREATE DATABASE command to workers
+HINT: You can manually create a database and its extensions on workers.
+NOTICE: using standard form "und" for ICU locale ""
+SELECT result FROM run_command_on_workers
+($$CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0'$$);
+ result
+---------------------------------------------------------------------
+ CREATE DATABASE
+ CREATE DATABASE
+(2 rows)
+
+CREATE TABLE test_db_table (a text);
+SELECT create_distributed_table('test_db_table', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO test_db_table VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green');
+-- icu default rules order
+SELECT * FROM test_db_table ORDER BY a COLLATE "en-x-icu";
+ a
+---------------------------------------------------------------------
+ Abernathy
+ apple
+ bird
+ Boston
+ Graham
+ green
+(6 rows)
+
+-- regression database's default order
+SELECT * FROM test_db_table ORDER BY a;
+ a
+---------------------------------------------------------------------
+ Abernathy
+ Boston
+ Graham
+ apple
+ bird
+ green
+(6 rows)
+
+-- now see the order in the new database
+\c test_db
+CREATE EXTENSION citus;
+\c - - - :worker_1_port
+CREATE EXTENSION citus;
+\c - - - :worker_2_port
+CREATE EXTENSION citus;
+\c - - - :master_port
+SELECT 1 FROM citus_add_node('localhost', :worker_1_port);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+CREATE TABLE test_db_table (a text);
+SELECT create_distributed_table('test_db_table', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO test_db_table VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green');
+-- icu default rules order
+SELECT * FROM test_db_table ORDER BY a COLLATE "en-x-icu";
+ a
+---------------------------------------------------------------------
+ Abernathy
+ apple
+ bird
+ Boston
+ Graham
+ green
+(6 rows)
+
+-- test_db database's default order with ICU_RULES = '&a < g'
+SELECT * FROM test_db_table ORDER BY a;
+ a
+---------------------------------------------------------------------
+ Abernathy
+ apple
+ green
+ bird
+ Boston
+ Graham
+(6 rows)
+
+\c regression
+\c - - - :master_port
+DROP DATABASE test_db;
+SELECT result FROM run_command_on_workers
+($$DROP DATABASE test_db$$);
+ result
+---------------------------------------------------------------------
+ DROP DATABASE
+ DROP DATABASE
+(2 rows)
+
+SET search_path TO pg16;
+-- New rules option added to CREATE COLLATION
+-- Similar to above test with CREATE DATABASE
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/30a53b7
+CREATE COLLATION default_rule (provider = icu, locale = '');
+NOTICE: using standard form "und" for ICU locale ""
+CREATE COLLATION special_rule (provider = icu, locale = '', rules = '&a < g');
+NOTICE: using standard form "und" for ICU locale ""
+CREATE TABLE test_collation_rules (a text);
+SELECT create_distributed_table('test_collation_rules', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO test_collation_rules VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green');
+SELECT collname, collprovider, colliculocale, collicurules
+FROM pg_collation
+WHERE collname like '%_rule%'
+ORDER BY 1;
+ collname | collprovider | colliculocale | collicurules
+---------------------------------------------------------------------
+ default_rule | i | und |
+ special_rule | i | und | &a < g
+(2 rows)
+
+SELECT * FROM test_collation_rules ORDER BY a COLLATE default_rule;
+ a
+---------------------------------------------------------------------
+ Abernathy
+ apple
+ bird
+ Boston
+ Graham
+ green
+(6 rows)
+
+SELECT * FROM test_collation_rules ORDER BY a COLLATE special_rule;
+ a
+---------------------------------------------------------------------
+ Abernathy
+ apple
+ green
+ bird
+ Boston
+ Graham
+(6 rows)
+
+\c - - - :worker_1_port
+SET search_path TO pg16;
+SELECT collname, collprovider, colliculocale, collicurules
+FROM pg_collation
+WHERE collname like '%_rule%'
+ORDER BY 1;
+ collname | collprovider | colliculocale | collicurules
+---------------------------------------------------------------------
+ default_rule | i | und |
+ special_rule | i | und | &a < g
+(2 rows)
+
+SELECT * FROM test_collation_rules ORDER BY a COLLATE default_rule;
+ a
+---------------------------------------------------------------------
+ Abernathy
+ apple
+ bird
+ Boston
+ Graham
+ green
+(6 rows)
+
+SELECT * FROM test_collation_rules ORDER BY a COLLATE special_rule;
+ a
+---------------------------------------------------------------------
+ Abernathy
+ apple
+ green
+ bird
+ Boston
+ Graham
+(6 rows)
+
+\c - - - :master_port
+SET search_path TO pg16;
+SET citus.next_shard_id TO 951000;
+-- Foreign table TRUNCATE trigger
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/3b00a94
+SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SET citus.use_citus_managed_tables TO ON;
+CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial);
+INSERT INTO foreign_table_test VALUES (1, 'text_test');
+CREATE EXTENSION postgres_fdw;
+CREATE SERVER foreign_server
+ FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (host 'localhost', port :'master_port', dbname 'regression');
+CREATE USER MAPPING FOR CURRENT_USER
+ SERVER foreign_server
+ OPTIONS (user 'postgres');
+CREATE FOREIGN TABLE foreign_table (
+ id integer NOT NULL,
+ data text,
+ a bigserial
+)
+ SERVER foreign_server
+ OPTIONS (schema_name 'pg16', table_name 'foreign_table_test');
+-- verify it's a Citus foreign table
+SELECT partmethod, repmodel FROM pg_dist_partition
+WHERE logicalrelid = 'foreign_table'::regclass ORDER BY logicalrelid;
+ partmethod | repmodel
+---------------------------------------------------------------------
+ n | s
+(1 row)
+
+INSERT INTO foreign_table VALUES (2, 'test_2');
+INSERT INTO foreign_table_test VALUES (3, 'test_3');
+CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+ RAISE NOTICE 'trigger_func(%) called: action = %, when = %, level = %',
+ TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
+ RETURN NULL;
+END;$$;
+CREATE FUNCTION trigger_func_on_shard() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+ RAISE NOTICE 'trigger_func_on_shard(%) called: action = %, when = %, level = %',
+ TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
+ RETURN NULL;
+END;$$;
+CREATE TRIGGER trig_stmt_before BEFORE TRUNCATE ON foreign_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+SET citus.override_table_visibility TO off;
+CREATE TRIGGER trig_stmt_shard_before BEFORE TRUNCATE ON foreign_table_951001
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func_on_shard();
+RESET citus.override_table_visibility;
+SELECT * FROM foreign_table ORDER BY 1;
+ id | data | a
+---------------------------------------------------------------------
+ 1 | text_test | 1
+ 2 | test_2 | 1
+ 3 | test_3 | 2
+(3 rows)
+
+TRUNCATE foreign_table;
+NOTICE: trigger_func() called: action = TRUNCATE, when = BEFORE, level = STATEMENT
+CONTEXT: PL/pgSQL function trigger_func() line XX at RAISE
+NOTICE: trigger_func_on_shard() called: action = TRUNCATE, when = BEFORE, level = STATEMENT
+CONTEXT: PL/pgSQL function trigger_func_on_shard() line XX at RAISE
+SELECT * FROM foreign_table ORDER BY 1;
+ id | data | a
+---------------------------------------------------------------------
+(0 rows)
+
+RESET citus.use_citus_managed_tables;
+--
+-- COPY FROM ... DEFAULT
+-- Already supported in Citus, adding all PG tests with a distributed table
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/9f8377f
+CREATE TABLE copy_default (
+ id integer PRIMARY KEY,
+ text_value text NOT NULL DEFAULT 'test',
+ ts_value timestamp without time zone NOT NULL DEFAULT '2022-07-05'
+);
+SELECT create_distributed_table('copy_default', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- if DEFAULT is not specified, then the marker will be regular data
+COPY copy_default FROM stdin;
+SELECT * FROM copy_default ORDER BY id;
+ id | text_value | ts_value
+---------------------------------------------------------------------
+ 1 | value | Mon Jul 04 00:00:00 2022
+ 2 | D | Tue Jul 05 00:00:00 2022
+(2 rows)
+
+TRUNCATE copy_default;
+COPY copy_default FROM stdin WITH (format csv);
+SELECT * FROM copy_default ORDER BY id;
+ id | text_value | ts_value
+---------------------------------------------------------------------
+ 1 | value | Mon Jul 04 00:00:00 2022
+ 2 | \D | Tue Jul 05 00:00:00 2022
+(2 rows)
+
+TRUNCATE copy_default;
+-- DEFAULT cannot be used in binary mode
+COPY copy_default FROM stdin WITH (format binary, default '\D');
+ERROR: cannot specify DEFAULT in BINARY mode
+-- DEFAULT cannot be new line nor carriage return
+COPY copy_default FROM stdin WITH (default E'\n');
+ERROR: COPY default representation cannot use newline or carriage return
+COPY copy_default FROM stdin WITH (default E'\r');
+ERROR: COPY default representation cannot use newline or carriage return
+-- DELIMITER cannot appear in DEFAULT spec
+COPY copy_default FROM stdin WITH (delimiter ';', default 'test;test');
+ERROR: COPY delimiter must not appear in the DEFAULT specification
+-- CSV quote cannot appear in DEFAULT spec
+COPY copy_default FROM stdin WITH (format csv, quote '"', default 'test"test');
+ERROR: CSV quote character must not appear in the DEFAULT specification
+-- NULL and DEFAULT spec must be different
+COPY copy_default FROM stdin WITH (default '\N');
+ERROR: NULL specification and DEFAULT specification cannot be the same
+-- cannot use DEFAULT marker in column that has no DEFAULT value
+COPY copy_default FROM stdin WITH (default '\D');
+ERROR: unexpected default marker in COPY data
+DETAIL: Column "id" has no default value.
+CONTEXT: COPY copy_default, line 1: "\D value '2022-07-04'"
+COPY copy_default FROM stdin WITH (format csv, default '\D');
+ERROR: unexpected default marker in COPY data
+DETAIL: Column "id" has no default value.
+CONTEXT: COPY copy_default, line 1: "\D,value,2022-07-04"
+-- The DEFAULT marker must be unquoted and unescaped or it's not recognized
+COPY copy_default FROM stdin WITH (default '\D');
+SELECT * FROM copy_default ORDER BY id;
+ id | text_value | ts_value
+---------------------------------------------------------------------
+ 1 | test | Mon Jul 04 00:00:00 2022
+ 2 | \D | Mon Jul 04 00:00:00 2022
+ 3 | "D" | Mon Jul 04 00:00:00 2022
+(3 rows)
+
+TRUNCATE copy_default;
+COPY copy_default FROM stdin WITH (format csv, default '\D');
+SELECT * FROM copy_default ORDER BY id;
+ id | text_value | ts_value
+---------------------------------------------------------------------
+ 1 | test | Mon Jul 04 00:00:00 2022
+ 2 | \\D | Mon Jul 04 00:00:00 2022
+ 3 | \D | Mon Jul 04 00:00:00 2022
+(3 rows)
+
+TRUNCATE copy_default;
+-- successful usage of DEFAULT option in COPY
+COPY copy_default FROM stdin WITH (default '\D');
+SELECT * FROM copy_default ORDER BY id;
+ id | text_value | ts_value
+---------------------------------------------------------------------
+ 1 | value | Mon Jul 04 00:00:00 2022
+ 2 | test | Sun Jul 03 00:00:00 2022
+ 3 | test | Tue Jul 05 00:00:00 2022
+(3 rows)
+
+TRUNCATE copy_default;
+COPY copy_default FROM stdin WITH (format csv, default '\D');
+SELECT * FROM copy_default ORDER BY id;
+ id | text_value | ts_value
+---------------------------------------------------------------------
+ 1 | value | Mon Jul 04 00:00:00 2022
+ 2 | test | Sun Jul 03 00:00:00 2022
+ 3 | test | Tue Jul 05 00:00:00 2022
+(3 rows)
+
+TRUNCATE copy_default;
+\c - - - :worker_1_port
+COPY pg16.copy_default FROM stdin WITH (format csv, default '\D');
+SELECT * FROM pg16.copy_default ORDER BY id;
+ id | text_value | ts_value
+---------------------------------------------------------------------
+ 1 | value | Mon Jul 04 00:00:00 2022
+ 2 | test | Sun Jul 03 00:00:00 2022
+ 3 | test | Tue Jul 05 00:00:00 2022
+(3 rows)
+
+\c - - - :master_port
+TRUNCATE pg16.copy_default;
+\c - - - :worker_2_port
+COPY pg16.copy_default FROM stdin WITH (format csv, default '\D');
+SELECT * FROM pg16.copy_default ORDER BY id;
+ id | text_value | ts_value
+---------------------------------------------------------------------
+ 1 | value | Mon Jul 04 00:00:00 2022
+ 2 | test | Sun Jul 03 00:00:00 2022
+ 3 | test | Tue Jul 05 00:00:00 2022
+(3 rows)
+
+\c - - - :master_port
+SET search_path TO pg16;
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 1;
+-- DEFAULT cannot be used in COPY TO
+COPY (select 1 as test) TO stdout WITH (default '\D');
+ERROR: COPY DEFAULT only available using COPY FROM
+-- Tests for SQL/JSON: JSON_ARRAYAGG and JSON_OBJECTAGG aggregates
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/7081ac4
+SET citus.next_shard_id TO 952000;
+CREATE TABLE agg_test(a int, b serial);
+SELECT create_distributed_table('agg_test', 'a');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO agg_test SELECT i FROM generate_series(1, 5) i;
+-- JSON_ARRAYAGG with distribution key
+SELECT JSON_ARRAYAGG(a ORDER BY a),
+JSON_ARRAYAGG(a ORDER BY a RETURNING jsonb)
+FROM agg_test;
+ json_arrayagg | json_arrayagg
+---------------------------------------------------------------------
+ [1, 2, 3, 4, 5] | [1, 2, 3, 4, 5]
+(1 row)
+
+-- JSON_ARRAYAGG with other column
+SELECT JSON_ARRAYAGG(b ORDER BY b),
+JSON_ARRAYAGG(b ORDER BY b RETURNING jsonb)
+FROM agg_test;
+ json_arrayagg | json_arrayagg
+---------------------------------------------------------------------
+ [1, 2, 3, 4, 5] | [1, 2, 3, 4, 5]
+(1 row)
+
+-- JSON_ARRAYAGG with router query
+SET citus.log_remote_commands TO on;
+SELECT JSON_ARRAYAGG(a ORDER BY a),
+JSON_ARRAYAGG(a ORDER BY a RETURNING jsonb)
+FROM agg_test WHERE a = 2;
+NOTICE: issuing SELECT JSON_ARRAYAGG(a ORDER BY a RETURNING json) AS "json_arrayagg", JSON_ARRAYAGG(a ORDER BY a RETURNING jsonb) AS "json_arrayagg" FROM pg16.agg_test_952000 agg_test WHERE (a OPERATOR(pg_catalog.=) 2)
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+ json_arrayagg | json_arrayagg
+---------------------------------------------------------------------
+ [2] | [2]
+(1 row)
+
+RESET citus.log_remote_commands;
+-- JSON_OBJECTAGG with distribution key
+SELECT
+ JSON_OBJECTAGG(a: a),
+ JSON_ARRAYAGG(a ORDER BY a), -- for order
+ JSON_OBJECTAGG(a: a RETURNING jsonb)
+FROM
+ agg_test;
+ json_objectagg | json_arrayagg | json_objectagg
+---------------------------------------------------------------------
+ { "1" : 1, "2" : 2, "3" : 3, "4" : 4, "5" : 5 } | [1, 2, 3, 4, 5] | {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5}
+(1 row)
+
+-- JSON_OBJECTAGG with other column
+SELECT
+ JSON_OBJECTAGG(b: b),
+ JSON_ARRAYAGG(b ORDER BY b), -- for order
+ JSON_OBJECTAGG(b: b RETURNING jsonb)
+FROM
+ agg_test;
+ json_objectagg | json_arrayagg | json_objectagg
+---------------------------------------------------------------------
+ { "1" : 1, "2" : 2, "3" : 3, "4" : 4, "5" : 5 } | [1, 2, 3, 4, 5] | {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5}
+(1 row)
+
+-- JSON_OBJECTAGG with router query
+SET citus.log_remote_commands TO on;
+SELECT
+ JSON_OBJECTAGG(a: a),
+ JSON_OBJECTAGG(a: a RETURNING jsonb)
+FROM
+ agg_test WHERE a = 3;
+NOTICE: issuing SELECT JSON_OBJECTAGG(a : a RETURNING json) AS "json_objectagg", JSON_OBJECTAGG(a : a RETURNING jsonb) AS "json_objectagg" FROM pg16.agg_test_952000 agg_test WHERE (a OPERATOR(pg_catalog.=) 3)
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+ json_objectagg | json_objectagg
+---------------------------------------------------------------------
+ { "3" : 3 } | {"3": 3}
+(1 row)
+
+RESET citus.log_remote_commands;
+-- Tests for SQL/JSON: support the IS JSON predicate
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/6ee30209
+CREATE TABLE test_is_json (id bigserial, js text);
+SELECT create_distributed_table('test_is_json', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO test_is_json(js) VALUES
+ (NULL),
+ (''),
+ ('123'),
+ ('"aaa "'),
+ ('true'),
+ ('null'),
+ ('[]'),
+ ('[1, "2", {}]'),
+ ('{}'),
+ ('{ "a": 1, "b": null }'),
+ ('{ "a": 1, "a": null }'),
+ ('{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] }'),
+ ('{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] }'),
+ ('aaa'),
+ ('{a:1}'),
+ ('["a",]');
+-- run the IS JSON predicate on the worker nodes
+SELECT
+ js,
+ js IS JSON "JSON",
+ js IS NOT JSON "NOT JSON",
+ js IS JSON VALUE "VALUE",
+ js IS JSON OBJECT "OBJECT",
+ js IS JSON ARRAY "ARRAY",
+ js IS JSON SCALAR "SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ test_is_json ORDER BY js;
+ js | JSON | NOT JSON | VALUE | OBJECT | ARRAY | SCALAR | WITHOUT UNIQUE | WITH UNIQUE
+---------------------------------------------------------------------
+ | f | t | f | f | f | f | f | f
+ "aaa " | t | f | t | f | f | t | t | t
+ 123 | t | f | t | f | f | t | t | t
+ ["a",] | f | t | f | f | f | f | f | f
+ [1, "2", {}] | t | f | t | f | t | f | t | t
+ [] | t | f | t | f | t | f | t | t
+ aaa | f | t | f | f | f | f | f | f
+ null | t | f | t | f | f | t | t | t
+ true | t | f | t | f | f | t | t | t
+ { "a": 1, "a": null } | t | f | t | t | f | f | t | f
+ { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
+ { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
+ { "a": 1, "b": null } | t | f | t | t | f | f | t | t
+ {a:1} | f | t | f | f | f | f | f | f
+ {} | t | f | t | t | f | f | t | t
+ | | | | | | | |
+(16 rows)
+
+-- pull the data to the coordinator and run the IS JSON predicate there
+WITH pulled_data as (SELECT js FROM test_is_json OFFSET 0)
+SELECT
+ js,
+ js IS JSON "IS JSON",
+ js IS NOT JSON "IS NOT JSON",
+ js IS JSON VALUE "IS VALUE",
+ js IS JSON OBJECT "IS OBJECT",
+ js IS JSON ARRAY "IS ARRAY",
+ js IS JSON SCALAR "IS SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ pulled_data ORDER BY js;
+ js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
+---------------------------------------------------------------------
+ | f | t | f | f | f | f | f | f
+ "aaa " | t | f | t | f | f | t | t | t
+ 123 | t | f | t | f | f | t | t | t
+ ["a",] | f | t | f | f | f | f | f | f
+ [1, "2", {}] | t | f | t | f | t | f | t | t
+ [] | t | f | t | f | t | f | t | t
+ aaa | f | t | f | f | f | f | f | f
+ null | t | f | t | f | f | t | t | t
+ true | t | f | t | f | f | t | t | t
+ { "a": 1, "a": null } | t | f | t | t | f | f | t | f
+ { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
+ { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
+ { "a": 1, "b": null } | t | f | t | t | f | f | t | t
+ {a:1} | f | t | f | f | f | f | f | f
+ {} | t | f | t | t | f | f | t | t
+ | | | | | | | |
+(16 rows)
+
+SELECT
+ js,
+ js IS JSON "IS JSON",
+ js IS NOT JSON "IS NOT JSON",
+ js IS JSON VALUE "IS VALUE",
+ js IS JSON OBJECT "IS OBJECT",
+ js IS JSON ARRAY "IS ARRAY",
+ js IS JSON SCALAR "IS SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ (SELECT js::json FROM test_is_json WHERE js IS JSON) foo(js);
+ js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
+---------------------------------------------------------------------
+ 123 | t | f | t | f | f | t | t | t
+ "aaa " | t | f | t | f | f | t | t | t
+ true | t | f | t | f | f | t | t | t
+ null | t | f | t | f | f | t | t | t
+ [] | t | f | t | f | t | f | t | t
+ [1, "2", {}] | t | f | t | f | t | f | t | t
+ {} | t | f | t | t | f | f | t | t
+ { "a": 1, "b": null } | t | f | t | t | f | f | t | t
+ { "a": 1, "a": null } | t | f | t | t | f | f | t | f
+ { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
+ { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
+(11 rows)
+
+SELECT
+ js0,
+ js IS JSON "IS JSON",
+ js IS NOT JSON "IS NOT JSON",
+ js IS JSON VALUE "IS VALUE",
+ js IS JSON OBJECT "IS OBJECT",
+ js IS JSON ARRAY "IS ARRAY",
+ js IS JSON SCALAR "IS SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ (SELECT js, js::bytea FROM test_is_json WHERE js IS JSON) foo(js0, js);
+ js0 | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
+---------------------------------------------------------------------
+ 123 | t | f | t | f | f | t | t | t
+ "aaa " | t | f | t | f | f | t | t | t
+ true | t | f | t | f | f | t | t | t
+ null | t | f | t | f | f | t | t | t
+ [] | t | f | t | f | t | f | t | t
+ [1, "2", {}] | t | f | t | f | t | f | t | t
+ {} | t | f | t | t | f | f | t | t
+ { "a": 1, "b": null } | t | f | t | t | f | f | t | t
+ { "a": 1, "a": null } | t | f | t | t | f | f | t | f
+ { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t
+ { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f
+(11 rows)
+
+SELECT
+ js,
+ js IS JSON "IS JSON",
+ js IS NOT JSON "IS NOT JSON",
+ js IS JSON VALUE "IS VALUE",
+ js IS JSON OBJECT "IS OBJECT",
+ js IS JSON ARRAY "IS ARRAY",
+ js IS JSON SCALAR "IS SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ (SELECT js::jsonb FROM test_is_json WHERE js IS JSON) foo(js);
+ js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE
+---------------------------------------------------------------------
+ 123 | t | f | t | f | f | t | t | t
+ "aaa " | t | f | t | f | f | t | t | t
+ true | t | f | t | f | f | t | t | t
+ null | t | f | t | f | f | t | t | t
+ [] | t | f | t | f | t | f | t | t
+ [1, "2", {}] | t | f | t | f | t | f | t | t
+ {} | t | f | t | t | f | f | t | t
+ {"a": 1, "b": null} | t | f | t | t | f | f | t | t
+ {"a": null} | t | f | t | t | f | f | t | t
+ {"a": 1, "b": [{"a": 1}, {"a": 2}]} | t | f | t | t | f | f | t | t
+ {"a": 1, "b": [{"a": 2, "b": 0}]} | t | f | t | t | f | f | t | t
+(11 rows)
+
+-- SYSTEM_USER
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/0823d061
+CREATE TABLE table_name_for_view(id int, val_1 text);
+SELECT create_distributed_table('table_name_for_view', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+INSERT INTO table_name_for_view VALUES (1, 'test');
+-- define a view that uses SYSTEM_USER keyword
+CREATE VIEW prop_view_1 AS
+ SELECT *, SYSTEM_USER AS su FROM table_name_for_view;
+SELECT * FROM prop_view_1;
+ id | val_1 | su
+---------------------------------------------------------------------
+ 1 | test |
+(1 row)
+
+-- check that the view definition with SYSTEM_USER is correctly propagated to workers
+\c - - - :worker_1_port
+SELECT pg_get_viewdef('pg16.prop_view_1', true);
+ pg_get_viewdef
+---------------------------------------------------------------------
+ SELECT id, +
+ val_1, +
+ SYSTEM_USER AS su +
+ FROM pg16.table_name_for_view;
+(1 row)
+
+\c - - - :master_port
+SET search_path TO pg16;
+-- REINDEX DATABASE/SYSTEM name is optional
+-- We don't propagate these commands automatically anyway
+-- Testing here with run_command_on_workers
+-- Relevant PG commit: https://github.com/postgres/postgres/commit/2cbc3c1
+REINDEX DATABASE;
+SELECT result FROM run_command_on_workers
+($$REINDEX DATABASE$$);
+ result
+---------------------------------------------------------------------
+ REINDEX
+ REINDEX
+(2 rows)
+
+REINDEX SYSTEM;
+SELECT result FROM run_command_on_workers
+($$REINDEX SYSTEM$$);
+ result
+---------------------------------------------------------------------
+ REINDEX
+ REINDEX
+(2 rows)
+
+--
+-- random_normal() provides normally-distributed random numbers
+-- here we add the same tests as the ones with random() in aggregate_support.sql
+-- Relevant PG commit: https://github.com/postgres/postgres/commit/38d8176
+--
+CREATE TABLE dist_table (dist_col int, agg_col numeric);
+SELECT create_distributed_table('dist_table', 'dist_col');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+CREATE TABLE ref_table (int_col int);
+SELECT create_reference_table('ref_table');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- Test the cases where the worker agg exec. returns no tuples.
+SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
+FROM (SELECT *, random_normal() FROM dist_table) a;
+ percentile_disc
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT PERCENTILE_DISC((2 > random_normal(stddev => 1, mean => 0))::int::numeric / 10)
+ WITHIN GROUP (ORDER BY agg_col)
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ percentile_disc
+---------------------------------------------------------------------
+
+(1 row)
+
+-- run the same queries after loading some data
+INSERT INTO dist_table VALUES (2, 11.2), (3, NULL), (6, 3.22), (3, 4.23), (5, 5.25),
+ (4, 63.4), (75, NULL), (80, NULL), (96, NULL), (8, 1078), (0, 1.19);
+SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
+FROM (SELECT *, random_normal() FROM dist_table) a;
+ percentile_disc
+---------------------------------------------------------------------
+ 3.22
+(1 row)
+
+SELECT PERCENTILE_DISC((2 > random_normal(stddev => 1, mean => 0))::int::numeric / 10)
+ WITHIN GROUP (ORDER BY agg_col)
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+ percentile_disc
+---------------------------------------------------------------------
+ 1.19
+(1 row)
+
+--
+-- PG16 added WITH ADMIN FALSE option to GRANT ROLE
+-- WITH ADMIN FALSE is the default; make sure we propagate it correctly in Citus
+-- Relevant PG commit: https://github.com/postgres/postgres/commit/e3ce2de
+--
+CREATE ROLE role1;
+CREATE ROLE role2;
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%GRANT%';
+-- default admin option is false
+GRANT role1 TO role2;
+NOTICE: issuing GRANT role1 TO role2;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing GRANT role1 TO role2;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+REVOKE role1 FROM role2;
+-- should behave the same as the default
+GRANT role1 TO role2 WITH ADMIN FALSE;
+NOTICE: issuing GRANT role1 TO role2;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing GRANT role1 TO role2;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+REVOKE role1 FROM role2;
+-- with admin option and with admin true are the same
+GRANT role1 TO role2 WITH ADMIN OPTION;
+NOTICE: issuing GRANT role1 TO role2 WITH ADMIN OPTION;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing GRANT role1 TO role2 WITH ADMIN OPTION;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH ADMIN TRUE;
+NOTICE: issuing GRANT role1 TO role2 WITH ADMIN OPTION;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+NOTICE: issuing GRANT role1 TO role2 WITH ADMIN OPTION;
+DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
+REVOKE role1 FROM role2;
+RESET citus.log_remote_commands;
+RESET citus.grep_remote_commands;
+--
+-- PG16 added new options to GRANT ROLE
+-- inherit: https://github.com/postgres/postgres/commit/e3ce2de
+-- set: https://github.com/postgres/postgres/commit/3d14e17
+-- We don't propagate these options in Citus for now
+--
+GRANT role1 TO role2 WITH INHERIT FALSE;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH INHERIT TRUE;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH INHERIT OPTION;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH SET FALSE;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH SET TRUE;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH SET OPTION;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+REVOKE role1 FROM role2;
+-- connect to worker node
+GRANT role1 TO role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+SELECT roleid::regrole::text AS role, member::regrole::text,
+admin_option, inherit_option, set_option FROM pg_auth_members
+WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2;
+ role | member | admin_option | inherit_option | set_option
+---------------------------------------------------------------------
+ role1 | role2 | t | f | f
+(1 row)
+
+\c - - - :worker_1_port
+SELECT roleid::regrole::text AS role, member::regrole::text,
+admin_option, inherit_option, set_option FROM pg_auth_members
+WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2;
+ role | member | admin_option | inherit_option | set_option
+---------------------------------------------------------------------
+(0 rows)
+
+SET citus.enable_ddl_propagation TO off;
+GRANT role1 TO role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE;
+RESET citus.enable_ddl_propagation;
+SELECT roleid::regrole::text AS role, member::regrole::text,
+admin_option, inherit_option, set_option FROM pg_auth_members
+WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2;
+ role | member | admin_option | inherit_option | set_option
+---------------------------------------------------------------------
+ role1 | role2 | t | f | f
+(1 row)
+
+\c - - - :master_port
+REVOKE role1 FROM role2;
+-- test REVOKES as well
+GRANT role1 TO role2;
+REVOKE SET OPTION FOR role1 FROM role2;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+REVOKE INHERIT OPTION FOR role1 FROM role2;
+NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes
+HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation.
+DROP ROLE role1, role2;
+-- test that everything works fine for roles that are not propagated
+SET citus.enable_ddl_propagation TO off;
+CREATE ROLE role3;
+CREATE ROLE role4;
+CREATE ROLE role5;
+RESET citus.enable_ddl_propagation;
+-- by default, admin option is false, inherit is true, set is true
+GRANT role3 TO role4;
+GRANT role3 TO role5 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE;
+SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text = 'role3' ORDER BY 1, 2;
+ role | member | admin_option | inherit_option | set_option
+---------------------------------------------------------------------
+ role3 | role4 | f | t | t
+ role3 | role5 | t | f | f
+(2 rows)
+
+DROP ROLE role3, role4, role5;
+\set VERBOSITY terse
+SET client_min_messages TO ERROR;
+DROP EXTENSION postgres_fdw CASCADE;
+DROP SCHEMA pg16 CASCADE;
diff --git a/src/test/regress/expected/pg16_0.out b/src/test/regress/expected/pg16_0.out
new file mode 100644
index 000000000..730c916ca
--- /dev/null
+++ b/src/test/regress/expected/pg16_0.out
@@ -0,0 +1,9 @@
+--
+-- PG16
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+\else
+\q
diff --git a/src/test/regress/expected/pgmerge.out b/src/test/regress/expected/pgmerge.out
index 895bf0680..a0f5d0c86 100644
--- a/src/test/regress/expected/pgmerge.out
+++ b/src/test/regress/expected/pgmerge.out
@@ -226,13 +226,14 @@ WHEN NOT MATCHED THEN
ERROR: permission denied for table target2
-- check if the target can be accessed from source relation subquery; we should
-- not be able to do so
+\set VERBOSITY terse
MERGE INTO target t
USING (SELECT * FROM source WHERE t.tid > sid) s
ON t.tid = s.sid
WHEN NOT MATCHED THEN
INSERT DEFAULT VALUES;
-ERROR: invalid reference to FROM-clause entry for table "t"
-HINT: There is an entry for table "t", but it cannot be referenced from this part of the query.
+ERROR: invalid reference to FROM-clause entry for table "t" at character 55
+\set VERBOSITY default
--
-- initial tests
--
@@ -660,13 +661,13 @@ SELECT * FROM target ORDER BY tid;
ROLLBACK;
-- and again with a subtle error: referring to non-existent target row for NOT MATCHED
+\set VERBOSITY terse
MERGE INTO target t
USING source AS s
ON t.tid = s.sid
WHEN NOT MATCHED THEN
INSERT (tid, balance) VALUES (t.tid, s.delta);
-ERROR: invalid reference to FROM-clause entry for table "t"
-HINT: There is an entry for table "t", but it cannot be referenced from this part of the query.
+ERROR: invalid reference to FROM-clause entry for table "t" at character 109
-- and again with a constant ON clause
BEGIN;
MERGE INTO target t
@@ -674,8 +675,7 @@ USING source AS s
ON (SELECT true)
WHEN NOT MATCHED THEN
INSERT (tid, balance) VALUES (t.tid, s.delta);
-ERROR: invalid reference to FROM-clause entry for table "t"
-HINT: There is an entry for table "t", but it cannot be referenced from this part of the query.
+ERROR: invalid reference to FROM-clause entry for table "t" at character 109
SELECT * FROM target ORDER BY tid;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
@@ -779,11 +779,11 @@ MERGE INTO wq_target t
USING wq_source s ON t.tid = s.sid
WHEN NOT MATCHED AND t.balance = 100 THEN
INSERT (tid) VALUES (s.sid);
-ERROR: invalid reference to FROM-clause entry for table "t"
-HINT: There is an entry for table "t", but it cannot be referenced from this part of the query.
+ERROR: invalid reference to FROM-clause entry for table "t" at character 80
SELECT * FROM wq_target;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
+\set VERBOSITY default
MERGE INTO wq_target t
USING wq_source s ON t.tid = s.sid
WHEN NOT MATCHED AND s.balance = 100 THEN
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index 702d23f1f..c761efb3e 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -375,6 +375,158 @@ END;
CREATE PUBLICATION pubdep FOR TABLES IN SCHEMA deptest;
RESET citus.create_object_propagation;
DROP SCHEMA deptest CASCADE;
+--
+-- PG16 allows publications with schema and table of the same schema.
+-- backpatched to PG15
+-- Relevant PG commit: https://github.com/postgres/postgres/commit/13a185f
+--
+CREATE SCHEMA publication2;
+CREATE TABLE publication2.test1 (id int);
+SELECT create_distributed_table('publication2.test1', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+-- should be able to create publication with schema and table of the same
+-- schema
+CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test1;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+ c
+---------------------------------------------------------------------
+ SELECT worker_create_or_replace_object('CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test1 WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
+(1 row)
+
+CREATE TABLE publication.test2 (id int);
+SELECT create_distributed_table('publication.test2', 'id');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLE publication.test2;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+ c
+---------------------------------------------------------------------
+ SELECT worker_create_or_replace_object('CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test1, TABLE publication.test2 WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
+(1 row)
+
+-- should be able to have the publication2 schema and its new table test2 in the testpub_for_tbl_schema publication
+ALTER TABLE test2 SET SCHEMA publication2;
+-- should be able to add a table of the same schema to the schema publication
+CREATE TABLE publication2.test3 (x int primary key, y int, "column-1" int);
+SELECT create_distributed_table('publication2.test3', 'x');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLE publication2.test3;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+ c
+---------------------------------------------------------------------
+ SELECT worker_create_or_replace_object('CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test1, TABLE publication2.test2, TABLE publication2.test3 WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
+(1 row)
+
+-- should be able to drop the table
+ALTER PUBLICATION testpub_for_tbl_schema DROP TABLE publication2.test3;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+ c
+---------------------------------------------------------------------
+ SELECT worker_create_or_replace_object('CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test1, TABLE publication2.test2 WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
+(1 row)
+
+DROP PUBLICATION testpub_for_tbl_schema;
+CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2;
+-- should be able to set publication with schema and table of the same schema
+ALTER PUBLICATION testpub_for_tbl_schema SET TABLES IN SCHEMA publication2, TABLE publication2.test1 WHERE (id < 99);
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+ c
+---------------------------------------------------------------------
+ SELECT worker_create_or_replace_object('CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test1 WHERE ((test1.id < 99)) WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
+(1 row)
+
+-- test that using column list for table is disallowed if any schemas are
+-- part of the publication
+DROP PUBLICATION testpub_for_tbl_schema;
+-- failure - cannot use column list and schema together
+CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test3(y);
+ERROR: cannot use column list for relation "publication2.test3" in publication "testpub_for_tbl_schema"
+DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
+-- ok - only publish schema
+CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+ c
+---------------------------------------------------------------------
+ SELECT worker_create_or_replace_object('CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2 WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
+(1 row)
+
+-- failure - add a table with column list when there is already a schema in the
+-- publication
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLE publication2.test3(y);
+ERROR: cannot use column list for relation "publication2.test3" in publication "testpub_for_tbl_schema"
+DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
+-- ok - only publish table with column list
+ALTER PUBLICATION testpub_for_tbl_schema SET TABLE publication2.test3(y);
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+ c
+---------------------------------------------------------------------
+ SELECT worker_create_or_replace_object('CREATE PUBLICATION testpub_for_tbl_schema FOR TABLE publication2.test3 (y) WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
+(1 row)
+
+-- failure - specify a schema when there is already a column list in the
+-- publication
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLES IN SCHEMA publication2;
+ERROR: cannot add schema to publication "testpub_for_tbl_schema"
+DETAIL: Schemas cannot be added if any tables that specify a column list are already part of the publication.
+-- failure - cannot SET column list and schema together
+ALTER PUBLICATION testpub_for_tbl_schema SET TABLES IN SCHEMA publication2, TABLE publication2.test3(y);
+ERROR: cannot use column list for relation "publication2.test3" in publication "testpub_for_tbl_schema"
+DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
+-- ok - drop table
+ALTER PUBLICATION testpub_for_tbl_schema DROP TABLE publication2.test3;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+ c
+---------------------------------------------------------------------
+ SELECT worker_create_or_replace_object('CREATE PUBLICATION testpub_for_tbl_schema WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')');
+(1 row)
+
+-- failure - cannot ADD column list and schema together
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLES IN SCHEMA publication2, TABLE publication2.test3(y);
+ERROR: cannot use column list for relation "publication2.test3" in publication "testpub_for_tbl_schema"
+DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements.
-- make sure we can sync all the publication metadata
SELECT start_metadata_sync_to_all_nodes();
start_metadata_sync_to_all_nodes
@@ -386,7 +538,9 @@ DROP PUBLICATION pubdep;
DROP PUBLICATION "pub-mix";
DROP PUBLICATION pubtables;
DROP PUBLICATION pubpartitioned;
+DROP PUBLICATION testpub_for_tbl_schema;
SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
+DROP SCHEMA publication2 CASCADE;
diff --git a/src/test/regress/expected/query_single_shard_table.out b/src/test/regress/expected/query_single_shard_table.out
index 5716c570d..ad6037b65 100644
--- a/src/test/regress/expected/query_single_shard_table.out
+++ b/src/test/regress/expected/query_single_shard_table.out
@@ -293,7 +293,7 @@ DEBUG: Creating router plan
-- cartesian product with different table types
-- with other table types
SELECT COUNT(*) FROM distributed_table d1, nullkey_c1_t1;
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot perform distributed planning on this query
DETAIL: Cartesian products are currently unsupported
SELECT COUNT(*) FROM reference_table d1, nullkey_c1_t1;
@@ -552,14 +552,14 @@ SELECT COUNT(*) FROM nullkey_c1_t1 t1
WHERE t1.b IN (
SELECT b+1 FROM nullkey_c2_t2 t2 WHERE t2.b = t1.a
);
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
ERROR: cannot push down this subquery
DETAIL: nullkey_c2_t2 and nullkey_c1_t1 are not colocated
SELECT COUNT(*) FROM nullkey_c1_t1 t1
WHERE t1.b NOT IN (
SELECT a FROM nullkey_c2_t2 t2 WHERE t2.b > t1.a
);
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
ERROR: cannot push down this subquery
DETAIL: nullkey_c2_t2 and nullkey_c1_t1 are not colocated
-- join with a reference table
@@ -1101,21 +1101,21 @@ SELECT COUNT(*) FROM nullkey_c1_t1 t1
LEFT JOIN LATERAL (
SELECT * FROM distributed_table t2 WHERE t2.b > t1.a
) q USING(a);
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
SELECT COUNT(*) FROM nullkey_c1_t1 t1
WHERE EXISTS (
SELECT * FROM distributed_table t2 WHERE t2.b > t1.a
);
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
SELECT COUNT(*) FROM nullkey_c1_t1 t1
WHERE NOT EXISTS (
SELECT * FROM distributed_table t2 WHERE t2.b > t1.a
);
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
SELECT COUNT(*) FROM nullkey_c1_t1 t1
@@ -1136,14 +1136,14 @@ SELECT COUNT(*) FROM distributed_table t1
LEFT JOIN LATERAL (
SELECT * FROM nullkey_c1_t1 t2 WHERE t2.b > t1.a
) q USING(a);
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
SELECT COUNT(*) FROM distributed_table t1
WHERE EXISTS (
SELECT * FROM nullkey_c1_t1 t2 WHERE t2.b > t1.a
);
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
SELECT COUNT(*) FROM distributed_table t1
@@ -1186,14 +1186,14 @@ SELECT COUNT(*) FROM nullkey_c1_t1 t1
WHERE t1.b IN (
SELECT b+1 FROM citus_local_table t2 WHERE t2.b = t1.a
);
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: direct joins between distributed and local tables are not supported
SELECT COUNT(*) FROM nullkey_c1_t1 t1
WHERE t1.b NOT IN (
SELECT a FROM citus_local_table t2 WHERE t2.b > t1.a
);
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: direct joins between distributed and local tables are not supported
SELECT COUNT(*) FROM nullkey_c1_t1 t1
@@ -1261,14 +1261,14 @@ SELECT COUNT(*) FROM citus_local_table t1
WHERE t1.b IN (
SELECT b+1 FROM nullkey_c1_t1 t2 WHERE t2.b = t1.a
);
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: direct joins between distributed and local tables are not supported
SELECT COUNT(*) FROM citus_local_table t1
WHERE t1.b NOT IN (
SELECT a FROM nullkey_c1_t1 t2 WHERE t2.b > t1.a
);
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
ERROR: direct joins between distributed and local tables are not supported
SELECT COUNT(*) FROM citus_local_table t1
@@ -1398,6 +1398,7 @@ INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c1_t2;
SET client_min_messages TO DEBUG2;
-- between two non-colocated single-shard tables
INSERT INTO nullkey_c1_t1 SELECT * FROM nullkey_c2_t1;
+DEBUG: Creating router plan
DEBUG: INSERT target relation and all source relations of the SELECT must be colocated in distributed INSERT ... SELECT
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
@@ -1423,25 +1424,30 @@ DEBUG: INSERT target relation and all source relations of the SELECT must be co
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_c1_t1 SELECT * FROM citus_local_table;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT cannot select from a local relation when inserting into a distributed table
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO nullkey_c1_t1 SELECT * FROM postgres_local_table;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT can only select from distributed tables
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO reference_table SELECT * FROM nullkey_c1_t1;
+DEBUG: Creating router plan
DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO distributed_table SELECT * FROM nullkey_c1_t1;
+DEBUG: Creating router plan
DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
DETAIL: The target table's partition column should correspond to a partition column in the subquery.
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO citus_local_table SELECT * FROM nullkey_c1_t1;
+DEBUG: Creating router plan
DEBUG: distributed INSERT ... SELECT cannot insert into a local table that is added to metadata
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
@@ -1515,14 +1521,14 @@ WITH level_0 AS (
SELECT COUNT(*) FROM level_0;
DEBUG: CTE level_0 is going to be inlined via distributed planning
DEBUG: CTE level_1 is going to be inlined via distributed planning
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for CTE level_0: WITH level_1 AS (WITH RECURSIVE level_2_recursive(x) AS (VALUES (1) UNION ALL SELECT (nullkey_c1_t1.a OPERATOR(pg_catalog.+) 1) FROM (query_single_shard_table.nullkey_c1_t1 JOIN level_2_recursive level_2_recursive_1 ON ((nullkey_c1_t1.a OPERATOR(pg_catalog.=) level_2_recursive_1.x))) WHERE (nullkey_c1_t1.a OPERATOR(pg_catalog.<) 100)) SELECT level_2_recursive.x, distributed_table.a, distributed_table.b FROM (level_2_recursive JOIN query_single_shard_table.distributed_table ON ((level_2_recursive.x OPERATOR(pg_catalog.=) distributed_table.a)))) SELECT x, a, b FROM level_1
DEBUG: CTE level_1 is going to be inlined via distributed planning
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for CTE level_1: WITH RECURSIVE level_2_recursive(x) AS (VALUES (1) UNION ALL SELECT (nullkey_c1_t1.a OPERATOR(pg_catalog.+) 1) FROM (query_single_shard_table.nullkey_c1_t1 JOIN level_2_recursive level_2_recursive_1 ON ((nullkey_c1_t1.a OPERATOR(pg_catalog.=) level_2_recursive_1.x))) WHERE (nullkey_c1_t1.a OPERATOR(pg_catalog.<) 100)) SELECT level_2_recursive.x, distributed_table.a, distributed_table.b FROM (level_2_recursive JOIN query_single_shard_table.distributed_table ON ((level_2_recursive.x OPERATOR(pg_catalog.=) distributed_table.a)))
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
ERROR: recursive CTEs are not supported in distributed queries
-- grouping set
SELECT
@@ -1583,7 +1589,7 @@ DEBUG: Creating router plan
SELECT COUNT(*), b FROM nullkey_c1_t1 GROUP BY 2
HAVING (SELECT COUNT(*) FROM nullkey_c2_t1) > 0
ORDER BY 1,2;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS count FROM query_single_shard_table.nullkey_c2_t1
@@ -1880,8 +1886,6 @@ SELECT COALESCE(raw_events_first.user_id, users_ref_table.user_id)
FROM raw_events_first
RIGHT JOIN (users_ref_table LEFT JOIN raw_events_second ON users_ref_table.user_id = raw_events_second.user_id)
ON raw_events_first.user_id = users_ref_table.user_id;
-DEBUG: cannot perform a lateral outer join when a distributed subquery references a reference table
-DEBUG: Collecting INSERT ... SELECT results on coordinator
-- using a full join
INSERT INTO agg_events (user_id, value_1_agg)
SELECT t1.user_id AS col1,
@@ -1912,8 +1916,6 @@ FROM users_ref_table
WHERE NOT EXISTS (SELECT 1
FROM raw_events_second
WHERE raw_events_second.user_id = users_ref_table.user_id);
-DEBUG: correlated subqueries are not supported when the FROM clause contains a reference table
-DEBUG: Collecting INSERT ... SELECT results on coordinator
-- using inner join
INSERT INTO agg_events (user_id)
SELECT raw_events_first.user_id
@@ -1923,28 +1925,10 @@ INSERT INTO agg_events (user_id)
SELECT raw_events_first.user_id
FROM raw_events_first INNER JOIN users_ref_table ON raw_events_first.user_id = users_ref_table.user_id
WHERE raw_events_first.value_1 IN (10, 11,12) OR users_ref_table.user_id IN (1,2,3,4);
--- We could relax distributed insert .. select checks to allow pushing
--- down more clauses down to the worker nodes when inserting into a single
--- shard by selecting from a colocated one. We might want to do something
--- like https://github.com/citusdata/citus/pull/6772.
---
--- e.g., insert into null_shard_key_1/citus_local/reference
--- select * from null_shard_key_1/citus_local/reference limit 1
---
--- Below "limit / offset clause" test and some others are examples of this.
-- limit / offset clause
INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LIMIT 1;
-DEBUG: cannot push down this subquery
-DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
-DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first OFFSET 1;
-DEBUG: cannot push down this subquery
-DETAIL: Offset clause is currently unsupported when a subquery references a column from another query
-DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO agg_events (user_id) SELECT users_ref_table.user_id FROM users_ref_table LIMIT 1;
-DEBUG: cannot push down this subquery
-DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
-DEBUG: Collecting INSERT ... SELECT results on coordinator
-- using a materialized cte
WITH cte AS MATERIALIZED
(SELECT max(value_1)+1 as v1_agg, user_id FROM raw_events_first GROUP BY user_id)
@@ -1955,15 +1939,9 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO raw_events_second
WITH cte AS MATERIALIZED (SELECT * FROM raw_events_first)
SELECT user_id * 1000, time, value_1, value_2, value_3, value_4 FROM cte;
-DEBUG: cannot push down this subquery
-DETAIL: CTEs in subqueries are currently unsupported
-DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO raw_events_second (user_id)
WITH cte AS MATERIALIZED (SELECT * FROM users_ref_table)
SELECT user_id FROM cte;
-DEBUG: cannot push down this subquery
-DETAIL: CTEs in subqueries are currently unsupported
-DEBUG: Collecting INSERT ... SELECT results on coordinator
-- using a regular cte
WITH cte AS (SELECT * FROM raw_events_first)
INSERT INTO raw_events_second
@@ -2017,16 +1995,10 @@ INSERT INTO
raw_events_first(user_id)
(SELECT user_id FROM raw_events_first) INTERSECT
(SELECT user_id FROM raw_events_first);
-DEBUG: cannot push down this subquery
-DETAIL: Intersect and Except are currently unsupported
-DEBUG: Collecting INSERT ... SELECT results on coordinator
INSERT INTO
raw_events_first(user_id)
(SELECT user_id FROM users_ref_table) INTERSECT
(SELECT user_id FROM raw_events_first);
-DEBUG: cannot push down this subquery
-DETAIL: Intersect and Except are currently unsupported
-DEBUG: Collecting INSERT ... SELECT results on coordinator
-- group by clause inside subquery
INSERT INTO agg_events
(user_id)
@@ -2125,8 +2097,8 @@ ERROR: functions used in the WHERE/ON/WHEN clause of modification queries on di
UPDATE nullkey_c1_t1 SET b = 5 FROM nullkey_c1_t2 WHERE nullkey_c1_t1.b = nullkey_c1_t2.b;
DEBUG: Creating router plan
UPDATE nullkey_c1_t1 SET b = 5 FROM nullkey_c2_t1 WHERE nullkey_c1_t1.b = nullkey_c2_t1.b;
-DEBUG: found no worker with all shard placements
-ERROR: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
+ERROR: router planner does not support queries that reference non-colocated distributed tables
UPDATE nullkey_c1_t1 SET b = 5 FROM reference_table WHERE nullkey_c1_t1.b = reference_table.b;
DEBUG: Creating router plan
UPDATE nullkey_c1_t1 SET b = 5 FROM distributed_table WHERE nullkey_c1_t1.b = distributed_table.b;
@@ -2177,8 +2149,8 @@ DEBUG: Creating router plan
DELETE FROM nullkey_c1_t1 USING nullkey_c1_t2 WHERE nullkey_c1_t1.b = nullkey_c1_t2.b;
DEBUG: Creating router plan
DELETE FROM nullkey_c1_t1 USING nullkey_c2_t1 WHERE nullkey_c1_t1.b = nullkey_c2_t1.b;
-DEBUG: found no worker with all shard placements
-ERROR: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
+ERROR: router planner does not support queries that reference non-colocated distributed tables
DELETE FROM nullkey_c1_t1 USING reference_table WHERE nullkey_c1_t1.b = reference_table.b;
DEBUG: Creating router plan
DELETE FROM nullkey_c1_t1 USING distributed_table WHERE nullkey_c1_t1.b = distributed_table.b;
@@ -2506,7 +2478,7 @@ WITH cte AS (
DELETE FROM nullkey_c1_t1 WHERE a = 1 RETURNING *
)
SELECT * FROM distributed_table WHERE a IN (SELECT a FROM cte);
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for CTE cte: DELETE FROM query_single_shard_table.nullkey_c1_t1 WHERE (a OPERATOR(pg_catalog.=) 1) RETURNING a, b
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
@@ -2770,7 +2742,7 @@ WITH cte1 AS (
)
UPDATE non_colocated_users_table dt SET value = cte1.value_1
FROM cte1 WHERE cte1.user_id = dt.id AND dt.id = 1;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: generating subplan XXX_1 for CTE cte1: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM query_single_shard_table.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1)
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
@@ -2784,7 +2756,7 @@ WITH cte1 AS MATERIALIZED (
)
UPDATE non_colocated_users_table dt SET value = cte1.value_1 + cte2.event_type
FROM cte1, cte2 WHERE cte1.user_id = dt.id AND dt.id = 1;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: generating subplan XXX_1 for CTE cte1: SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM query_single_shard_table.users_table WHERE (user_id OPERATOR(pg_catalog.=) 1)
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
@@ -3034,21 +3006,21 @@ DEBUG: Creating router plan
SELECT event_type, (SELECT time FROM users_table WHERE user_id = e.user_id ORDER BY time LIMIT 1)
FROM non_colocated_events_table e
ORDER BY 1,2 LIMIT 1;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: push down of limit count: 1
ERROR: cannot push down this subquery
DETAIL: users_table and non_colocated_events_table are not colocated
SELECT event_type, (SELECT max(time) FROM users_table WHERE user_id = e.value_2)
FROM non_colocated_events_table e
ORDER BY 1,2 LIMIT 1;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: push down of limit count: 1
ERROR: cannot push down this subquery
DETAIL: users_table and non_colocated_events_table are not colocated
SELECT event_type, (SELECT max(time) FROM users_table)
FROM non_colocated_events_table e
ORDER BY 1,2 LIMIT 1;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: Distributed planning for a fast-path router query
DEBUG: Creating router plan
DEBUG: generating subplan XXX_1 for subquery SELECT max("time") AS max FROM query_single_shard_table.users_table
@@ -3125,7 +3097,7 @@ SELECT sum(e.user_id) + (SELECT max(value_3) FROM users_table WHERE user_id = e.
FROM non_colocated_events_table e
GROUP BY e.user_id
ORDER BY 1 LIMIT 3;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
ERROR: cannot push down subquery on the target list
DETAIL: Subqueries in the SELECT part of the query can only be pushed down if they happen before aggregates and window functions
SELECT e.user_id, sum((SELECT any_value(value_3) FROM users_reference_table WHERE user_id = e.user_id GROUP BY user_id)) OVER (PARTITION BY e.user_id)
@@ -3143,7 +3115,7 @@ SELECT (SELECT (SELECT e.user_id + user_id) FROM users_table WHERE user_id = e.u
FROM non_colocated_events_table e
GROUP BY 1
ORDER BY 1 LIMIT 3;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: push down of limit count: 3
ERROR: cannot push down this subquery
DETAIL: users_table and non_colocated_events_table are not colocated
@@ -3194,7 +3166,7 @@ SELECT (SELECT value_2 FROM view_1 WHERE user_id = e.user_id GROUP BY value_2)
FROM non_colocated_events_table e
GROUP BY 1
ORDER BY 1 LIMIT 3;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: push down of limit count: 3
ERROR: cannot push down this subquery
DETAIL: users_table and non_colocated_events_table are not colocated
@@ -3208,7 +3180,7 @@ GROUP BY user_id
(SELECT sum(user_id) FROM users_table WHERE user_id = u1.user_id GROUP BY user_id)
FROM users_table u1
GROUP BY user_id) as foo) ORDER BY 1 DESC;
-DEBUG: found no worker with all shard placements
+DEBUG: router planner does not support queries that reference non-colocated distributed tables
DEBUG: Creating router plan
DEBUG: generating subplan XXX_1 for subquery SELECT count(*) AS count FROM (SELECT (SELECT sum(users_table.user_id) AS sum FROM query_single_shard_table.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) u1.user_id) GROUP BY users_table.user_id) AS sum FROM query_single_shard_table.users_table u1 GROUP BY u1.user_id) foo
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, count(*) AS count FROM query_single_shard_table.non_colocated_events_table e1 GROUP BY user_id HAVING (count(*) OPERATOR(pg_catalog.>) (SELECT intermediate_result.count FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(count bigint))) ORDER BY user_id DESC
diff --git a/src/test/regress/expected/recurring_outer_join.out b/src/test/regress/expected/recurring_outer_join.out
index 4ff353838..0764f05dc 100644
--- a/src/test/regress/expected/recurring_outer_join.out
+++ b/src/test/regress/expected/recurring_outer_join.out
@@ -1187,17 +1187,16 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
-- same test using a view, can be recursively planned
CREATE VIEW my_view_1 AS
-SELECT * FROM dist_1 t2 WHERE EXISTS (
+SELECT * FROM dist_1 table_name_for_view WHERE EXISTS (
SELECT * FROM dist_1 t4
- WHERE t4.a = t2.a
-);
+ WHERE t4.a = table_name_for_view.a);
SELECT COUNT(*) FROM ref_1 t1
LEFT JOIN
my_view_1 t3
USING (a);
DEBUG: recursively planning right side of the left join since the outer side is a recurring rel
DEBUG: recursively planning the distributed subquery since it is part of a distributed join node that is outer joined with a recurring rel
-DEBUG: generating subplan XXX_1 for subquery SELECT t2.a, t2.b FROM recurring_outer_join.dist_1 t2 WHERE (EXISTS (SELECT t4.a, t4.b FROM recurring_outer_join.dist_1 t4 WHERE (t4.a OPERATOR(pg_catalog.=) t2.a)))
+DEBUG: generating subplan XXX_1 for subquery SELECT a, b FROM recurring_outer_join.dist_1 table_name_for_view WHERE (EXISTS (SELECT t4.a, t4.b FROM recurring_outer_join.dist_1 t4 WHERE (t4.a OPERATOR(pg_catalog.=) table_name_for_view.a)))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (recurring_outer_join.ref_1 t1 LEFT JOIN (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) t3 USING (a))
count
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
index 8cd4d6ffa..975f501ef 100644
--- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
+++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out
@@ -396,10 +396,10 @@ DROP VIEW numbers_v, local_table_v;
-- Joins between reference tables and materialized views are allowed to
-- be planned to be executed locally.
--
-CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers WHERE a BETWEEN 1 AND 10;
-NOTICE: executing the command locally: SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 numbers WHERE ((a OPERATOR(pg_catalog.>=) 1) AND (a OPERATOR(pg_catalog.<=) 10))
+CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers table_name_for_view WHERE a BETWEEN 1 AND 10;
+NOTICE: executing the command locally: SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 table_name_for_view WHERE ((a OPERATOR(pg_catalog.>=) 1) AND (a OPERATOR(pg_catalog.<=) 10))
REFRESH MATERIALIZED VIEW numbers_v;
-NOTICE: executing the command locally: SELECT numbers.a FROM replicate_ref_to_coordinator.numbers_8000001 numbers WHERE ((numbers.a OPERATOR(pg_catalog.>=) 1) AND (numbers.a OPERATOR(pg_catalog.<=) 10))
+NOTICE: executing the command locally: SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 table_name_for_view WHERE ((a OPERATOR(pg_catalog.>=) 1) AND (a OPERATOR(pg_catalog.<=) 10))
SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a ORDER BY 1;
NOTICE: executing the command locally: SELECT squares.a, squares.b, numbers_v.a FROM (replicate_ref_to_coordinator.squares_8000000 squares JOIN replicate_ref_to_coordinator.numbers_v ON ((squares.a OPERATOR(pg_catalog.=) numbers_v.a))) ORDER BY squares.a
a | b | a
@@ -446,7 +446,7 @@ INSERT INTO local_table VALUES (1), (2), (3), (4);
INSERT INTO numbers VALUES (1), (2), (3), (4);
NOTICE: executing the command locally: INSERT INTO replicate_ref_to_coordinator.numbers_8000001 AS citus_table_alias (a) VALUES (1), (2), (3), (4)
ALTER TABLE numbers ADD COLUMN d int;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (8000001, 'replicate_ref_to_coordinator', 'ALTER TABLE numbers ADD COLUMN d int;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (8000001, 'replicate_ref_to_coordinator', 'ALTER TABLE numbers ADD COLUMN d integer;')
SELECT * FROM local_table JOIN numbers USING(a) ORDER BY a;
NOTICE: executing the command locally: SELECT local_table.a, numbers.d FROM (replicate_ref_to_coordinator.local_table JOIN replicate_ref_to_coordinator.numbers_8000001 numbers(a, d) USING (a)) ORDER BY local_table.a
a | d
diff --git a/src/test/regress/expected/schema_based_sharding.out b/src/test/regress/expected/schema_based_sharding.out
index 4493f9614..28cb45688 100644
--- a/src/test/regress/expected/schema_based_sharding.out
+++ b/src/test/regress/expected/schema_based_sharding.out
@@ -391,8 +391,8 @@ SELECT EXISTS(
(1 row)
INSERT INTO tenant_4.another_partitioned_table VALUES (1, 'a');
-ERROR: insert or update on table "another_partitioned_table_child_1920090" violates foreign key constraint "another_partitioned_table_a_fkey_1920089"
-DETAIL: Key (a)=(1) is not present in table "partitioned_table_1920087".
+ERROR: insert or update on table "another_partitioned_table_child_1920088" violates foreign key constraint "another_partitioned_table_a_fkey_1920087"
+DETAIL: Key (a)=(1) is not present in table "partitioned_table_1920085".
CONTEXT: while executing command on localhost:xxxxx
INSERT INTO tenant_4.partitioned_table VALUES (1, 'a');
INSERT INTO tenant_4.another_partitioned_table VALUES (1, 'a');
@@ -1345,13 +1345,6 @@ DROP USER test_other_super_user;
CREATE ROLE test_non_super_user WITH LOGIN;
ALTER ROLE test_non_super_user NOSUPERUSER;
GRANT CREATE ON DATABASE regression TO test_non_super_user;
-SELECT result FROM run_command_on_workers($$GRANT CREATE ON DATABASE regression TO test_non_super_user$$);
- result
----------------------------------------------------------------------
- GRANT
- GRANT
-(2 rows)
-
GRANT CREATE ON SCHEMA public TO test_non_super_user ;
\c - test_non_super_user
SET search_path TO regular_schema;
@@ -1487,13 +1480,6 @@ $$);
\c - postgres
REVOKE CREATE ON DATABASE regression FROM test_non_super_user;
-SELECT result FROM run_command_on_workers($$REVOKE CREATE ON DATABASE regression FROM test_non_super_user$$);
- result
----------------------------------------------------------------------
- REVOKE
- REVOKE
-(2 rows)
-
REVOKE CREATE ON SCHEMA public FROM test_non_super_user;
DROP ROLE test_non_super_user;
\c - - - :worker_1_port
diff --git a/src/test/regress/expected/set_operations.out b/src/test/regress/expected/set_operations.out
index a0dad36a8..f2e0616e7 100644
--- a/src/test/regress/expected/set_operations.out
+++ b/src/test/regress/expected/set_operations.out
@@ -1119,7 +1119,7 @@ DEBUG: Creating router plan
-- queries on non-colocated tables that would push down if they were not colocated are recursively planned
SELECT * FROM (SELECT * FROM test UNION SELECT * FROM test_not_colocated) u ORDER BY 1,2;
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
@@ -1135,7 +1135,7 @@ DEBUG: Creating router plan
(2 rows)
SELECT * FROM (SELECT * FROM test UNION ALL SELECT * FROM test_not_colocated) u ORDER BY 1,2;
-DEBUG: router planner does not support queries that reference non-colocated distributed tables
+DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM recursive_union.test
DEBUG: Router planner cannot handle multi-shard select queries
diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out
index b8f4010b1..7997b5e28 100644
--- a/src/test/regress/expected/shard_rebalancer.out
+++ b/src/test/regress/expected/shard_rebalancer.out
@@ -2553,12 +2553,18 @@ SELECT public.wait_until_metadata_sync(30000);
(1 row)
+-- errors out because shard replication factor > shard allowed node count
+SELECT rebalance_table_shards('test_rebalance_with_disabled_worker');
+ERROR: Shard replication factor (2) cannot be greater than number of nodes with should_have_shards=true (1).
+-- set replication factor to one, and try again
+SET citus.shard_replication_factor TO 1;
SELECT rebalance_table_shards('test_rebalance_with_disabled_worker');
rebalance_table_shards
---------------------------------------------------------------------
(1 row)
+SET citus.shard_replication_factor TO 2;
SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
@@ -2575,7 +2581,7 @@ SELECT create_distributed_table('test_with_all_shards_excluded', 'a', colocate_w
(1 row)
-SELECT shardid FROM pg_dist_shard;
+SELECT shardid FROM pg_dist_shard ORDER BY shardid ASC;
shardid
---------------------------------------------------------------------
433504
diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out
index 4b6ea0837..f485763c5 100644
--- a/src/test/regress/expected/single_node.out
+++ b/src/test/regress/expected/single_node.out
@@ -177,8 +177,119 @@ WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
(1 row)
RESET citus.enable_schema_based_sharding;
+-- Test lazy conversion from Citus local to single-shard tables
+-- and reference tables, on single node. This means that no shard
+-- replication should be needed.
+CREATE TABLE ref_table_conversion_test (
+ a int PRIMARY KEY
+);
+SELECT citus_add_local_table_to_metadata('ref_table_conversion_test');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid and placementid
+SELECT get_shard_id_for_distribution_column('single_node.ref_table_conversion_test') AS ref_table_conversion_test_old_shard_id \gset
+SELECT placementid AS ref_table_conversion_test_old_coord_placement_id FROM pg_dist_placement WHERE shardid = :ref_table_conversion_test_old_shard_id \gset
+SELECT create_reference_table('ref_table_conversion_test');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_reference_table('single_node.ref_table_conversion_test');
+ verify_pg_dist_partition_for_reference_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placements_for_reference_table('single_node.ref_table_conversion_test',
+ :ref_table_conversion_test_old_shard_id,
+ :ref_table_conversion_test_old_coord_placement_id);
+ verify_shard_placements_for_reference_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE TABLE single_shard_conversion_test_1 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_1');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_1') AS single_shard_conversion_test_1_old_shard_id \gset
+SELECT create_distributed_table('single_shard_conversion_test_1', null, colocate_with=>'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_1');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_1', :single_shard_conversion_test_1_old_shard_id, true);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE TABLE single_shard_conversion_test_2 (
+ int_col_1 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_2');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_2') AS single_shard_conversion_test_2_old_shard_id \gset
+SELECT create_distributed_table('single_shard_conversion_test_2', null, colocate_with=>'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_2');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_2', :single_shard_conversion_test_2_old_shard_id, true);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- make sure that they're created on different colocation groups
+SELECT
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'single_node.single_shard_conversion_test_1'::regclass
+)
+!=
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'single_node.single_shard_conversion_test_2'::regclass
+);
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
SET client_min_messages TO WARNING;
-DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2;
+DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2, ref_table_conversion_test, single_shard_conversion_test_1, single_shard_conversion_test_2;
DROP SCHEMA tenant_1 CASCADE;
RESET client_min_messages;
-- so that we don't have to update rest of the test output
@@ -2134,10 +2245,10 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in
-- test with NULL columns
ALTER TABLE non_binary_copy_test ADD COLUMN z INT;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630519, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z INT;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630520, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z INT;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630521, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z INT;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630522, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z INT;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630519, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630520, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630521, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630522, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
WITH cte_1 AS
(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING z)
SELECT bool_and(z is null) FROM cte_1;
diff --git a/src/test/regress/expected/single_node_0.out b/src/test/regress/expected/single_node_0.out
index 4749b9b81..321d283f8 100644
--- a/src/test/regress/expected/single_node_0.out
+++ b/src/test/regress/expected/single_node_0.out
@@ -177,8 +177,119 @@ WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
(1 row)
RESET citus.enable_schema_based_sharding;
+-- Test lazy conversion from Citus local to single-shard tables
+-- and reference tables, on single node. This means that no shard
+-- replication should be needed.
+CREATE TABLE ref_table_conversion_test (
+ a int PRIMARY KEY
+);
+SELECT citus_add_local_table_to_metadata('ref_table_conversion_test');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid and placementid
+SELECT get_shard_id_for_distribution_column('single_node.ref_table_conversion_test') AS ref_table_conversion_test_old_shard_id \gset
+SELECT placementid AS ref_table_conversion_test_old_coord_placement_id FROM pg_dist_placement WHERE shardid = :ref_table_conversion_test_old_shard_id \gset
+SELECT create_reference_table('ref_table_conversion_test');
+ create_reference_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_reference_table('single_node.ref_table_conversion_test');
+ verify_pg_dist_partition_for_reference_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placements_for_reference_table('single_node.ref_table_conversion_test',
+ :ref_table_conversion_test_old_shard_id,
+ :ref_table_conversion_test_old_coord_placement_id);
+ verify_shard_placements_for_reference_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE TABLE single_shard_conversion_test_1 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_1');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_1') AS single_shard_conversion_test_1_old_shard_id \gset
+SELECT create_distributed_table('single_shard_conversion_test_1', null, colocate_with=>'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_1');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_1', :single_shard_conversion_test_1_old_shard_id, true);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+CREATE TABLE single_shard_conversion_test_2 (
+ int_col_1 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_2');
+ citus_add_local_table_to_metadata
+---------------------------------------------------------------------
+
+(1 row)
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_2') AS single_shard_conversion_test_2_old_shard_id \gset
+SELECT create_distributed_table('single_shard_conversion_test_2', null, colocate_with=>'none');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_2');
+ verify_pg_dist_partition_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_2', :single_shard_conversion_test_2_old_shard_id, true);
+ verify_shard_placement_for_single_shard_table
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- make sure that they're created on different colocation groups
+SELECT
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'single_node.single_shard_conversion_test_1'::regclass
+)
+!=
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'single_node.single_shard_conversion_test_2'::regclass
+);
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
SET client_min_messages TO WARNING;
-DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2;
+DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2, ref_table_conversion_test, single_shard_conversion_test_1, single_shard_conversion_test_2;
DROP SCHEMA tenant_1 CASCADE;
RESET client_min_messages;
-- so that we don't have to update rest of the test output
@@ -2134,10 +2245,10 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in
-- test with NULL columns
ALTER TABLE non_binary_copy_test ADD COLUMN z INT;
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630519, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z INT;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630520, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z INT;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630521, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z INT;')
-NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630522, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z INT;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630519, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630520, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630521, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
+NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630522, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;')
WITH cte_1 AS
(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING z)
SELECT bool_and(z is null) FROM cte_1;
diff --git a/src/test/regress/expected/single_node_enterprise.out b/src/test/regress/expected/single_node_enterprise.out
index 305a02b8e..79f231864 100644
--- a/src/test/regress/expected/single_node_enterprise.out
+++ b/src/test/regress/expected/single_node_enterprise.out
@@ -411,6 +411,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM single_nod
ROLLBACK;
NOTICE: issuing ROLLBACK
+SET citus.shard_replication_factor TO 1;
-- now, lets move all the shards of distributed tables out of the coordinator
-- block writes is much faster for the sake of the test timings we prefer it
SELECT master_drain_node('localhost', :master_port, shard_transfer_mode:='block_writes');
diff --git a/src/test/regress/expected/single_shard_table_udfs.out b/src/test/regress/expected/single_shard_table_udfs.out
index d49027b60..26b3f82cc 100644
--- a/src/test/regress/expected/single_shard_table_udfs.out
+++ b/src/test/regress/expected/single_shard_table_udfs.out
@@ -121,7 +121,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'null_dist_key_table'::regclass;
SELECT column_name_to_column('null_dist_key_table', 'a');
column_name_to_column
---------------------------------------------------------------------
- {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
+ {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
(1 row)
SELECT master_update_shard_statistics(shardid)
diff --git a/src/test/regress/expected/sqlancer_failures.out b/src/test/regress/expected/sqlancer_failures.out
index d4d06a4e4..c6df1f68d 100644
--- a/src/test/regress/expected/sqlancer_failures.out
+++ b/src/test/regress/expected/sqlancer_failures.out
@@ -416,11 +416,11 @@ ON (true);
SELECT
COUNT(unsupported_join.*)
FROM
- (distributed_table a
+ ((distributed_table a
LEFT JOIN reference_table b ON (true)
- RIGHT JOIN reference_table c ON (false)) as unsupported_join
+ RIGHT JOIN reference_table c ON (false))
RIGHT JOIN
- (reference_table d JOIN reference_table e ON(true)) ON (true);
+ (reference_table d JOIN reference_table e ON(true)) ON (true)) as unsupported_join;
count
---------------------------------------------------------------------
125
@@ -429,9 +429,9 @@ RIGHT JOIN
SELECT
COUNT(unsupported_join.*)
FROM
- (distributed_table a
+ ((distributed_table a
LEFT JOIN (SELECT * FROM reference_table OFFSET 0) b ON (true)
- RIGHT JOIN (SELECT * FROM reference_table OFFSET 0) c ON (false)) as unsupported_join
+ RIGHT JOIN (SELECT * FROM reference_table OFFSET 0) c ON (false))
RIGHT JOIN
(
(SELECT * FROM reference_table OFFSET 0) d
@@ -439,7 +439,7 @@ RIGHT JOIN
(SELECT * FROM reference_table OFFSET 0) e
ON(true)
)
-ON (true);
+ON (true)) as unsupported_join;
count
---------------------------------------------------------------------
125
diff --git a/src/test/regress/expected/start_stop_metadata_sync.out b/src/test/regress/expected/start_stop_metadata_sync.out
index daa23aecd..ec9b0a034 100644
--- a/src/test/regress/expected/start_stop_metadata_sync.out
+++ b/src/test/regress/expected/start_stop_metadata_sync.out
@@ -158,12 +158,12 @@ SELECT * FROM test_matview;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'events%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
- events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
- events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
- events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
- events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
- events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
+ events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
+ events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
+ events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
+ events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
+ events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
+ events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
(6 rows)
SELECT count(*) > 0 FROM pg_dist_node;
diff --git a/src/test/regress/expected/subquery_local_tables.out b/src/test/regress/expected/subquery_local_tables.out
index ce5e844aa..42e691c87 100644
--- a/src/test/regress/expected/subquery_local_tables.out
+++ b/src/test/regress/expected/subquery_local_tables.out
@@ -126,43 +126,6 @@ DEBUG: push down of limit count: 3
4
(3 rows)
--- subquery in FROM -> FROM -> WHERE -> WHERE should be replaced if
--- it contains onle local tables
--- Later the upper level query is also recursively planned due to LIMIT
-SELECT user_id, array_length(events_table, 1)
-FROM (
- SELECT user_id, array_agg(event ORDER BY time) AS events_table
- FROM (
- SELECT
- u.user_id, e.event_type::text AS event, e.time
- FROM
- users_table AS u,
- events_table AS e
- WHERE u.user_id = e.user_id AND
- u.user_id IN
- (
- SELECT
- user_id
- FROM
- users_table
- WHERE value_2 >= 5
- AND EXISTS (SELECT user_id FROM events_table_local WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1)
- AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id)
- LIMIT 5
- )
- ) t
- GROUP BY user_id
-) q
-ORDER BY 2 DESC, 1;
-DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM subquery_local_tables.events_table_local WHERE ((event_type OPERATOR(pg_catalog.>) 1) AND (event_type OPERATOR(pg_catalog.<=) 3) AND (value_3 OPERATOR(pg_catalog.>) (1)::double precision))
-DEBUG: push down of limit count: 5
-DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5
-DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id
- user_id | array_length
----------------------------------------------------------------------
- 5 | 364
-(1 row)
-
-- subquery (i.e., subquery_2) in WHERE->FROM should be replaced due to local tables
SELECT
user_id
diff --git a/src/test/regress/expected/undistribute_table.out b/src/test/regress/expected/undistribute_table.out
index 98b1d98f1..15d0e2695 100644
--- a/src/test/regress/expected/undistribute_table.out
+++ b/src/test/regress/expected/undistribute_table.out
@@ -304,18 +304,18 @@ SELECT create_distributed_table('view_table', 'a');
INSERT INTO view_table VALUES (1, 2, 3), (2, 4, 6), (3, 6, 9);
CREATE SCHEMA another_schema;
-CREATE VIEW undis_view1 AS SELECT a, b FROM view_table;
-CREATE VIEW undis_view2 AS SELECT a, c FROM view_table;
+CREATE VIEW undis_view1 AS SELECT a, b FROM view_table table_name_for_view;
+CREATE VIEW undis_view2 AS SELECT a, c FROM view_table table_name_for_view;
CREATE VIEW another_schema.undis_view3 AS SELECT b, c FROM undis_view1 JOIN undis_view2 ON undis_view1.a = undis_view2.a;
SELECT schemaname, viewname, viewowner, definition FROM pg_views WHERE viewname LIKE 'undis\_view%' ORDER BY viewname;
schemaname | viewname | viewowner | definition
---------------------------------------------------------------------
- undistribute_table | undis_view1 | postgres | SELECT view_table.a, +
- | | | view_table.b +
- | | | FROM view_table;
- undistribute_table | undis_view2 | postgres | SELECT view_table.a, +
- | | | view_table.c +
- | | | FROM view_table;
+ undistribute_table | undis_view1 | postgres | SELECT a, +
+ | | | b +
+ | | | FROM view_table table_name_for_view;
+ undistribute_table | undis_view2 | postgres | SELECT a, +
+ | | | c +
+ | | | FROM view_table table_name_for_view;
another_schema | undis_view3 | postgres | SELECT undis_view1.b, +
| | | undis_view2.c +
| | | FROM (undis_view1 +
@@ -348,12 +348,12 @@ NOTICE: renaming the new table to undistribute_table.view_table
SELECT schemaname, viewname, viewowner, definition FROM pg_views WHERE viewname LIKE 'undis\_view%' ORDER BY viewname;
schemaname | viewname | viewowner | definition
---------------------------------------------------------------------
- undistribute_table | undis_view1 | postgres | SELECT view_table.a, +
- | | | view_table.b +
- | | | FROM view_table;
- undistribute_table | undis_view2 | postgres | SELECT view_table.a, +
- | | | view_table.c +
- | | | FROM view_table;
+ undistribute_table | undis_view1 | postgres | SELECT a, +
+ | | | b +
+ | | | FROM view_table table_name_for_view;
+ undistribute_table | undis_view2 | postgres | SELECT a, +
+ | | | c +
+ | | | FROM view_table table_name_for_view;
another_schema | undis_view3 | postgres | SELECT undis_view1.b, +
| | | undis_view2.c +
| | | FROM (undis_view1 +
@@ -400,22 +400,6 @@ NOTICE: renaming the new table to undistribute_table.dist_type_table
(1 row)
--- test CREATE RULE with ON SELECT
-CREATE TABLE rule_table_1 (a INT);
-CREATE TABLE rule_table_2 (a INT);
-SELECT create_distributed_table('rule_table_2', 'a');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE RULE "_RETURN" AS ON SELECT TO rule_table_1 DO INSTEAD SELECT * FROM rule_table_2;
--- the CREATE RULE turns rule_table_1 into a view
-ALTER EXTENSION plpgsql ADD VIEW rule_table_1;
-NOTICE: Citus does not propagate adding/dropping member objects
-HINT: You can add/drop the member objects on the workers as well.
-SELECT undistribute_table('rule_table_2');
-ERROR: cannot alter table because an extension depends on it
-- test CREATE RULE without ON SELECT
CREATE TABLE rule_table_3 (a INT);
CREATE TABLE rule_table_4 (a INT);
@@ -444,9 +428,6 @@ NOTICE: renaming the new table to undistribute_table.rule_table_4
ALTER EXTENSION plpgsql DROP VIEW extension_view;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
-ALTER EXTENSION plpgsql DROP VIEW rule_table_1;
-NOTICE: Citus does not propagate adding/dropping member objects
-HINT: You can add/drop the member objects on the workers as well.
ALTER EXTENSION plpgsql DROP TABLE rule_table_3;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
@@ -456,11 +437,9 @@ DETAIL: drop cascades to view undis_view1
drop cascades to view undis_view2
drop cascades to view another_schema.undis_view3
DROP SCHEMA undistribute_table, another_schema CASCADE;
-NOTICE: drop cascades to 7 other objects
+NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table extension_table
drop cascades to view extension_view
drop cascades to table dist_type_table
-drop cascades to table rule_table_2
-drop cascades to view rule_table_1
drop cascades to table rule_table_3
drop cascades to table rule_table_4
diff --git a/src/test/regress/expected/upgrade_basic_after.out b/src/test/regress/expected/upgrade_basic_after.out
index e724b81d3..1bfbfc989 100644
--- a/src/test/regress/expected/upgrade_basic_after.out
+++ b/src/test/regress/expected/upgrade_basic_after.out
@@ -400,3 +400,12 @@ SELECT * FROM t_range ORDER BY id;
(9 rows)
ROLLBACK;
+-- There is a difference in partkey Var representation between PG16 and older versions
+-- Sanity check here that we can properly do column_to_column_name
+SELECT column_to_column_name(logicalrelid, partkey)
+FROM pg_dist_partition WHERE partkey IS NOT NULL ORDER BY 1 LIMIT 1;
+ column_to_column_name
+---------------------------------------------------------------------
+ a
+(1 row)
+
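A hedged aside on why this sanity check exists: PG16 adds a varnullingrels field to serialized Var nodes, so the partkey text stored in pg_dist_partition is spelled differently on PG16 than on older servers, and column_to_column_name() has to accept both spellings. A minimal sketch of the two forms and of how to exercise the conversion, assuming a cluster with at least one hash-distributed table:

-- pre-PG16 partkey text:
--   {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
-- PG16 partkey text (note the extra :varnullingrels (b) field):
--   {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
SELECT logicalrelid, column_to_column_name(logicalrelid, partkey)
FROM pg_dist_partition
WHERE partkey IS NOT NULL
ORDER BY 1;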
diff --git a/src/test/regress/expected/upgrade_basic_before.out b/src/test/regress/expected/upgrade_basic_before.out
index 880747a38..9abb6c806 100644
--- a/src/test/regress/expected/upgrade_basic_before.out
+++ b/src/test/regress/expected/upgrade_basic_before.out
@@ -65,3 +65,12 @@ UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid
UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid = :shardid2;
\copy t_range FROM STDIN with (DELIMITER ',')
\copy t_range FROM STDIN with (DELIMITER ',')
+-- There is a difference in partkey Var representation between PG16 and older versions
+-- Sanity check here that we can properly do column_to_column_name
+SELECT column_to_column_name(logicalrelid, partkey)
+FROM pg_dist_partition WHERE partkey IS NOT NULL ORDER BY 1 LIMIT 1;
+ column_to_column_name
+---------------------------------------------------------------------
+ a
+(1 row)
+
diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out
index 3e9698788..36bd504e8 100644
--- a/src/test/regress/expected/upgrade_list_citus_objects.out
+++ b/src/test/regress/expected/upgrade_list_citus_objects.out
@@ -5,6 +5,8 @@ WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass
AND refobjid = e.oid
AND deptype = 'e'
AND e.extname='citus'
+ AND pg_catalog.pg_describe_object(classid, objid, 0) != 'function any_value(anyelement)'
+ AND pg_catalog.pg_describe_object(classid, objid, 0) != 'function any_value_agg(anyelement,anyelement)'
ORDER BY 1;
description
---------------------------------------------------------------------
@@ -13,8 +15,6 @@ ORDER BY 1;
function alter_old_partitions_set_access_method(regclass,timestamp with time zone,name)
function alter_role_if_exists(text,text)
function alter_table_set_access_method(regclass,text)
- function any_value(anyelement)
- function any_value_agg(anyelement,anyelement)
function array_cat_agg(anycompatiblearray)
function assign_distributed_transaction_id(integer,bigint,timestamp with time zone)
function authinfo_valid(text)
@@ -73,6 +73,7 @@ ORDER BY 1;
function citus_internal_adjust_local_clock_to_remote(cluster_clock)
function citus_internal_delete_colocation_metadata(integer)
function citus_internal_delete_partition_metadata(regclass)
+ function citus_internal_delete_placement_metadata(bigint)
function citus_internal_delete_shard_metadata(bigint)
function citus_internal_delete_tenant_schema(oid)
function citus_internal_global_blocked_processes()
@@ -82,6 +83,7 @@ ORDER BY 1;
function citus_internal_start_replication_origin_tracking()
function citus_internal_stop_replication_origin_tracking()
function citus_internal_unregister_tenant_schema_globally(oid,text)
+ function citus_internal_update_none_dist_table_metadata(oid,"char",bigint,boolean)
function citus_internal_update_placement_metadata(bigint,integer,integer)
function citus_internal_update_relation_colocation(oid,integer)
function citus_is_clock_after(cluster_clock,cluster_clock)
@@ -103,6 +105,7 @@ ORDER BY 1;
function citus_nodeid_for_gpid(bigint)
function citus_nodename_for_nodeid(integer)
function citus_nodeport_for_nodeid(integer)
+ function citus_pause_node_within_txn(integer,boolean,integer)
function citus_pid_for_gpid(bigint)
function citus_prepare_pg_upgrade()
function citus_query_stats()
@@ -115,6 +118,8 @@ ORDER BY 1;
function citus_remove_node(text,integer)
function citus_run_local_command(text)
function citus_schema_distribute(regnamespace)
+ function citus_schema_move(regnamespace,integer,citus.shard_transfer_mode)
+ function citus_schema_move(regnamespace,text,integer,citus.shard_transfer_mode)
function citus_schema_undistribute(regnamespace)
function citus_server_id()
function citus_set_coordinator_host(text,integer,noderole,name)
@@ -338,5 +343,5 @@ ORDER BY 1;
view citus_stat_tenants_local
view pg_dist_shard_placement
view time_partitions
-(330 rows)
+(333 rows)
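A hedged note on the two any_value exclusions added above: PostgreSQL 16 introduces a built-in any_value() aggregate, which is presumably why Citus's own any_value(anyelement) and any_value_agg(anyelement,anyelement) helpers drop out of the extension's object list and are filtered here, keeping the expected output identical across supported majors. An illustrative way to check which variant a given server exposes:

SELECT p.oid::regprocedure AS signature, n.nspname AS schema
FROM pg_proc p
JOIN pg_namespace n ON n.oid = p.pronamespace
WHERE p.proname IN ('any_value', 'any_value_agg')
ORDER BY 1;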
diff --git a/src/test/regress/expected/view_propagation.out b/src/test/regress/expected/view_propagation.out
index d3d5bdb7b..5591a962f 100644
--- a/src/test/regress/expected/view_propagation.out
+++ b/src/test/regress/expected/view_propagation.out
@@ -316,13 +316,13 @@ UNION ALL
employees e
INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id;
-- Aliases are supported
-CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6;
+CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6 table_name_for_view;
-- View options are supported
CREATE VIEW opt_prop_view
WITH(check_option=CASCADED, security_barrier=true)
- AS SELECT * FROM view_table_6;
+ AS SELECT * FROM view_table_6 table_name_for_view;
CREATE VIEW sep_opt_prop_view
- AS SELECT * FROM view_table_6
+ AS SELECT * FROM view_table_6 table_name_for_view
WITH LOCAL CHECK OPTION;
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%' ORDER BY 1;
obj_identifier
@@ -335,27 +335,27 @@ SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as
-- Check definitions and reltoptions of views are correct on workers
\c - - - :worker_1_port
SELECT definition FROM pg_views WHERE viewname = 'aliased_opt_prop_view';
- definition
+ definition
---------------------------------------------------------------------
- SELECT view_table_6.id AS alias_1, +
- view_table_6.val_1 AS alias_2 +
- FROM view_prop_schema.view_table_6;
+ SELECT id AS alias_1, +
+ val_1 AS alias_2 +
+ FROM view_prop_schema.view_table_6 table_name_for_view;
(1 row)
SELECT definition FROM pg_views WHERE viewname = 'opt_prop_view';
- definition
+ definition
---------------------------------------------------------------------
- SELECT view_table_6.id, +
- view_table_6.val_1 +
- FROM view_prop_schema.view_table_6;
+ SELECT id, +
+ val_1 +
+ FROM view_prop_schema.view_table_6 table_name_for_view;
(1 row)
SELECT definition FROM pg_views WHERE viewname = 'sep_opt_prop_view';
- definition
+ definition
---------------------------------------------------------------------
- SELECT view_table_6.id, +
- view_table_6.val_1 +
- FROM view_prop_schema.view_table_6;
+ SELECT id, +
+ val_1 +
+ FROM view_prop_schema.view_table_6 table_name_for_view;
(1 row)
SELECT relname, reloptions
@@ -444,7 +444,7 @@ SELECT create_distributed_table('alter_view_table','id');
(1 row)
-CREATE VIEW alter_view_1 AS SELECT * FROM alter_view_table;
+CREATE VIEW alter_view_1 AS SELECT * FROM alter_view_table table_name_for_view;
-- Set/drop default value is not supported by Citus
ALTER VIEW alter_view_1 ALTER COLUMN val1 SET DEFAULT random()::text;
ERROR: Citus doesn't support setting or resetting default values for a column of view
@@ -465,11 +465,11 @@ ALTER TABLE alter_view_1 SET (check_option=cascaded, security_barrier);
ALTER TABLE alter_view_1 SET (check_option=cascaded, security_barrier = true);
-- Check the definition on both coordinator and worker node
SELECT definition FROM pg_views WHERE viewname = 'alter_view_1';
- definition
+ definition
---------------------------------------------------------------------
- SELECT alter_view_table.id,+
- alter_view_table.val1 +
- FROM alter_view_table;
+ SELECT id, +
+ val1 +
+ FROM alter_view_table table_name_for_view;
(1 row)
SELECT relname, reloptions
@@ -482,11 +482,11 @@ WHERE oid = 'view_prop_schema.alter_view_1'::regclass::oid;
\c - - - :worker_1_port
SELECT definition FROM pg_views WHERE viewname = 'alter_view_1';
- definition
+ definition
---------------------------------------------------------------------
- SELECT alter_view_table.id, +
- alter_view_table.val1 +
- FROM view_prop_schema.alter_view_table;
+ SELECT id, +
+ val1 +
+ FROM view_prop_schema.alter_view_table table_name_for_view;
(1 row)
SELECT relname, reloptions
diff --git a/src/test/regress/expected/with_modifying.out b/src/test/regress/expected/with_modifying.out
index 70418251b..dc624cc83 100644
--- a/src/test/regress/expected/with_modifying.out
+++ b/src/test/regress/expected/with_modifying.out
@@ -956,8 +956,10 @@ WITH first_query AS (INSERT INTO modify_table (id) VALUES (10001)),
SET client_min_messages TO debug2;
-- pushed down without the insert
WITH mb AS (UPDATE modify_table SET val = 3 WHERE id = 3 RETURNING NULL) INSERT INTO modify_table WITH ma AS (SELECT * FROM modify_table LIMIT 10) SELECT count(*) FROM mb;
-DEBUG: cannot push down this subquery
-DETAIL: Limit clause is currently unsupported when a subquery references a column from another query
+DEBUG: Creating router plan
+DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match
+DETAIL: Subquery contains an aggregation in the same position as the target table's partition column.
+HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery.
DEBUG: Creating router plan
DEBUG: query has a single distribution column value: 3
DEBUG: Collecting INSERT ... SELECT results on coordinator
diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule
index 1484c712f..d8cc77c73 100644
--- a/src/test/regress/isolation_schedule
+++ b/src/test/regress/isolation_schedule
@@ -77,6 +77,7 @@ test: isolation_global_pid
test: isolation_citus_locks
test: isolation_reference_table
test: isolation_schema_based_sharding
+test: isolation_citus_pause_node
test: isolation_citus_schema_distribute_undistribute
# Rebalancer
diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule
index 67473e471..4dead5be3 100644
--- a/src/test/regress/multi_1_schedule
+++ b/src/test/regress/multi_1_schedule
@@ -16,10 +16,10 @@
# Tests around schema changes, these are run first, so there's no preexisting objects.
# ---
test: multi_extension
+test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw
test: single_node
test: relation_access_tracking_single_node
test: single_node_truncate
-test: multi_test_helpers multi_test_helpers_superuser multi_create_fdw
test: multi_cluster_management
# below tests are placed right after multi_cluster_management as we do
@@ -50,6 +50,9 @@ test: multi_metadata_attributes
test: multi_read_from_secondaries
+test: grant_on_database_propagation
+test: alter_database_propagation
+
# ----------
# multi_citus_tools tests utility functions written for citus tools
# ----------
@@ -157,7 +160,7 @@ test: with_executors with_join with_partitioning with_transactions with_dml
# Tests around DDL statements run on distributed tables
# ----------
test: multi_index_statements
-test: multi_alter_table_statements
+test: multi_alter_table_statements alter_table_add_column
test: multi_alter_table_add_constraints
test: multi_alter_table_add_constraints_without_name
test: multi_alter_table_add_foreign_key_without_name
@@ -203,6 +206,7 @@ test: multi_modifying_xacts
test: multi_generate_ddl_commands
test: multi_create_shards
test: multi_transaction_recovery
+test: multi_transaction_recovery_multiple_databases
test: local_dist_join_modifications
test: local_table_join
@@ -304,6 +308,7 @@ test: mx_regular_user
test: citus_locks
test: global_cancel
test: sequences_owned_by
+test: citus_schema_move
test: remove_coordinator
# ----------
diff --git a/src/test/regress/multi_mx_schedule b/src/test/regress/multi_mx_schedule
index 682379b78..6654b4ab0 100644
--- a/src/test/regress/multi_mx_schedule
+++ b/src/test/regress/multi_mx_schedule
@@ -69,6 +69,7 @@ test: local_shard_execution_dropped_column
test: metadata_sync_helpers
test: issue_6592
+test: executor_local_failure
# test that no tests leaked intermediate results. This should always be last
test: ensure_no_intermediate_data_leak
diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule
index 4d42dbc78..65a272566 100644
--- a/src/test/regress/multi_schedule
+++ b/src/test/regress/multi_schedule
@@ -65,6 +65,7 @@ test: pg13 pg12
test: pg14
test: pg15
test: pg15_jsonpath detect_conn_close
+test: pg16
test: drop_column_partitioned_table
test: tableam
@@ -112,7 +113,7 @@ test: clock
# MERGE tests
test: merge pgmerge merge_repartition2
-test: merge_repartition1
+test: merge_repartition1 merge_schema_sharding
test: merge_partition_tables
# ---------
diff --git a/src/test/regress/spec/isolation_add_remove_node.spec b/src/test/regress/spec/isolation_add_remove_node.spec
index 014592d25..58c5c1517 100644
--- a/src/test/regress/spec/isolation_add_remove_node.spec
+++ b/src/test/regress/spec/isolation_add_remove_node.spec
@@ -45,6 +45,12 @@ step "s1-disable-node-1"
SELECT public.wait_until_metadata_sync();
}
+step "s1-disable-node-2"
+{
+ SELECT 1 FROM master_disable_node('localhost', 57638);
+ SELECT public.wait_until_metadata_sync();
+}
+
step "s1-remove-node-1"
{
SELECT * FROM master_remove_node('localhost', 57637);
@@ -88,6 +94,16 @@ step "s2-disable-node-1"
SELECT public.wait_until_metadata_sync();
}
+step "s2-disable-node-2"
+{
+ SELECT 1 FROM master_disable_node('localhost', 57638);
+}
+
+step "s2-wait-metadata-sync"
+{
+ SELECT public.wait_until_metadata_sync();
+}
+
step "s2-remove-node-1"
{
SELECT * FROM master_remove_node('localhost', 57637);
@@ -135,4 +151,4 @@ permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-activate-node
permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes"
// disable an active node from 2 transactions, one aborts
-permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes"
+permutation "s1-add-node-1" "s1-add-node-2" "s1-begin" "s1-disable-node-2" "s2-disable-node-2" "s1-abort" "s2-wait-metadata-sync" "s1-show-nodes"
diff --git a/src/test/regress/spec/isolation_citus_pause_node.spec b/src/test/regress/spec/isolation_citus_pause_node.spec
new file mode 100644
index 000000000..8449e2b3c
--- /dev/null
+++ b/src/test/regress/spec/isolation_citus_pause_node.spec
@@ -0,0 +1,185 @@
+setup
+{
+ SET citus.shard_replication_factor to 1;
+
+ create table city (id int , name text );
+ SELECT create_reference_table('city');
+
+ CREATE TABLE company(id int primary key, name text, city_id int);
+ select create_distributed_table('company', 'id');
+
+ create table employee(id int , name text, company_id int );
+ alter table employee add constraint employee_pkey primary key (id,company_id);
+
+ select create_distributed_table('employee', 'company_id');
+
+ insert into city values(1,'city1');
+ insert into city values(2,'city2');
+
+
+ insert into company values(1,'c1', 1);
+ insert into company values(2,'c2',2);
+ insert into company values(3,'c3',1);
+
+ insert into employee values(1,'e1',1);
+ insert into employee values(2,'e2',1);
+ insert into employee values(3,'e3',1);
+
+ insert into employee values(4,'e4',2);
+ insert into employee values(5,'e5',2);
+ insert into employee values(6,'e6',2);
+
+ insert into employee values(7,'e7',3);
+ insert into employee values(8,'e8',3);
+ insert into employee values(9,'e9',3);
+ insert into employee values(10,'e10',3);
+
+
+}
+
+teardown
+{
+ DROP TABLE employee,company,city;
+}
+
+session "s1"
+
+step "s1-begin"
+{
+ BEGIN;
+}
+
+step "s1-node-not-found"
+{
+ DO $$
+ DECLARE
+ v_node_id int:= -1;
+ v_node_exists boolean := true;
+ v_exception_message text;
+ v_expected_exception_message text := '';
+ BEGIN
+ select nextval('pg_dist_node_nodeid_seq')::int into v_node_id;
+ select citus_pause_node_within_txn(v_node_id) ;
+ EXCEPTION
+ WHEN SQLSTATE 'P0002' THEN
+ GET STACKED DIAGNOSTICS v_exception_message = MESSAGE_TEXT;
+ v_expected_exception_message := 'node ' || v_node_id || ' not found';
+ if v_exception_message = v_expected_exception_message then
+ RAISE NOTICE 'Node not found.';
+ end if;
+ END;
+ $$
+ LANGUAGE plpgsql;
+}
+
+step "s1-pause-node"
+{
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ BEGIN
+ -- The first message in the block gets printed at the top of the code block, so raise a dummy message
+ -- to make sure that the first real message is printed in the correct place.
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+
+
+ -- Pause the node
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+}
+
+step "s1-pause-node-force"
+{
+ SET client_min_messages = 'notice';
+ DO $$
+ DECLARE
+ v_shard_id int;
+ v_node_id int;
+ v_node_name text;
+ v_node_port int;
+ v_force boolean := true;
+ v_lock_cooldown int := 100;
+ BEGIN
+ -- The first message in the block gets printed at the top of the code block, so raise a dummy message
+ -- to make sure that the first real message is printed in the correct place.
+
+ raise notice '';
+ -- Get the shard id for the distribution column
+ SELECT get_shard_id_for_distribution_column('employee', 3) into v_shard_id;
+
+ --Get the node id for the shard id
+ SELECT nodename,nodeport into v_node_name,v_node_port FROM citus_shards WHERE shardid = v_shard_id limit 1;
+
+ -- Get the node id for the shard id
+ SELECT nodeid into v_node_id FROM pg_dist_node WHERE nodename = v_node_name and nodeport = v_node_port limit 1;
+
+
+ -- Pause the node with force true
+ perform pg_catalog.citus_pause_node_within_txn(v_node_id,v_force,v_lock_cooldown) ;
+ END;
+ $$
+ LANGUAGE plpgsql;
+}
+
+step "s1-end"
+{
+ COMMIT;
+}
+
+session "s2"
+
+
+step "s2-begin"
+{
+ BEGIN;
+}
+
+step "s2-insert-distributed"
+{
+ -- Execute the INSERT statement
+ insert into employee values(11,'e11',3);
+
+}
+
+step "s2-insert-reference"{
+ -- Execute the INSERT statement
+ insert into city values(3,'city3');
+}
+
+step "s2-select-distributed"{
+
+ select * from employee where id = 10;
+}
+
+
+step "s2-delete-distributed"{
+ -- Execute the DELETE statement
+ delete from employee where id = 9;
+}
+
+step "s2-end"
+{
+ COMMIT;
+}
+
+permutation "s1-begin" "s2-begin" "s1-pause-node" "s2-insert-distributed" "s1-end" "s2-end"
+permutation "s1-begin" "s2-begin" "s1-pause-node" "s2-delete-distributed" "s1-end" "s2-end"
+permutation "s1-begin" "s1-pause-node" "s2-begin" "s2-select-distributed" "s1-end" "s2-end"
+permutation "s1-begin" "s2-begin" "s1-pause-node" "s2-insert-reference" "s1-end" "s2-end"
+permutation "s1-begin" "s1-pause-node" "s1-pause-node" "s1-end"
+permutation "s1-begin" "s1-node-not-found" "s1-end"
+permutation "s1-begin" "s2-begin" "s2-insert-distributed" "s1-pause-node-force"(*) "s1-end" "s2-end"
diff --git a/src/test/regress/spec/isolation_pg_send_cancellation.spec b/src/test/regress/spec/isolation_pg_send_cancellation.spec
deleted file mode 100644
index 46c6a0539..000000000
--- a/src/test/regress/spec/isolation_pg_send_cancellation.spec
+++ /dev/null
@@ -1,65 +0,0 @@
-setup
-{
- CREATE FUNCTION run_pg_send_cancellation(int,int)
- RETURNS void
- AS 'citus'
- LANGUAGE C STRICT;
-
- CREATE FUNCTION get_cancellation_key()
- RETURNS int
- AS 'citus'
- LANGUAGE C STRICT;
-
- CREATE TABLE cancel_table (pid int, cancel_key int);
-}
-
-teardown
-{
- DROP TABLE IF EXISTS cancel_table;
-}
-
-session "s1"
-
-/* store the PID and cancellation key of session 1 */
-step "s1-register"
-{
- INSERT INTO cancel_table VALUES (pg_backend_pid(), get_cancellation_key());
-}
-
-/* lock the table from session 1, will block and get cancelled */
-step "s1-lock"
-{
- BEGIN;
- LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
- END;
-}
-
-session "s2"
-
-/* lock the table from session 2 to block session 1 */
-step "s2-lock"
-{
- BEGIN;
- LOCK TABLE cancel_table IN ACCESS EXCLUSIVE MODE;
-}
-
-/* PID mismatch */
-step "s2-wrong-cancel-1"
-{
- SELECT run_pg_send_cancellation(pid + 1, cancel_key) FROM cancel_table;
-}
-
-/* cancellation key mismatch */
-step "s2-wrong-cancel-2"
-{
- SELECT run_pg_send_cancellation(pid, cancel_key + 1) FROM cancel_table;
-}
-
-/* cancel the LOCK statement in session 1 */
-step "s2-cancel"
-{
- SELECT run_pg_send_cancellation(pid, cancel_key) FROM cancel_table;
- END;
-}
-
-permutation "s1-register" "s2-lock" "s1-lock" "s2-wrong-cancel-1" "s2-wrong-cancel-2" "s2-cancel"
diff --git a/src/test/regress/spec/isolation_shard_rebalancer_progress.spec b/src/test/regress/spec/isolation_shard_rebalancer_progress.spec
index e329e9483..234703c21 100644
--- a/src/test/regress/spec/isolation_shard_rebalancer_progress.spec
+++ b/src/test/regress/spec/isolation_shard_rebalancer_progress.spec
@@ -131,7 +131,7 @@ session "s7"
step "s7-get-progress"
{
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
@@ -157,7 +157,7 @@ step "s7-get-progress"
step "s7-get-progress-ordered"
{
set LOCAL client_min_messages=NOTICE;
- WITH possible_sizes(size) as (VALUES (0), (8000), (50000), (200000), (400000))
+ WITH possible_sizes(size) as (VALUES (0), (8000), (40000), (200000), (480000))
SELECT
table_name,
shardid,
diff --git a/src/test/regress/sql/alter_database_propagation.sql b/src/test/regress/sql/alter_database_propagation.sql
new file mode 100644
index 000000000..2b9d3ac33
--- /dev/null
+++ b/src/test/regress/sql/alter_database_propagation.sql
@@ -0,0 +1,59 @@
+set citus.log_remote_commands = true;
+set citus.grep_remote_commands = '%ALTER DATABASE%';
+
+
+-- since the ALLOW_CONNECTIONS alter option should be executed in a different database
+-- and since we don't have support for multiple databases for now,
+-- this statement will get an error
+alter database regression ALLOW_CONNECTIONS false;
+
+
+alter database regression with CONNECTION LIMIT 100;
+alter database regression with IS_TEMPLATE true CONNECTION LIMIT 50;
+alter database regression with CONNECTION LIMIT -1;
+alter database regression with IS_TEMPLATE true;
+alter database regression with IS_TEMPLATE false;
+-- this statement will get an error since we don't have support for multiple databases for now
+alter database regression rename to regression2;
+
+alter database regression set default_transaction_read_only = true;
+
+set default_transaction_read_only = false;
+
+alter database regression set default_transaction_read_only from current;
+alter database regression set default_transaction_read_only to DEFAULT;
+alter database regression RESET default_transaction_read_only;
+
+alter database regression SET TIME ZONE '-7';
+
+alter database regression set TIME ZONE LOCAL;
+alter database regression set TIME ZONE DEFAULT;
+alter database regression RESET TIME ZONE;
+
+alter database regression SET TIME ZONE INTERVAL '-08:00' HOUR TO MINUTE;
+
+alter database regression RESET TIME ZONE;
+
+
+alter database regression set default_transaction_isolation = 'serializable';
+set default_transaction_isolation = 'read committed';
+
+alter database regression set default_transaction_isolation from current;
+alter database regression set default_transaction_isolation to DEFAULT;
+alter database regression RESET default_transaction_isolation;
+
+alter database regression set statement_timeout = 1000;
+set statement_timeout = 2000;
+
+alter database regression set statement_timeout from current;
+alter database regression set statement_timeout to DEFAULT;
+alter database regression RESET statement_timeout;
+
+alter database regression set lock_timeout = 1201.5;
+set lock_timeout = 1202.5;
+
+alter database regression set lock_timeout from current;
+alter database regression set lock_timeout to DEFAULT;
+alter database regression RESET lock_timeout;
+
+set citus.log_remote_commands = false;
diff --git a/src/test/regress/sql/alter_table_add_column.sql b/src/test/regress/sql/alter_table_add_column.sql
new file mode 100644
index 000000000..255e7714f
--- /dev/null
+++ b/src/test/regress/sql/alter_table_add_column.sql
@@ -0,0 +1,73 @@
+CREATE SCHEMA alter_table_add_column;
+SET search_path TO alter_table_add_column;
+
+SET citus.next_shard_id TO 1830000;
+SET citus.shard_replication_factor TO 1;
+
+SET client_min_messages TO NOTICE;
+
+CREATE TABLE referenced (int_col integer PRIMARY KEY);
+CREATE TABLE referencing (text_col text);
+SELECT create_distributed_table('referenced', null);
+SELECT create_distributed_table('referencing', null);
+
+CREATE SCHEMA alter_table_add_column_other_schema;
+
+CREATE OR REPLACE FUNCTION alter_table_add_column_other_schema.my_random(numeric)
+ RETURNS numeric AS
+$$
+BEGIN
+ RETURN 7 * $1;
+END;
+$$
+LANGUAGE plpgsql IMMUTABLE;
+
+CREATE COLLATION caseinsensitive (
+ provider = icu,
+ locale = 'und-u-ks-level2'
+);
+
+CREATE TYPE "simple_!\'custom_type" AS (a integer, b integer);
+
+ALTER TABLE referencing ADD COLUMN test_1 integer DEFAULT (alter_table_add_column_other_schema.my_random(7) + random() + 5) NOT NULL CONSTRAINT fkey REFERENCES referenced(int_col) ON UPDATE SET DEFAULT ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE REFERENCES referenced(int_col) ON UPDATE CASCADE ON DELETE SET DEFAULT NOT DEFERRABLE INITIALLY IMMEDIATE;
+ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (test_1 * alter_table_add_column_other_schema.my_random(1)) STORED UNIQUE REFERENCES referenced(int_col) MATCH FULL;
+ALTER TABLE referencing ADD COLUMN test_4 integer PRIMARY KEY WITH (fillfactor=70) NOT NULL REFERENCES referenced(int_col) MATCH SIMPLE ON UPDATE CASCADE ON DELETE SET DEFAULT;
+ALTER TABLE referencing ADD COLUMN test_5 integer CONSTRAINT unique_c UNIQUE WITH (fillfactor=50) NULL;
+ALTER TABLE referencing ADD COLUMN test_6 text COMPRESSION pglz COLLATE caseinsensitive NOT NULL;
+ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type";
+
+-- we give up deparsing the ALTER TABLE command if it needs to create a check constraint, and we fall back to legacy behavior
+ALTER TABLE referencing ADD COLUMN test_8 integer CHECK (test_8 > 0);
+ALTER TABLE referencing ADD COLUMN test_8 integer CONSTRAINT check_test_8 CHECK (test_8 > 0);
+
+-- try to add test_6 again, but with IF NOT EXISTS
+ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 text;
+ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 integer;
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_grouped_fkey_constraints FROM get_grouped_fkey_constraints('alter_table_add_column.referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_index_defs FROM get_index_defs('alter_table_add_column', 'referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_column_defaults FROM get_column_defaults('alter_table_add_column', 'referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_column_attrs FROM get_column_attrs('alter_table_add_column.referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA alter_table_add_column, alter_table_add_column_other_schema CASCADE;
diff --git a/src/test/regress/sql/alter_table_set_access_method.sql b/src/test/regress/sql/alter_table_set_access_method.sql
index d7720cfda..24dc89fe4 100644
--- a/src/test/regress/sql/alter_table_set_access_method.sql
+++ b/src/test/regress/sql/alter_table_set_access_method.sql
@@ -1,5 +1,7 @@
--- test for Postgres version
--- should error before PG12
+--
+-- ALTER_TABLE_SET_ACCESS_METHOD
+--
+
CREATE TABLE alter_am_pg_version_table (a INT);
SELECT alter_table_set_access_method('alter_am_pg_version_table', 'columnar');
DROP TABLE alter_am_pg_version_table;
@@ -258,8 +260,16 @@ create table events (event_id bigserial, event_time timestamptz default now(), p
create index on events (event_id);
insert into events (payload) select 'hello-'||s from generate_series(1,10) s;
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+
BEGIN;
+ \if :server_version_ge_16
+ SET LOCAL debug_parallel_query = regress;
+ \else
SET LOCAL force_parallel_mode = regress;
+ \endif
SET LOCAL min_parallel_table_scan_size = 1;
SET LOCAL parallel_tuple_cost = 0;
SET LOCAL max_parallel_workers = 4;
diff --git a/src/test/regress/sql/arbitrary_configs_truncate_cascade.sql b/src/test/regress/sql/arbitrary_configs_truncate_cascade.sql
index 50f4d2318..2b805b6a4 100644
--- a/src/test/regress/sql/arbitrary_configs_truncate_cascade.sql
+++ b/src/test/regress/sql/arbitrary_configs_truncate_cascade.sql
@@ -1,8 +1,14 @@
SET search_path TO truncate_cascade_tests_schema;
+-- Hide the detail of the truncate error because it might reference either
+-- table_with_fk_1 or table_with_fk_2 in the error message.
+\set VERBOSITY TERSE
+
-- Test truncate error on table with dependencies
TRUNCATE table_with_pk;
+\set VERBOSITY DEFAULT
+
-- Test truncate rollback on table with dependencies
SELECT COUNT(*) FROM table_with_fk_1;
SELECT COUNT(*) FROM table_with_fk_2;
diff --git a/src/test/regress/sql/citus_local_tables_mx.sql b/src/test/regress/sql/citus_local_tables_mx.sql
index 2bb79a802..2f7c76d6e 100644
--- a/src/test/regress/sql/citus_local_tables_mx.sql
+++ b/src/test/regress/sql/citus_local_tables_mx.sql
@@ -473,7 +473,7 @@ select run_command_on_workers($$SELECT count(*)=0 from citus_local_tables_mx.v10
select run_command_on_workers($$SELECT count(*)=0 from citus_local_tables_mx.v102$$);
CREATE TABLE loc_tb_2 (a int);
-CREATE VIEW v104 AS SELECT * from loc_tb_2;
+CREATE VIEW v104 AS SELECT * from loc_tb_2 table_name_for_view;
SET client_min_messages TO DEBUG1;
-- verify the CREATE command for the view is generated correctly
diff --git a/src/test/regress/sql/citus_local_tables_queries.sql b/src/test/regress/sql/citus_local_tables_queries.sql
index adae17118..f80de6c57 100644
--- a/src/test/regress/sql/citus_local_tables_queries.sql
+++ b/src/test/regress/sql/citus_local_tables_queries.sql
@@ -277,9 +277,8 @@ INSERT INTO citus_local_table
SELECT * from citus_local_table_2;
INSERT INTO citus_local_table
-SELECT * from citus_local_table_2
-ORDER BY 1,2
-LIMIT 10;
+SELECT sum(a), b from citus_local_table_2
+GROUP BY b;
INSERT INTO citus_local_table
SELECT * from postgres_local_table;
diff --git a/src/test/regress/sql/citus_schema_move.sql b/src/test/regress/sql/citus_schema_move.sql
new file mode 100644
index 000000000..8240feff7
--- /dev/null
+++ b/src/test/regress/sql/citus_schema_move.sql
@@ -0,0 +1,175 @@
+CREATE SCHEMA citus_schema_move;
+SET search_path TO citus_schema_move;
+
+SET citus.next_shard_id TO 2220000;
+SET citus.shard_count TO 32;
+SET citus.shard_replication_factor TO 1;
+
+SET client_min_messages TO WARNING;
+SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
+
+SELECT master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
+
+-- Due to a race condition that happens in TransferShards() when the same shard id
+-- is used to create the same shard on a different worker node, we need to call
+-- citus_cleanup_orphaned_resources() to clean up any orphaned resources before
+-- running the tests.
+--
+-- See https://github.com/citusdata/citus/pull/7180#issuecomment-1706786615.
+
+CALL citus_cleanup_orphaned_resources();
+
+SET client_min_messages TO NOTICE;
+
+-- test null input, should be no-op
+SELECT citus_schema_move(schema_id=>null, target_node_name=>null, target_node_port=>null, shard_transfer_mode=>null);
+SELECT citus_schema_move(schema_id=>null, target_node_id=>null, shard_transfer_mode=>null);
+
+SET citus.enable_schema_based_sharding TO ON;
+
+CREATE SCHEMA s1;
+
+-- test invalid schema
+SELECT citus_schema_move('no_such_schema', 'dummy_node_name', 1234);
+SELECT citus_schema_move('no_such_schema', 1234);
+
+-- test regular schema
+SELECT citus_schema_move('citus_schema_move', 'dummy_node_name', 1234);
+SELECT citus_schema_move('citus_schema_move', 1234);
+
+-- test empty distributed schema
+SELECT citus_schema_move('s1', 'dummy_node_name', 1234);
+SELECT citus_schema_move('s1', 1234);
+
+CREATE TABLE s1.t1 (a int);
+
+-- test invalid node name / port / id
+SELECT citus_schema_move('s1', 'dummy_node_name', 1234);
+SELECT citus_schema_move('s1', 1234);
+
+-- errors out due to a missing primary key / replica identity
+SELECT citus_schema_move('s1', nodename, nodeport) FROM pg_dist_node
+WHERE isactive AND shouldhaveshards AND noderole='primary' AND
+ (nodename, nodeport) NOT IN (
+ SELECT nodename, nodeport FROM citus_shards WHERE table_name = 's1.t1'::regclass
+ );
+
+-- errors as we try to move the schema to the same node
+SELECT citus_schema_move('s1', nodename, nodeport, 'block_writes')
+FROM citus_shards
+JOIN pg_dist_node USING (nodename, nodeport)
+WHERE noderole = 'primary' AND table_name = 's1.t1'::regclass;
+
+SELECT citus_schema_move('s1', nodeid, 'block_writes')
+FROM citus_shards
+JOIN pg_dist_node USING (nodename, nodeport)
+WHERE noderole = 'primary' AND table_name = 's1.t1'::regclass;
+
+-- returns the id, host name and host port of a non-coordinator node that the given schema can be moved to
+CREATE OR REPLACE FUNCTION get_non_coord_candidate_node_for_schema_move(
+ schema_id regnamespace)
+RETURNS TABLE (nodeid integer, nodename text, nodeport integer)
+SET search_path TO 'pg_catalog, public'
+AS $func$
+BEGIN
+ IF NOT EXISTS (SELECT 1 FROM pg_dist_schema WHERE schemaid = schema_id)
+ THEN
+ RAISE EXCEPTION '% is not a distributed schema', schema_id;
+ END IF;
+
+ CREATE TEMP TABLE nodeid_nodename_nodeport ON COMMIT DROP AS
+ SELECT pdn1.nodeid, pdn1.nodename, pdn1.nodeport
+ FROM pg_dist_node pdn1
+ WHERE isactive AND shouldhaveshards AND noderole='primary' AND groupid != 0 AND
+ (pdn1.nodename, pdn1.nodeport) NOT IN (
+ SELECT cs.nodename, cs.nodeport
+ FROM citus_shards cs
+ JOIN pg_dist_node pdn2
+ ON cs.nodename = pdn2.nodename AND cs.nodeport = pdn2.nodeport
+ WHERE pdn2.noderole='primary' AND starts_with(table_name::text, schema_id::text)
+ );
+
+ IF NOT EXISTS (SELECT 1 FROM nodeid_nodename_nodeport)
+ THEN
+ RAISE EXCEPTION 'could not determine a node to move the schema to';
+ END IF;
+
+ RETURN QUERY SELECT * FROM nodeid_nodename_nodeport LIMIT 1;
+END;
+$func$ LANGUAGE plpgsql;
+
+CREATE TABLE s1.t2 (a int);
+
+-- move the schema to a different node
+
+SELECT nodeid AS s1_new_nodeid, quote_literal(nodename) AS s1_new_nodename, nodeport AS s1_new_nodeport
+FROM get_non_coord_candidate_node_for_schema_move('s1') \gset
+
+SELECT citus_schema_move('s1', :s1_new_nodename, :s1_new_nodeport, 'block_writes');
+
+SELECT (:s1_new_nodename, :s1_new_nodeport) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's1'::text));
+
+SELECT nodeid AS s1_new_nodeid, quote_literal(nodename) AS s1_new_nodename, nodeport AS s1_new_nodeport
+FROM get_non_coord_candidate_node_for_schema_move('s1') \gset
+
+SELECT citus_schema_move('s1', :s1_new_nodeid, 'block_writes');
+
+SELECT (:s1_new_nodename, :s1_new_nodeport) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's1'::text));
+
+-- move the schema to the coordinator
+
+SELECT citus_schema_move('s1', 'localhost', :master_port, 'block_writes');
+
+SELECT ('localhost', :master_port) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's1'::text));
+
+-- move the schema away from the coordinator
+
+SELECT nodeid AS s1_new_nodeid, quote_literal(nodename) AS s1_new_nodename, nodeport AS s1_new_nodeport
+FROM get_non_coord_candidate_node_for_schema_move('s1') \gset
+
+SELECT citus_schema_move('s1', :s1_new_nodename, :s1_new_nodeport, 'block_writes');
+
+SELECT (:s1_new_nodename, :s1_new_nodeport) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's1'::text));
+
+CREATE USER tenantuser superuser;
+SET ROLE tenantuser;
+
+CREATE SCHEMA s2;
+CREATE TABLE s2.t1 (a int);
+CREATE TABLE s2.t2 (a int);
+
+CREATE USER regularuser;
+SET ROLE regularuser;
+
+-- throws an error as the user is not the owner of the schema
+SELECT citus_schema_move('s2', 'dummy_node', 1234);
+
+-- assign all tables to regularuser
+RESET ROLE;
+SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO regularuser; $$);
+
+GRANT USAGE ON SCHEMA citus_schema_move TO regularuser;
+
+SET ROLE regularuser;
+
+SELECT nodeid AS s2_new_nodeid, quote_literal(nodename) AS s2_new_nodename, nodeport AS s2_new_nodeport
+FROM get_non_coord_candidate_node_for_schema_move('s2') \gset
+
+SELECT citus_schema_move('s2', :s2_new_nodename, :s2_new_nodeport, 'force_logical');
+
+SELECT (:s2_new_nodename, :s2_new_nodeport) = ALL(SELECT nodename, nodeport FROM citus_shards JOIN pg_dist_node USING (nodename, nodeport) WHERE noderole = 'primary' AND starts_with(table_name::text, 's2'::text));
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA s2 CASCADE;
+SET client_min_messages TO NOTICE;
+
+RESET ROLE;
+
+REVOKE USAGE ON SCHEMA citus_schema_move FROM regularuser;
+DROP ROLE regularuser, tenantuser;
+
+RESET citus.enable_schema_based_sharding;
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA citus_schema_move, s1 CASCADE;
diff --git a/src/test/regress/sql/columnar_chunk_filtering.sql b/src/test/regress/sql/columnar_chunk_filtering.sql
index b8b2b411d..d37a8d8b6 100644
--- a/src/test/regress/sql/columnar_chunk_filtering.sql
+++ b/src/test/regress/sql/columnar_chunk_filtering.sql
@@ -1,6 +1,10 @@
--
-- Test chunk filtering in columnar using min/max values in stripe skip lists.
--
+-- This test has an alternative output file because PG16 changed
+-- the order of some Filters in EXPLAIN output.
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d
--
diff --git a/src/test/regress/sql/columnar_create.sql b/src/test/regress/sql/columnar_create.sql
index 017cc0d8f..408ce126e 100644
--- a/src/test/regress/sql/columnar_create.sql
+++ b/src/test/regress/sql/columnar_create.sql
@@ -56,7 +56,7 @@ AS SELECT * FROM columnar_table_1;
SELECT columnar.get_storage_id(oid) AS columnar_table_1_mv_storage_id
FROM pg_class WHERE relname='columnar_table_1_mv' \gset
--- test columnar_relation_set_new_filenode
+-- test columnar_relation_set_new_filelocator
REFRESH MATERIALIZED VIEW columnar_table_1_mv;
SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_mv_storage_id);
diff --git a/src/test/regress/sql/columnar_fallback_scan.sql b/src/test/regress/sql/columnar_fallback_scan.sql
index 28e521eaf..93a701062 100644
--- a/src/test/regress/sql/columnar_fallback_scan.sql
+++ b/src/test/regress/sql/columnar_fallback_scan.sql
@@ -20,7 +20,16 @@ select count(*), min(i), max(i), avg(i) from fallback_scan;
-- Negative test: try to force a parallel plan with at least two
-- workers, but columnar should reject it and use a non-parallel scan.
--
+
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+
+\if :server_version_ge_16
+set debug_parallel_query = regress;
+\else
set force_parallel_mode = regress;
+\endif
set min_parallel_table_scan_size = 1;
set parallel_tuple_cost = 0;
set max_parallel_workers = 4;
@@ -28,7 +37,11 @@ set max_parallel_workers_per_gather = 4;
explain (costs off) select count(*), min(i), max(i), avg(i) from fallback_scan;
select count(*), min(i), max(i), avg(i) from fallback_scan;
-set force_parallel_mode to default;
+\if :server_version_ge_16
+set debug_parallel_query = default;
+\else
+set force_parallel_mode = default;
+\endif
set min_parallel_table_scan_size to default;
set parallel_tuple_cost to default;
set max_parallel_workers to default;
diff --git a/src/test/regress/sql/columnar_indexes.sql b/src/test/regress/sql/columnar_indexes.sql
index 34895f503..28716c970 100644
--- a/src/test/regress/sql/columnar_indexes.sql
+++ b/src/test/regress/sql/columnar_indexes.sql
@@ -1,4 +1,6 @@
--
+-- COLUMNAR_INDEXES
+--
-- Testing indexes on columnar tables.
--
@@ -448,6 +450,10 @@ BEGIN;
-- this wouldn't flush any data
insert into events (payload) select 'hello-'||s from generate_series(1, 10) s;
+ SHOW server_version \gset
+ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+ \gset
+
-- Since table is large enough, normally postgres would prefer using
-- parallel workers when building the index.
--
@@ -459,7 +465,12 @@ BEGIN;
-- by postgres and throws an error. For this reason, here we don't expect
-- the following command to fail since we prevent using parallel workers for
-- columnar tables.
+
+ \if :server_version_ge_16
+ SET LOCAL debug_parallel_query = regress;
+ \else
SET LOCAL force_parallel_mode = regress;
+ \endif
SET LOCAL min_parallel_table_scan_size = 1;
SET LOCAL parallel_tuple_cost = 0;
SET LOCAL max_parallel_workers = 4;
diff --git a/src/test/regress/sql/columnar_memory.sql b/src/test/regress/sql/columnar_memory.sql
index 21bab57f5..5f29eb1e3 100644
--- a/src/test/regress/sql/columnar_memory.sql
+++ b/src/test/regress/sql/columnar_memory.sql
@@ -77,10 +77,10 @@ SELECT CASE WHEN 1.0 * TopMemoryContext / :top_post BETWEEN 0.98 AND 1.03 THEN 1
FROM columnar_test_helpers.columnar_store_memory_stats();
-- before this change, max mem usage while executing inserts was 28MB and
--- with this change it's less than 8MB.
+-- with this change it's less than 9MB.
SELECT
- (SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
- (SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
+ (SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
+ (SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
\x
diff --git a/src/test/regress/sql/columnar_partitioning.sql b/src/test/regress/sql/columnar_partitioning.sql
index 8ae26ee3e..01a9e892e 100644
--- a/src/test/regress/sql/columnar_partitioning.sql
+++ b/src/test/regress/sql/columnar_partitioning.sql
@@ -1,3 +1,6 @@
+--
+-- COLUMNAR_PARTITIONING
+--
CREATE TABLE parent(ts timestamptz, i int, n numeric, s text)
PARTITION BY RANGE (ts);
@@ -21,8 +24,16 @@ INSERT INTO parent SELECT '2020-03-15', 30, 300, 'three thousand'
INSERT INTO parent SELECT '2020-04-15', 30, 300, 'three thousand'
FROM generate_series(1,100000);
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+
-- run parallel plans
+\if :server_version_ge_16
+SET debug_parallel_query = regress;
+\else
SET force_parallel_mode = regress;
+\endif
SET min_parallel_table_scan_size = 1;
SET parallel_tuple_cost = 0;
SET max_parallel_workers = 4;
@@ -46,7 +57,11 @@ EXPLAIN (costs off) SELECT count(*), sum(i), min(i), max(i) FROM parent;
SELECT count(*), sum(i), min(i), max(i) FROM parent;
SET columnar.enable_custom_scan TO DEFAULT;
+\if :server_version_ge_16
+SET debug_parallel_query TO DEFAULT;
+\else
SET force_parallel_mode TO DEFAULT;
+\endif
SET min_parallel_table_scan_size TO DEFAULT;
SET parallel_tuple_cost TO DEFAULT;
SET max_parallel_workers TO DEFAULT;
diff --git a/src/test/regress/sql/columnar_paths.sql b/src/test/regress/sql/columnar_paths.sql
index 92ffa7d66..3c92d4a21 100644
--- a/src/test/regress/sql/columnar_paths.sql
+++ b/src/test/regress/sql/columnar_paths.sql
@@ -193,7 +193,7 @@ WHERE w2.a = 123;
EXPLAIN (COSTS OFF) SELECT sub_1.b, sub_2.a, sub_3.avg
FROM
- (SELECT b FROM full_correlated WHERE (a > 2) GROUP BY b HAVING count(DISTINCT a) > 0 ORDER BY 1 DESC LIMIT 5) AS sub_1,
+ (SELECT b FROM full_correlated WHERE (a > 2) GROUP BY b ORDER BY 1 DESC LIMIT 5) AS sub_1,
(SELECT a FROM full_correlated WHERE (a > 10) GROUP BY a HAVING count(DISTINCT a) >= 1 ORDER BY 1 DESC LIMIT 3) AS sub_2,
(SELECT avg(a) AS AVG FROM full_correlated WHERE (a > 2) GROUP BY a HAVING sum(a) > 10 ORDER BY (sum(d) - avg(a) - COALESCE(array_upper(ARRAY[max(a)],1) * 5, 0)) DESC LIMIT 3) AS sub_3
WHERE sub_2.a < sub_1.b::integer
diff --git a/src/test/regress/sql/columnar_test_helpers.sql b/src/test/regress/sql/columnar_test_helpers.sql
index 2e85ebc88..d88f8b88f 100644
--- a/src/test/regress/sql/columnar_test_helpers.sql
+++ b/src/test/regress/sql/columnar_test_helpers.sql
@@ -1,3 +1,7 @@
+SET client_min_messages TO WARNING;
+DROP SCHEMA IF EXISTS columnar_test_helpers CASCADE;
+RESET client_min_messages;
+
CREATE SCHEMA columnar_test_helpers;
SET search_path TO columnar_test_helpers;
diff --git a/src/test/regress/sql/coordinator_shouldhaveshards.sql b/src/test/regress/sql/coordinator_shouldhaveshards.sql
index 3eb2de2e0..0365f07c2 100644
--- a/src/test/regress/sql/coordinator_shouldhaveshards.sql
+++ b/src/test/regress/sql/coordinator_shouldhaveshards.sql
@@ -230,7 +230,7 @@ CREATE TABLE dist_table1(a int);
SELECT create_distributed_table('dist_table1', 'a');
ROLLBACK;
-CREATE table ref_table(x int PRIMARY KEY, y int);
+CREATE table ref_table(x int, y int);
-- this will be replicated to the coordinator because of add_coordinator test
SELECT create_reference_table('ref_table');
@@ -245,10 +245,11 @@ ROLLBACK;
-- writing to local file and remote intermediate files
-- at the same time
INSERT INTO ref_table SELECT *, * FROM generate_series(1, 100);
-
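+-- the ON CONFLICT (x) clause below requires a unique index on test(x)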
+CREATE UNIQUE INDEX test_x_unique ON test(x);
WITH cte_1 AS (
-INSERT INTO ref_table SELECT * FROM ref_table LIMIT 10000 ON CONFLICT (x) DO UPDATE SET y = EXCLUDED.y + 1 RETURNING *)
+INSERT INTO test SELECT sum(x), y FROM test GROUP BY y ON CONFLICT (x) DO UPDATE SET y = EXCLUDED.y + 1 RETURNING *)
SELECT count(*) FROM cte_1;
+DROP INDEX test_x_unique;
-- issue #4237: preventing empty placement creation on coordinator
CREATE TABLE test_append_table(a int);
diff --git a/src/test/regress/sql/create_ref_dist_from_citus_local.sql b/src/test/regress/sql/create_ref_dist_from_citus_local.sql
index 7693d3300..7c10abce6 100644
--- a/src/test/regress/sql/create_ref_dist_from_citus_local.sql
+++ b/src/test/regress/sql/create_ref_dist_from_citus_local.sql
@@ -214,5 +214,368 @@ BEGIN;
SELECT create_distributed_table('citus_local_table_5', 'col_1', 'append');
ROLLBACK;
+\set VERBOSITY DEFAULT
+
+-- Test the UDFs that we use to convert Citus local tables to single-shard tables and
+-- reference tables.
+
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true);
+SELECT pg_catalog.citus_internal_delete_placement_metadata(1);
+
+CREATE ROLE test_user_create_ref_dist WITH LOGIN;
+GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist;
+ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_user_create_ref_dist';
+SELECT pg_reload_conf();
+SELECT pg_sleep(0.1);
+SET ROLE test_user_create_ref_dist;
+
+SET citus.next_shard_id TO 1850000;
+SET citus.next_placement_id TO 8510000;
+SET citus.shard_replication_factor TO 1;
+SET search_path TO create_ref_dist_from_citus_local;
+
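+-- test null arguments for the UDFs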
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true);
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true);
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true);
+SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null);
+
+SELECT pg_catalog.citus_internal_delete_placement_metadata(null);
+
+CREATE TABLE udf_test (col_1 int);
+SELECT citus_add_local_table_to_metadata('udf_test');
+
+BEGIN;
+ SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true);
+
+ SELECT COUNT(*)=1 FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.udf_test'::regclass AND repmodel = 'k' AND colocationid = 99999 AND autoconverted = true;
+
+ SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement
+ WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset
+
+ SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid);
+
+ SELECT COUNT(*)=0 FROM pg_dist_placement WHERE placementid = :udf_test_placementid;
+ROLLBACK;
+
+RESET ROLE;
+DROP TABLE udf_test;
+REVOKE ALL ON SCHEMA create_ref_dist_from_citus_local FROM test_user_create_ref_dist;
+DROP USER test_user_create_ref_dist;
+ALTER SYSTEM RESET citus.enable_manual_metadata_changes_for_user;
+SELECT pg_reload_conf();
+SELECT pg_sleep(0.1);
+
+-- Test lazy conversion from Citus local tables to single-shard tables and reference tables.
+
+SET citus.next_shard_id TO 1860000;
+SET citus.next_placement_id TO 8520000;
+SET citus.shard_replication_factor TO 1;
+SET search_path TO create_ref_dist_from_citus_local;
+SET client_min_messages to ERROR;
+
+INSERT INTO reference_table_1 VALUES (1, 1), (2, 2), (201, 201), (202, 202);
+
+CREATE TABLE citus_local_table_7 (col_1 int UNIQUE);
+INSERT INTO citus_local_table_7 VALUES (1), (2), (201), (202);
+SELECT citus_add_local_table_to_metadata('citus_local_table_7');
+
+CREATE TABLE fkey_test (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+INSERT INTO fkey_test VALUES (1, '1', 1), (2, '2', 2), (201, '201', 201), (202, '202', 202);
+SELECT citus_add_local_table_to_metadata('fkey_test');
+
+-- check unsupported foreign key constraints
+ALTER TABLE reference_table_1 ADD CONSTRAINT ref_1_col_1_fkey_test_int_col_1 FOREIGN KEY (col_1) REFERENCES fkey_test(int_col_1);
+SELECT create_distributed_table('fkey_test', null, colocate_with=>'none');
+ALTER TABLE reference_table_1 DROP CONSTRAINT ref_1_col_1_fkey_test_int_col_1;
+
+ALTER TABLE citus_local_table_7 ADD CONSTRAINT citus_local_1_col_1_fkey_test_int_col_1 FOREIGN KEY (col_1) REFERENCES fkey_test(int_col_1);
+SELECT create_distributed_table('fkey_test', null, colocate_with=>'none');
+ALTER TABLE citus_local_table_7 DROP CONSTRAINT citus_local_1_col_1_fkey_test_int_col_1;
+
+ALTER TABLE fkey_test ADD CONSTRAINT fkey_test_int_col_1_citus_local_1_col_1 FOREIGN KEY (int_col_1) REFERENCES citus_local_table_7(col_1);
+SELECT create_distributed_table('fkey_test', null, colocate_with=>'none');
+ALTER TABLE fkey_test DROP CONSTRAINT fkey_test_int_col_1_citus_local_1_col_1;
+
+CREATE TABLE tbl_1 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+CREATE INDEX tbl_1_int_col_2_idx ON tbl_1 (int_col_2);
+
+INSERT INTO tbl_1 VALUES (1, '1', 1), (2, '2', 2), (201, '201', 201), (202, '202', 202);
+
+ALTER TABLE tbl_1 ADD CONSTRAINT tbl_1_int_col_1_ref_1_col_1 FOREIGN KEY (int_col_1) REFERENCES reference_table_1(col_1);
+ALTER TABLE tbl_1 ADD CONSTRAINT tbl_1_int_col_2_ref_1_col_1 FOREIGN KEY (int_col_2) REFERENCES reference_table_1(col_1);
+ALTER TABLE tbl_1 ADD CONSTRAINT tbl_1_int_col_2_tbl_1_int_col_1 FOREIGN KEY (int_col_2) REFERENCES tbl_1(int_col_1);
+SELECT citus_add_local_table_to_metadata('tbl_1');
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_1') AS tbl_1_old_shard_id \gset
+
+SELECT create_distributed_table('tbl_1', null, colocate_with=>'none');
+
+-- check data
+SELECT * FROM tbl_1 ORDER BY int_col_1;
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_1');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_1', :tbl_1_old_shard_id, false);
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_1', 3);
+SELECT cardinality(fkey_names) = 3 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_1');
+
+-- test partitioning
+CREATE TABLE tbl_2 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text,
+ int_col_2 int
+) PARTITION BY RANGE (int_col_1);
+CREATE TABLE tbl_2_child_1 PARTITION OF tbl_2 FOR VALUES FROM (0) TO (100);
+CREATE TABLE tbl_2_child_2 PARTITION OF tbl_2 FOR VALUES FROM (200) TO (300);
+
+INSERT INTO tbl_2 VALUES (1, '1', 1), (2, '2', 2), (201, '201', 201), (202, '202', 202);
+
+SELECT citus_add_local_table_to_metadata('tbl_2');
+
+ALTER TABLE tbl_2 ADD CONSTRAINT tbl_2_int_col_1_ref_1_col_1 FOREIGN KEY (int_col_1) REFERENCES reference_table_1(col_1);
+ALTER TABLE tbl_2 ADD CONSTRAINT tbl_2_int_col_2_ref_1_col_1 FOREIGN KEY (int_col_2) REFERENCES reference_table_1(col_1);
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_2') AS tbl_2_old_shard_id \gset
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_2_child_1') AS tbl_2_child_1_old_shard_id \gset
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_2_child_2') AS tbl_2_child_2_old_shard_id \gset
+
+SELECT create_distributed_table('tbl_2', null, colocate_with=>'tbl_1');
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2', :tbl_2_old_shard_id, false);
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_2', 1);
+SELECT cardinality(fkey_names) = 2 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_2');
+SELECT public.verify_partition_count_on_placements('create_ref_dist_from_citus_local.tbl_2', 2);
+
+-- verify the same for children
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2_child_1');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2_child_1', :tbl_2_child_1_old_shard_id, false);
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_2_child_1', 1);
+SELECT cardinality(fkey_names) = 2 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_2_child_1');
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2_child_2');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_2_child_2', :tbl_2_child_2_old_shard_id, false);
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_2_child_2', 1);
+SELECT cardinality(fkey_names) = 2 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_2_child_2');
+
+-- verify that placements of all 4 tables are on the same node
+SELECT COUNT(DISTINCT(groupid)) = 1 FROM pg_dist_placement WHERE shardid IN (
+ :tbl_1_old_shard_id, :tbl_2_old_shard_id, :tbl_2_child_1_old_shard_id, :tbl_2_child_2_old_shard_id
+);
+
+-- verify the same by executing a router query that targets both tables
+SET client_min_messages to DEBUG2;
+SELECT COUNT(*) FROM tbl_1, tbl_2;
+SET client_min_messages to ERROR;
+
+CREATE TABLE reference_table_3(col_1 INT UNIQUE, col_2 INT UNIQUE);
+INSERT INTO reference_table_3 VALUES (1, 1), (2, 2), (201, 201), (202, 202);
+
+CREATE TABLE tbl_3 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text,
+ int_col_2 int
+) PARTITION BY RANGE (int_col_1);
+CREATE TABLE tbl_3_child_1 PARTITION OF tbl_3 FOR VALUES FROM (0) TO (100);
+
+ALTER TABLE tbl_3 ADD CONSTRAINT tbl_3_int_col_1_ref_1_col_1 FOREIGN KEY (int_col_1) REFERENCES reference_table_3(col_1);
+
+SELECT create_reference_table('reference_table_3');
+
+INSERT INTO tbl_3 VALUES (1, '1', 1), (2, '2', 2);
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_3') AS tbl_3_old_shard_id \gset
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.tbl_3_child_1') AS tbl_3_child_1_old_shard_id \gset
+
+SELECT create_distributed_table('tbl_3', null, colocate_with=>'none');
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_3');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_3', :tbl_3_old_shard_id, false);
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_3', 1);
+SELECT cardinality(fkey_names) = 1 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_3');
+SELECT public.verify_partition_count_on_placements('create_ref_dist_from_citus_local.tbl_3', 1);
+
+-- verify the same for children
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.tbl_3_child_1');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.tbl_3_child_1', :tbl_3_child_1_old_shard_id, false);
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.tbl_3_child_1', 1);
+SELECT cardinality(fkey_names) = 1 AS verify_fkey_count_on_shards FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.tbl_3_child_1');
+
+-- verify that placements of both tables are on the same node
+SELECT COUNT(DISTINCT(groupid)) = 1 FROM pg_dist_placement WHERE shardid IN (
+ :tbl_3_old_shard_id, :tbl_3_child_1_old_shard_id
+);
+
+-- verify the same by executing a router query that targets the table
+SET client_min_messages to DEBUG2;
+SELECT COUNT(*) FROM tbl_3;
+SET client_min_messages to ERROR;
+
+CREATE TABLE single_shard_conversion_colocated_1 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_colocated_1');
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.single_shard_conversion_colocated_1') AS single_shard_conversion_colocated_1_old_shard_id \gset
+
+SELECT create_distributed_table('single_shard_conversion_colocated_1', null, colocate_with=>'none');
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_colocated_1');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_colocated_1', :single_shard_conversion_colocated_1_old_shard_id, false);
+
+CREATE TABLE single_shard_conversion_colocated_2 (
+ int_col_1 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_colocated_2');
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.single_shard_conversion_colocated_2') AS single_shard_conversion_colocated_2_old_shard_id \gset
+
+SELECT create_distributed_table('single_shard_conversion_colocated_2', null, colocate_with=>'single_shard_conversion_colocated_1');
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_colocated_2');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_colocated_2', :single_shard_conversion_colocated_2_old_shard_id, false);
+
+-- make sure that they're created in the same colocation group
+SELECT
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.single_shard_conversion_colocated_1'::regclass
+)
+=
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.single_shard_conversion_colocated_2'::regclass
+);
+
+-- verify that placements of 2 tables are on the same node
+SELECT COUNT(DISTINCT(groupid)) = 1 FROM pg_dist_placement WHERE shardid IN (
+ :single_shard_conversion_colocated_1_old_shard_id, :single_shard_conversion_colocated_2_old_shard_id
+);
+
+CREATE TABLE single_shard_conversion_noncolocated_1 (
+ int_col_1 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_noncolocated_1');
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.single_shard_conversion_noncolocated_1') AS single_shard_conversion_noncolocated_1_old_shard_id \gset
+
+SELECT create_distributed_table('single_shard_conversion_noncolocated_1', null, colocate_with=>'none');
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_noncolocated_1');
+SELECT public.verify_shard_placement_for_single_shard_table('create_ref_dist_from_citus_local.single_shard_conversion_noncolocated_1', :single_shard_conversion_noncolocated_1_old_shard_id, false);
+
+-- make sure that they're created in different colocation groups
+SELECT
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.single_shard_conversion_colocated_1'::regclass
+)
+!=
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'create_ref_dist_from_citus_local.single_shard_conversion_noncolocated_1'::regclass
+);
+
+-- Test creating a reference table from a Citus local table
+-- (ref_table_conversion_test) that has foreign keys from/to Citus
+-- local tables and reference tables:
+--
+--   citus_local_referencing ----------           ----------> citus_local_referenced
+--                                    |           ^
+--                                    v           |
+--                            ref_table_conversion_test
+--                                    ^           |
+--                                    |           v
+--   reference_table_referencing ------           ----------> reference_table_referenced
+--
+
+CREATE TABLE citus_local_referenced(a int PRIMARY KEY);
+SELECT citus_add_local_table_to_metadata('citus_local_referenced');
+INSERT INTO citus_local_referenced VALUES (1), (2), (3), (4);
+
+CREATE TABLE reference_table_referenced(a int PRIMARY KEY);
+SELECT create_reference_table('reference_table_referenced');
+INSERT INTO reference_table_referenced VALUES (1), (2), (3), (4);
+
+CREATE TABLE ref_table_conversion_test (
+ a int PRIMARY KEY
+);
+SELECT citus_add_local_table_to_metadata('ref_table_conversion_test');
+ALTER TABLE ref_table_conversion_test ADD CONSTRAINT ref_table_a_citus_local_referenced_a FOREIGN KEY (a) REFERENCES citus_local_referenced(a);
+ALTER TABLE ref_table_conversion_test ADD CONSTRAINT ref_table_a_reference_table_referenced_a FOREIGN KEY (a) REFERENCES reference_table_referenced(a);
+INSERT INTO ref_table_conversion_test VALUES (1), (2), (3), (4);
+
+CREATE INDEX ref_table_conversion_test_a_idx1 ON ref_table_conversion_test (a);
+CREATE INDEX ref_table_conversion_test_a_idx2 ON ref_table_conversion_test (a);
+
+CREATE TABLE citus_local_referencing(a int);
+ALTER TABLE citus_local_referencing ADD CONSTRAINT citus_local_referencing_a_ref_table_a FOREIGN KEY (a) REFERENCES ref_table_conversion_test(a);
+SELECT citus_add_local_table_to_metadata('citus_local_referencing');
+INSERT INTO citus_local_referencing VALUES (1), (2), (3), (4);
+
+CREATE TABLE reference_table_referencing(a int);
+ALTER TABLE reference_table_referencing ADD CONSTRAINT reference_table_referencing_a_ref_table_a FOREIGN KEY (a) REFERENCES ref_table_conversion_test(a);
+SELECT create_reference_table('reference_table_referencing');
+INSERT INTO reference_table_referencing VALUES (1), (2), (3), (4);
+
+-- save old shardid and placementid
+SELECT get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.ref_table_conversion_test') AS ref_table_conversion_test_old_shard_id \gset
+SELECT placementid AS ref_table_conversion_test_old_coord_placement_id FROM pg_dist_placement WHERE shardid = :ref_table_conversion_test_old_shard_id \gset
+
+SELECT create_reference_table('ref_table_conversion_test');
+
+-- check data on all placements
+SELECT result FROM run_command_on_all_nodes(
+ $$SELECT COUNT(*)=4 FROM create_ref_dist_from_citus_local.ref_table_conversion_test$$
+);
+
+SELECT public.verify_pg_dist_partition_for_reference_table('create_ref_dist_from_citus_local.ref_table_conversion_test');
+SELECT public.verify_shard_placements_for_reference_table('create_ref_dist_from_citus_local.ref_table_conversion_test',
+ :ref_table_conversion_test_old_shard_id,
+ :ref_table_conversion_test_old_coord_placement_id);
+SELECT public.verify_index_count_on_shard_placements('create_ref_dist_from_citus_local.ref_table_conversion_test', 3);
+SELECT on_node, fkey_names FROM public.get_fkey_names_on_placements('create_ref_dist_from_citus_local.ref_table_conversion_test') ORDER BY 1,2;
+
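+-- test converting a Citus local table with a dropped column to a reference table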
+CREATE TABLE dropped_column_test(a int, b int, c text not null, d text not null);
+INSERT INTO dropped_column_test VALUES(1, null, 'text_1', 'text_2');
+ALTER TABLE dropped_column_test DROP column b;
+
+SELECT citus_add_local_table_to_metadata('dropped_column_test');
+SELECT create_reference_table('dropped_column_test');
+
+-- check data on all placements
+SELECT result FROM run_command_on_all_nodes(
+ $$
+ SELECT jsonb_agg(q.*) FROM (
+ SELECT * FROM create_ref_dist_from_citus_local.dropped_column_test
+ ) q
+ $$
+);
+
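+-- test creating a single-shard table while citus.shard_replication_factor is set to 2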
+SET citus.shard_replication_factor TO 2;
+
+CREATE TABLE replication_factor_test(a int);
+SELECT citus_add_local_table_to_metadata('replication_factor_test');
+
+SELECT create_distributed_table('replication_factor_test', null);
+
+SET citus.shard_replication_factor TO 1;
+
-- cleanup at exit
DROP SCHEMA create_ref_dist_from_citus_local CASCADE;
diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql
index ceda9f10c..027e4f72e 100644
--- a/src/test/regress/sql/create_role_propagation.sql
+++ b/src/test/regress/sql/create_role_propagation.sql
@@ -117,7 +117,7 @@ GRANT non_dist_role_4 TO dist_role_4;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
+SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
\c - - - :worker_1_port
diff --git a/src/test/regress/sql/distributed_domain.sql b/src/test/regress/sql/distributed_domain.sql
index b03a2040f..5bf3bd6a8 100644
--- a/src/test/regress/sql/distributed_domain.sql
+++ b/src/test/regress/sql/distributed_domain.sql
@@ -487,3 +487,4 @@ DROP DOMAIN IF EXISTS domain_does_not_exist;
SET client_min_messages TO warning;
DROP SCHEMA distributed_domain, distributed_domain_moved CASCADE;
+DROP ROLE domain_owner;
diff --git a/src/test/regress/sql/executor_local_failure.sql b/src/test/regress/sql/executor_local_failure.sql
new file mode 100644
index 000000000..0d1c56ca1
--- /dev/null
+++ b/src/test/regress/sql/executor_local_failure.sql
@@ -0,0 +1,31 @@
+CREATE SCHEMA failure_local_modification;
+SET search_path TO failure_local_modification;
+SET citus.next_shard_id TO 1989000;
+
+SET citus.shard_replication_factor TO 1;
+CREATE TABLE failover_to_local (key int PRIMARY KEY, value varchar(10));
+SELECT create_reference_table('failover_to_local');
+
+\c - - - :worker_2_port
+
+SET search_path TO failure_local_modification;
+
+-- prevent local connection establishment to imitate
+-- a failure
+ALTER SYSTEM SET citus.local_shared_pool_size TO -1;
+SELECT pg_reload_conf();
+SELECT pg_sleep(0.2);
+BEGIN;
+ -- we force the execution to use connections (i.e., remote execution);
+ -- however, we do not allow connections since local_shared_pool_size=-1,
+ -- so this should properly error out
+ SET LOCAL citus.enable_local_execution TO false;
+ INSERT INTO failover_to_local VALUES (1,'1'), (2,'2'),(3,'3'),(4,'4');
+ROLLBACK;
+
+ALTER SYSTEM RESET citus.local_shared_pool_size;
+SELECT pg_reload_conf();
+
+\c - - - :master_port
+SET client_min_messages TO ERROR;
+DROP SCHEMA failure_local_modification cascade;
diff --git a/src/test/regress/sql/failure_test_helpers.sql b/src/test/regress/sql/failure_test_helpers.sql
index 7053905ac..b7f9eae3a 100644
--- a/src/test/regress/sql/failure_test_helpers.sql
+++ b/src/test/regress/sql/failure_test_helpers.sql
@@ -8,7 +8,7 @@ SELECT pg_reload_conf();
-- Add some helper functions for sending commands to mitmproxy
-CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
+CREATE OR REPLACE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
DECLARE
command ALIAS FOR $1;
BEGIN
@@ -24,58 +24,14 @@ BEGIN
END;
$$ LANGUAGE plpgsql;
-CREATE FUNCTION citus.clear_network_traffic() RETURNS void AS $$
+CREATE OR REPLACE FUNCTION citus.clear_network_traffic() RETURNS void AS $$
BEGIN
PERFORM citus.mitmproxy('recorder.reset()');
RETURN; -- return void
END;
$$ LANGUAGE plpgsql;
-CREATE FUNCTION citus.dump_network_traffic()
-RETURNS TABLE(conn int, source text, message text) AS $$
-BEGIN
- CREATE TEMPORARY TABLE mitmproxy_command (command text) ON COMMIT DROP;
- CREATE TEMPORARY TABLE mitmproxy_result (
- conn int, source text, message text
- ) ON COMMIT DROP;
-
- INSERT INTO mitmproxy_command VALUES ('recorder.dump()');
-
- EXECUTE format('COPY mitmproxy_command TO %L', current_setting('citus.mitmfifo'));
- EXECUTE format('COPY mitmproxy_result FROM %L', current_setting('citus.mitmfifo'));
-
- RETURN QUERY SELECT * FROM mitmproxy_result;
-END;
-$$ LANGUAGE plpgsql;
-
-\c - - - :worker_2_port
-
--- Add some helper functions for sending commands to mitmproxy
-
-CREATE FUNCTION citus.mitmproxy(text) RETURNS TABLE(result text) AS $$
-DECLARE
- command ALIAS FOR $1;
-BEGIN
- CREATE TEMPORARY TABLE mitmproxy_command (command text) ON COMMIT DROP;
- CREATE TEMPORARY TABLE mitmproxy_result (res text) ON COMMIT DROP;
-
- INSERT INTO mitmproxy_command VALUES (command);
-
- EXECUTE format('COPY mitmproxy_command TO %L', current_setting('citus.mitmfifo'));
- EXECUTE format('COPY mitmproxy_result FROM %L', current_setting('citus.mitmfifo'));
-
- RETURN QUERY SELECT * FROM mitmproxy_result;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE FUNCTION citus.clear_network_traffic() RETURNS void AS $$
-BEGIN
- PERFORM citus.mitmproxy('recorder.reset()');
- RETURN; -- return void
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE FUNCTION citus.dump_network_traffic()
+CREATE OR REPLACE FUNCTION citus.dump_network_traffic()
RETURNS TABLE(conn int, source text, message text) AS $$
BEGIN
CREATE TEMPORARY TABLE mitmproxy_command (command text) ON COMMIT DROP;
diff --git a/src/test/regress/sql/fkeys_between_local_ref.sql b/src/test/regress/sql/fkeys_between_local_ref.sql
index a04040474..8f92f3eea 100644
--- a/src/test/regress/sql/fkeys_between_local_ref.sql
+++ b/src/test/regress/sql/fkeys_between_local_ref.sql
@@ -100,9 +100,12 @@ BEGIN;
SELECT COUNT(*)=0 FROM citus_local_tables_in_schema;
ROLLBACK;
--- this actually attempts to convert local tables to citus local tables but errors out
--- as citus doesn't support defining foreign keys via add column commands
-ALTER TABLE local_table_1 ADD COLUMN col_3 INT REFERENCES reference_table_1(col_1);
+BEGIN;
+ ALTER TABLE local_table_1 ADD COLUMN col_3 INT REFERENCES reference_table_1(col_1);
+
+ -- show that we converted all 4 local tables in this schema to citus local tables
+ SELECT COUNT(*)=4 FROM citus_local_tables_in_schema;
+ROLLBACK;
BEGIN;
-- define a foreign key so that all 4 local tables become citus local tables
diff --git a/src/test/regress/sql/foreign_tables_mx.sql b/src/test/regress/sql/foreign_tables_mx.sql
index eec5b7316..bdef5a4c5 100644
--- a/src/test/regress/sql/foreign_tables_mx.sql
+++ b/src/test/regress/sql/foreign_tables_mx.sql
@@ -37,6 +37,11 @@ CREATE FOREIGN TABLE foreign_table (
--verify
SELECT partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid = 'foreign_table'::regclass ORDER BY logicalrelid;
+-- COPY FROM doesn't work for Citus foreign tables
+COPY foreign_table FROM stdin;
+1 1foo 2
+\.
+
CREATE TABLE parent_for_foreign_tables (
project_id integer
) PARTITION BY HASH (project_id);
diff --git a/src/test/regress/sql/global_cancel.sql b/src/test/regress/sql/global_cancel.sql
index 4a6157489..848c3b01a 100644
--- a/src/test/regress/sql/global_cancel.sql
+++ b/src/test/regress/sql/global_cancel.sql
@@ -47,9 +47,12 @@ RESET client_min_messages;
SELECT pg_typeof(:maintenance_daemon_gpid);
+\set VERBOSITY terse
+
SELECT pg_cancel_backend(:maintenance_daemon_gpid);
SELECT pg_terminate_backend(:maintenance_daemon_gpid);
+\set VERBOSITY default
-- we can cancel our own backend
SELECT pg_cancel_backend(citus_backend_gpid());
diff --git a/src/test/regress/sql/grant_on_database_propagation.sql b/src/test/regress/sql/grant_on_database_propagation.sql
new file mode 100644
index 000000000..00b9cddb8
--- /dev/null
+++ b/src/test/regress/sql/grant_on_database_propagation.sql
@@ -0,0 +1,378 @@
+-- The public role has connect, temp, temporary privileges on the database.
+-- To test these scenarios, we need to revoke these privileges from the public role
+-- since public role privileges are inherited by new roles/users.
+revoke connect,temp,temporary on database regression from public;
+
+CREATE SCHEMA grant_on_database_propagation;
+SET search_path TO grant_on_database_propagation;
+
+-- test grant/revoke CREATE privilege propagation on database
+create user myuser;
+
+grant create on database regression to myuser;
+
+select has_database_privilege('myuser','regression', 'CREATE');
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'CREATE');
+\c - - - :master_port
+
+revoke create on database regression from myuser;
+
+
+select has_database_privilege('myuser','regression', 'CREATE');
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'CREATE');
+\c - - - :master_port
+
+drop user myuser;
+-----------------------------------------------------------------------
+
+-- test grant/revoke CONNECT privilege propagation on database
+create user myuser;
+
+grant CONNECT on database regression to myuser;
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'CONNECT');
+\c - - - :master_port
+
+revoke connect on database regression from myuser;
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'CONNECT');
+\c - - - :master_port
+
+drop user myuser;
+
+-----------------------------------------------------------------------
+
+-- test grant/revoke TEMP privilege propagation on database
+create user myuser;
+
+-- test grant/revoke temp on database
+grant TEMP on database regression to myuser;
+
+select has_database_privilege('myuser','regression', 'TEMP');
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'TEMP');
+\c - - - :master_port
+
+revoke TEMP on database regression from myuser;
+
+select has_database_privilege('myuser','regression', 'TEMP');
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'TEMP');
+\c - - - :master_port
+
+drop user myuser;
+
+-----------------------------------------------------------------------
+
+-- test temporary privilege on database
+create user myuser;
+
+-- test grant/revoke temporary on database
+grant TEMPORARY on database regression to myuser;
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :master_port
+
+revoke TEMPORARY on database regression from myuser;
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :master_port
+
+drop user myuser;
+-----------------------------------------------------------------------
+
+-- test ALL privileges with ALL statement on database
+create user myuser;
+
+grant ALL on database regression to myuser;
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :master_port
+
+
+revoke ALL on database regression from myuser;
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :master_port
+
+drop user myuser;
+-----------------------------------------------------------------------
+
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database
+create user myuser;
+
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser;
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :worker_1_port;
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :master_port
+
+RESET ROLE;
+
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser;
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+\c - - - :master_port
+
+drop user myuser;
+-----------------------------------------------------------------------
+
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
+create user myuser;
+create user myuser_1;
+
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser;
+
+set role myuser;
+-- here, since myuser does not have the grant option, this should fail
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser_1;
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+select has_database_privilege('myuser_1','regression', 'TEMP');
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+select has_database_privilege('myuser_1','regression', 'TEMP');
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+
+\c - - - :master_port
+
+RESET ROLE;
+
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser with grant option;
+set role myuser;
+
+-- here, since myuser has the grant option, this should succeed
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression to myuser_1 granted by myuser;
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+select has_database_privilege('myuser_1','regression', 'TEMP');
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+select has_database_privilege('myuser_1','regression', 'TEMP');
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+
+\c - - - :master_port
+
+
+RESET ROLE;
+
+-- below test should fail and throw an error since myuser_1 still has the dependent privileges
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser restrict;
+-- below test should also fail and throw an error since myuser_1 still has the dependent privileges
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser restrict;
+
+-- below test should succeed and not throw any error since myuser_1's privileges are revoked with cascade
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser cascade;
+
+-- here we test whether myuser still has the privileges after the revoke grant option for
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+
+\c - - - :master_port
+
+reset role;
+
+
+
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression from myuser_1;
+
+drop user myuser_1;
+drop user myuser;
+
+-----------------------------------------------------------------------
+
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges with multiple databases
+-- and multiple users
+
+create user myuser;
+create user myuser_1;
+
+create database test_db;
+SELECT result FROM run_command_on_workers($$create database test_db$$);
+revoke connect,temp,temporary on database test_db from public;
+
+grant CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db to myuser,myuser_1;
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+
+select has_database_privilege('myuser','test_db', 'CREATE');
+select has_database_privilege('myuser','test_db', 'CONNECT');
+select has_database_privilege('myuser','test_db', 'TEMP');
+select has_database_privilege('myuser','test_db', 'TEMPORARY');
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+select has_database_privilege('myuser_1','regression', 'TEMP');
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+
+select has_database_privilege('myuser_1','test_db', 'CREATE');
+select has_database_privilege('myuser_1','test_db', 'CONNECT');
+select has_database_privilege('myuser_1','test_db', 'TEMP');
+select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
+
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+
+select has_database_privilege('myuser','test_db', 'CREATE');
+select has_database_privilege('myuser','test_db', 'CONNECT');
+select has_database_privilege('myuser','test_db', 'TEMP');
+select has_database_privilege('myuser','test_db', 'TEMPORARY');
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+select has_database_privilege('myuser_1','regression', 'TEMP');
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+
+select has_database_privilege('myuser_1','test_db', 'CREATE');
+select has_database_privilege('myuser_1','test_db', 'CONNECT');
+select has_database_privilege('myuser_1','test_db', 'TEMP');
+select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
+
+\c - - - :master_port
+
+RESET ROLE;
+-- below test should fail and throw an error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser;
+
+-- below test should succeed and not throw any error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser_1;
+
+-- below test should succeed and not throw any error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database regression,test_db from myuser cascade;
+
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+
+select has_database_privilege('myuser','test_db', 'CREATE');
+select has_database_privilege('myuser','test_db', 'CONNECT');
+select has_database_privilege('myuser','test_db', 'TEMP');
+select has_database_privilege('myuser','test_db', 'TEMPORARY');
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+select has_database_privilege('myuser_1','regression', 'TEMP');
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+
+select has_database_privilege('myuser_1','test_db', 'CREATE');
+select has_database_privilege('myuser_1','test_db', 'CONNECT');
+select has_database_privilege('myuser_1','test_db', 'TEMP');
+select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
+
+\c - - - :worker_1_port
+
+select has_database_privilege('myuser','regression', 'CREATE');
+select has_database_privilege('myuser','regression', 'CONNECT');
+select has_database_privilege('myuser','regression', 'TEMP');
+select has_database_privilege('myuser','regression', 'TEMPORARY');
+
+select has_database_privilege('myuser','test_db', 'CREATE');
+select has_database_privilege('myuser','test_db', 'CONNECT');
+select has_database_privilege('myuser','test_db', 'TEMP');
+select has_database_privilege('myuser','test_db', 'TEMPORARY');
+
+select has_database_privilege('myuser_1','regression', 'CREATE');
+select has_database_privilege('myuser_1','regression', 'CONNECT');
+select has_database_privilege('myuser_1','regression', 'TEMP');
+select has_database_privilege('myuser_1','regression', 'TEMPORARY');
+
+select has_database_privilege('myuser_1','test_db', 'CREATE');
+select has_database_privilege('myuser_1','test_db', 'CONNECT');
+select has_database_privilege('myuser_1','test_db', 'TEMP');
+select has_database_privilege('myuser_1','test_db', 'TEMPORARY');
+
+\c - - - :master_port
+
+reset role;
+
+drop user myuser_1;
+drop user myuser;
+
+drop database test_db;
+SELECT result FROM run_command_on_workers($$drop database test_db$$);
+---------------------------------------------------------------------------
+-- roll back the public role's database privileges to their original state
+grant connect,temp,temporary on database regression to public;
+
+
+SET client_min_messages TO ERROR;
+DROP SCHEMA grant_on_database_propagation CASCADE;
+
+---------------------------------------------------------------------------
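+-- For reference, the node-by-node checks above could also be driven from the
+-- coordinator in a single statement via run_command_on_workers (already used
+-- in this file); shown only as an illustrative sketch, not executed here:
+--   SELECT result FROM run_command_on_workers(
+--       $$SELECT has_database_privilege('myuser', 'regression', 'CONNECT')$$);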
diff --git a/src/test/regress/sql/insert_select_repartition.sql b/src/test/regress/sql/insert_select_repartition.sql
index 4d13a83f4..30d77f5b8 100644
--- a/src/test/regress/sql/insert_select_repartition.sql
+++ b/src/test/regress/sql/insert_select_repartition.sql
@@ -611,7 +611,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@@ -625,7 +625,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
diff --git a/src/test/regress/sql/insert_select_single_shard_table.sql b/src/test/regress/sql/insert_select_single_shard_table.sql
index fb080d206..66a5ef9d1 100644
--- a/src/test/regress/sql/insert_select_single_shard_table.sql
+++ b/src/test/regress/sql/insert_select_single_shard_table.sql
@@ -355,12 +355,10 @@ SET client_min_messages TO DEBUG1;
INSERT INTO nullkey_c1_t1 SELECT DISTINCT ON (a) a, b FROM nullkey_c1_t2;
-SET client_min_messages TO DEBUG2;
-
--- Similarly, we could push down the following query as well. see
--- https://github.com/citusdata/citus/pull/6831.
+-- keep verbosity low as PG14 and PG15 produce slightly different outputs
INSERT INTO nullkey_c1_t1 SELECT b, SUM(a) OVER (ORDER BY b) AS sum_val FROM nullkey_c1_t1;
+SET client_min_messages TO DEBUG2;
INSERT INTO nullkey_c2_t1
SELECT t2.a, t2.b
FROM nullkey_c1_t1 AS t2
diff --git a/src/test/regress/sql/intermediate_results.sql b/src/test/regress/sql/intermediate_results.sql
index 4cd54b29b..2eaa6e715 100644
--- a/src/test/regress/sql/intermediate_results.sql
+++ b/src/test/regress/sql/intermediate_results.sql
@@ -255,7 +255,6 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[],
-- test refreshing mat views
SET client_min_messages TO ERROR;
CREATE USER some_other_user;
-SELECT run_command_on_workers($$GRANT ALL ON DATABASE regression TO some_other_user;$$);
GRANT ALL ON DATABASE regression TO some_other_user;
RESET client_min_messages;
diff --git a/src/test/regress/sql/local_dist_join_mixed.sql b/src/test/regress/sql/local_dist_join_mixed.sql
index b07da2fc8..c6eb53d4e 100644
--- a/src/test/regress/sql/local_dist_join_mixed.sql
+++ b/src/test/regress/sql/local_dist_join_mixed.sql
@@ -78,14 +78,13 @@ SELECT count(*) FROM distributed JOIN unlogged_local USING (id);
CREATE MATERIALIZED VIEW mat_view AS SELECT * FROM local;
SELECT count(*) FROM distributed JOIN mat_view USING (id);
-CREATE VIEW local_regular_view AS SELECT * FROM local;
+CREATE VIEW local_regular_view AS SELECT * FROM local table_name_for_view;
CREATE VIEW dist_regular_view AS SELECT * FROM distributed;
SELECT count(*) FROM distributed JOIN local_regular_view USING (id);
SELECT count(*) FROM local JOIN dist_regular_view USING (id);
SELECT count(*) FROM dist_regular_view JOIN local_regular_view USING (id);
-
-- join alias/table alias
SELECT COUNT(*) FROM (distributed JOIN local USING (id)) AS t(a,b,c,d) ORDER BY d,c,a,b LIMIT 3;
SELECT COUNT(*) FROM (distributed d1(x,y,y1) JOIN local l1(x,t) USING (x)) AS t(a,b,c,d) ORDER BY d,c,a,b LIMIT 3;
diff --git a/src/test/regress/sql/local_shard_execution.sql b/src/test/regress/sql/local_shard_execution.sql
index 4bf5aeec4..8acbc2978 100644
--- a/src/test/regress/sql/local_shard_execution.sql
+++ b/src/test/regress/sql/local_shard_execution.sql
@@ -211,7 +211,7 @@ RETURNING *;
-- INSERT..SELECT via coordinator consists of two steps, select + COPY
-- that's why it is disallowed to use local execution even if the SELECT
-- can be executed locally
-INSERT INTO distributed_table SELECT * FROM distributed_table WHERE key = 1 OFFSET 0 ON CONFLICT DO NOTHING;
+INSERT INTO distributed_table SELECT sum(key), value FROM distributed_table WHERE key = 1 GROUP BY value ON CONFLICT DO NOTHING;
INSERT INTO distributed_table SELECT 1, '1',15 FROM distributed_table WHERE key = 2 LIMIT 1 ON CONFLICT DO NOTHING;
-- sanity check: multi-shard INSERT..SELECT pushdown goes through distributed execution
diff --git a/src/test/regress/sql/local_shard_execution_replicated.sql b/src/test/regress/sql/local_shard_execution_replicated.sql
index 89a4c61f5..d7e4cc064 100644
--- a/src/test/regress/sql/local_shard_execution_replicated.sql
+++ b/src/test/regress/sql/local_shard_execution_replicated.sql
@@ -176,7 +176,7 @@ RETURNING *;
-- INSERT..SELECT via coordinator consists of two steps, select + COPY
-- that's why it is disallowed to use local execution even if the SELECT
-- can be executed locally
-INSERT INTO distributed_table SELECT * FROM distributed_table WHERE key = 1 OFFSET 0 ON CONFLICT DO NOTHING;
+INSERT INTO distributed_table SELECT sum(key), value, max(age) FROM distributed_table WHERE key = 1 GROUP BY value ON CONFLICT DO NOTHING;
INSERT INTO distributed_table SELECT 1, '1',15 FROM distributed_table WHERE key = 2 LIMIT 1 ON CONFLICT DO NOTHING;
-- sanity check: multi-shard INSERT..SELECT pushdown goes through distributed execution
diff --git a/src/test/regress/sql/local_table_join.sql b/src/test/regress/sql/local_table_join.sql
index 8d0d7d332..393b15378 100644
--- a/src/test/regress/sql/local_table_join.sql
+++ b/src/test/regress/sql/local_table_join.sql
@@ -362,9 +362,6 @@ select typdefault from (
select a from tbl
where typdefault > 'a'
limit 1) as subq_0
- where (
- select true as bool from pg_catalog.pg_am limit 1
- )
) as subq_1
) as subq_2;
@@ -379,9 +376,6 @@ select typdefault from (
select a from tbl
where typdefault > 'a'
limit 1) as subq_0
- where (
- select true as bool from pg_catalog.pg_am limit 1
- )
) as subq_1
) as subq_2;
diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql
index 4fb911736..a41e80841 100644
--- a/src/test/regress/sql/merge.sql
+++ b/src/test/regress/sql/merge.sql
@@ -2366,118 +2366,6 @@ UPDATE SET val = dist_source.val
WHEN NOT MATCHED THEN
INSERT VALUES(dist_source.id, dist_source.val);
--- test merge with single-shard tables
-
-CREATE SCHEMA query_single_shard_table;
-
-SET search_path TO query_single_shard_table;
-
-CREATE TABLE nullkey_c1_t1(a int, b int);
-CREATE TABLE nullkey_c1_t2(a int, b int);
-SELECT create_distributed_table('nullkey_c1_t1', null, colocate_with=>'none');
-SELECT create_distributed_table('nullkey_c1_t2', null, colocate_with=>'nullkey_c1_t1');
-
-CREATE TABLE nullkey_c2_t1(a int, b int);
-CREATE TABLE nullkey_c2_t2(a int, b int);
-SELECT create_distributed_table('nullkey_c2_t1', null, colocate_with=>'none');
-SELECT create_distributed_table('nullkey_c2_t2', null, colocate_with=>'nullkey_c2_t1', distribution_type=>null);
-
-CREATE TABLE reference_table(a int, b int);
-CREATE TABLE distributed_table(a int, b int);
-CREATE TABLE citus_local_table(a int, b int);
-SELECT create_reference_table('reference_table');
-SELECT create_distributed_table('distributed_table', 'a');
-SELECT citus_add_local_table_to_metadata('citus_local_table');
-
-SET client_min_messages TO DEBUG2;
-INSERT INTO reference_table SELECT i, i FROM generate_series(0, 5) i;
-
-INSERT INTO distributed_table SELECT i, i FROM generate_series(3, 8) i;
-
-INSERT INTO citus_local_table SELECT i, i FROM generate_series(0, 10) i;
-
-CREATE TABLE postgres_local_table(a int, b int);
-INSERT INTO postgres_local_table SELECT i, i FROM generate_series(5, 10) i;
-
--- with a colocated table
-MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t2.b;
-
-MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
-WHEN MATCHED THEN DELETE;
-
-MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t2.b
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t2.a, nullkey_c1_t2.b);
-
-MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
-WHEN MATCHED THEN DELETE
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t2.a, nullkey_c1_t2.b);
-
--- with non-colocated single-shard table
-MERGE INTO nullkey_c1_t1 USING nullkey_c2_t1 ON (nullkey_c1_t1.a = nullkey_c2_t1.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b;
-
-MERGE INTO nullkey_c1_t1 USING nullkey_c2_t1 ON (nullkey_c1_t1.a = nullkey_c2_t1.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c2_t1.a, nullkey_c2_t1.b);
-
--- with a distributed table
-MERGE INTO nullkey_c1_t1 USING distributed_table ON (nullkey_c1_t1.a = distributed_table.a)
-WHEN MATCHED THEN UPDATE SET b = distributed_table.b
-WHEN NOT MATCHED THEN INSERT VALUES (distributed_table.a, distributed_table.b);
-
-MERGE INTO distributed_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = distributed_table.a)
-WHEN MATCHED THEN DELETE
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
-
--- with a reference table
-MERGE INTO nullkey_c1_t1 USING reference_table ON (nullkey_c1_t1.a = reference_table.a)
-WHEN MATCHED THEN UPDATE SET b = reference_table.b;
-
-MERGE INTO reference_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = reference_table.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
-
--- with a citus local table
-MERGE INTO nullkey_c1_t1 USING citus_local_table ON (nullkey_c1_t1.a = citus_local_table.a)
-WHEN MATCHED THEN UPDATE SET b = citus_local_table.b;
-
-MERGE INTO citus_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = citus_local_table.a)
-WHEN MATCHED THEN DELETE;
-
--- with a postgres table
-MERGE INTO nullkey_c1_t1 USING postgres_local_table ON (nullkey_c1_t1.a = postgres_local_table.a)
-WHEN MATCHED THEN UPDATE SET b = postgres_local_table.b;
-
-MERGE INTO postgres_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = postgres_local_table.a)
-WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b
-WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
-
--- using ctes
-WITH cte AS (
- SELECT * FROM nullkey_c1_t1
-)
-MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
-WHEN MATCHED THEN UPDATE SET b = cte.b;
-
-WITH cte AS (
- SELECT * FROM distributed_table
-)
-MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
-WHEN MATCHED THEN UPDATE SET b = cte.b;
-
-WITH cte AS materialized (
- SELECT * FROM distributed_table
-)
-MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
-WHEN MATCHED THEN UPDATE SET b = cte.b;
-
-SET client_min_messages TO WARNING;
-DROP SCHEMA query_single_shard_table CASCADE;
-
-SET search_path TO merge_schema;
-
-- Test Columnar table
CREATE TABLE target_columnar(cid int, name text) USING columnar;
SELECT create_distributed_table('target_columnar', 'cid');
@@ -2586,6 +2474,13 @@ ON (t1.a = t2.a AND (SELECT max(a) > 55 FROM cte_2))
WHEN MATCHED THEN
DELETE;
+-- Datatype mismatch between target and source join column
+WITH src AS (SELECT FLOOR(b) AS a FROM source_2)
+MERGE INTO target_1 t
+USING src
+ON t.a = src.a
+WHEN MATCHED THEN DELETE;
+
RESET client_min_messages;
DROP SERVER foreign_server CASCADE;
DROP FUNCTION merge_when_and_write();
diff --git a/src/test/regress/sql/merge_schema_sharding.sql b/src/test/regress/sql/merge_schema_sharding.sql
new file mode 100644
index 000000000..8ea947c1c
--- /dev/null
+++ b/src/test/regress/sql/merge_schema_sharding.sql
@@ -0,0 +1,148 @@
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
+\gset
+\if :server_version_ge_15
+\else
+\q
+\endif
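+-- MERGE was introduced in PG15, hence the early exit above on older servers.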
+
+-- The MERGE command performs a join from data_source to target_table_name
+DROP SCHEMA IF EXISTS schema_shard_table1 CASCADE;
+DROP SCHEMA IF EXISTS schema_shard_table2 CASCADE;
+DROP SCHEMA IF EXISTS schema_shard_table CASCADE;
+
+-- test merge with schema-shard tables
+SET citus.shard_replication_factor TO 1;
+SET citus.max_adaptive_executor_pool_size TO 1;
+SET citus.next_shard_id TO 4005000;
+SET citus.enable_repartition_joins TO true;
+
+CREATE SCHEMA schema_shard_table;
+SET search_path TO schema_shard_table;
+CREATE TABLE reference_table(a int, b int);
+CREATE TABLE distributed_table(a int, b int);
+CREATE TABLE citus_local_table(a int, b int);
+CREATE TABLE postgres_local_table(a int, b int);
+
+INSERT INTO reference_table SELECT i, i FROM generate_series(0, 5) i;
+INSERT INTO distributed_table SELECT i, i FROM generate_series(3, 8) i;
+INSERT INTO citus_local_table SELECT i, i FROM generate_series(0, 10) i;
+INSERT INTO postgres_local_table SELECT i, i FROM generate_series(5, 10) i;
+
+SELECT create_reference_table('reference_table');
+SELECT create_distributed_table('distributed_table', 'a');
+SELECT citus_add_local_table_to_metadata('citus_local_table');
+
+SET citus.enable_schema_based_sharding TO ON;
+CREATE SCHEMA schema_shard_table1;
+CREATE SCHEMA schema_shard_table2;
+
+SET search_path TO schema_shard_table1;
+CREATE TABLE nullkey_c1_t1(a int, b int);
+CREATE TABLE nullkey_c1_t2(a int, b int);
+INSERT INTO nullkey_c1_t1 SELECT i, i FROM generate_series(0, 5) i;
+INSERT INTO nullkey_c1_t2 SELECT i, i FROM generate_series(3, 8) i;
+
+SET search_path TO schema_shard_table2;
+CREATE TABLE nullkey_c2_t1(a int, b int);
+CREATE TABLE nullkey_c2_t2(a int, b int);
+INSERT INTO nullkey_c2_t1 SELECT i, i FROM generate_series(0, 5) i;
+INSERT INTO nullkey_c2_t2 SELECT i, i FROM generate_series(3, 8) i;
+
+SET search_path TO schema_shard_table1;
+
+-- with a colocated table
+SET client_min_messages TO DEBUG2;
+MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t2.b;
+
+MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
+WHEN MATCHED THEN DELETE;
+
+MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t2.b
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t2.a, nullkey_c1_t2.b);
+
+MERGE INTO nullkey_c1_t1 USING nullkey_c1_t2 ON (nullkey_c1_t1.a = nullkey_c1_t2.a)
+WHEN MATCHED THEN DELETE
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t2.a, nullkey_c1_t2.b);
+
+SET search_path TO schema_shard_table2;
+
+-- with a non-colocated schema-shard table
+MERGE INTO schema_shard_table1.nullkey_c1_t1 USING nullkey_c2_t1 ON (schema_shard_table1.nullkey_c1_t1.a = nullkey_c2_t1.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b;
+
+MERGE INTO schema_shard_table1.nullkey_c1_t1 USING nullkey_c2_t1 ON (schema_shard_table1.nullkey_c1_t1.a = nullkey_c2_t1.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c2_t1.a, nullkey_c2_t1.b);
+
+-- with a distributed table
+SET search_path TO schema_shard_table1;
+MERGE INTO nullkey_c1_t1 USING schema_shard_table.distributed_table ON (nullkey_c1_t1.a = schema_shard_table.distributed_table.a)
+WHEN MATCHED THEN UPDATE SET b = schema_shard_table.distributed_table.b
+WHEN NOT MATCHED THEN INSERT VALUES (schema_shard_table.distributed_table.a, schema_shard_table.distributed_table.b);
+
+MERGE INTO schema_shard_table.distributed_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.distributed_table.a)
+WHEN MATCHED THEN DELETE
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
+
+RESET client_min_messages;
+SELECT count(*) FROM schema_shard_table.distributed_table WHERE a in (0, 1, 2);
+MERGE INTO schema_shard_table.distributed_table
+USING (SELECT s1.a AS s1a, s2.b AS s2b
+ FROM nullkey_c1_t1 s1 JOIN schema_shard_table2.nullkey_c2_t1 s2
+ ON s1.a = s2.a) src
+ON (src.s1a = schema_shard_table.distributed_table.a)
+WHEN MATCHED THEN DELETE
+WHEN NOT MATCHED THEN INSERT VALUES (src.s1a, src.s2b);
+-- Three matching rows must be deleted
+SELECT count(*) FROM schema_shard_table.distributed_table WHERE a in (0, 1, 2);
+
+-- with a reference table
+SET client_min_messages TO DEBUG2;
+MERGE INTO nullkey_c1_t1 USING schema_shard_table.reference_table ON (nullkey_c1_t1.a = schema_shard_table.reference_table.a)
+WHEN MATCHED THEN UPDATE SET b = schema_shard_table.reference_table.b;
+
+MERGE INTO schema_shard_table.reference_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.reference_table.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
+
+-- with a citus local table
+MERGE INTO nullkey_c1_t1 USING schema_shard_table.citus_local_table ON (nullkey_c1_t1.a = schema_shard_table.citus_local_table.a)
+WHEN MATCHED THEN UPDATE SET b = schema_shard_table.citus_local_table.b;
+
+MERGE INTO schema_shard_table.citus_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.citus_local_table.a)
+WHEN MATCHED THEN DELETE;
+
+-- with a postgres table
+MERGE INTO nullkey_c1_t1 USING schema_shard_table.postgres_local_table ON (nullkey_c1_t1.a = schema_shard_table.postgres_local_table.a)
+WHEN MATCHED THEN UPDATE SET b = schema_shard_table.postgres_local_table.b;
+
+MERGE INTO schema_shard_table.postgres_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.postgres_local_table.a)
+WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b
+WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b);
+
+-- using ctes
+WITH cte AS (
+ SELECT * FROM nullkey_c1_t1
+)
+MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
+WHEN MATCHED THEN UPDATE SET b = cte.b;
+
+WITH cte AS (
+ SELECT * FROM schema_shard_table.distributed_table
+)
+MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
+WHEN MATCHED THEN UPDATE SET b = cte.b;
+
+WITH cte AS materialized (
+ SELECT * FROM schema_shard_table.distributed_table
+)
+MERGE INTO nullkey_c1_t1 USING cte ON (nullkey_c1_t1.a = cte.a)
+WHEN MATCHED THEN UPDATE SET b = cte.b;
+
+SET client_min_messages TO WARNING;
+DROP SCHEMA schema_shard_table1 CASCADE;
+DROP SCHEMA schema_shard_table2 CASCADE;
+DROP SCHEMA schema_shard_table CASCADE;
diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql
index 1c5d5b15d..a4044bab3 100644
--- a/src/test/regress/sql/metadata_sync_helpers.sql
+++ b/src/test/regress/sql/metadata_sync_helpers.sql
@@ -798,8 +798,19 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus_internal gpid=10000000001';
-- with an ugly trick, update the vartype of table from int to bigint
-- so that making two tables colocated fails
- UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}'
+
+ -- include varnullingrels for PG16
+ SHOW server_version \gset
+ SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+ \gset
+ \if :server_version_ge_16
+ UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
+ \else
+ UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
+ WHERE logicalrelid = 'test_2'::regclass;
+ \endif
+
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
ROLLBACK;
diff --git a/src/test/regress/sql/multi_alter_table_add_constraints.sql b/src/test/regress/sql/multi_alter_table_add_constraints.sql
index a82bb64ca..dfc31dc51 100644
--- a/src/test/regress/sql/multi_alter_table_add_constraints.sql
+++ b/src/test/regress/sql/multi_alter_table_add_constraints.sql
@@ -563,6 +563,19 @@ SELECT create_distributed_table('alter_add_unique', 'x');
ALTER TABLE alter_add_unique ADD CONSTRAINT unique_constraint_test UNIQUE USING INDEX alter_unique_idx;
ALTER TABLE alter_add_unique DROP CONSTRAINT unique_constraint_test;
+CREATE TABLE unique_test_table_single_shard(id int, name varchar(20));
+SELECT create_distributed_table('unique_test_table_single_shard', 'id', shard_count=>1);
+
+ALTER TABLE unique_test_table_single_shard ADD UNIQUE(id, name) WITH (fillfactor=20);
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_index_defs FROM get_index_defs('sc3', 'unique_test_table_single_shard')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
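+-- groupid = 0 in pg_dist_node identifies the coordinator, so its index
+-- definition is listed first in the output above.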
+
+DROP TABLE unique_test_table_single_shard;
+
SET search_path TO 'public';
DROP SCHEMA sc1 CASCADE;
diff --git a/src/test/regress/sql/multi_alter_table_statements.sql b/src/test/regress/sql/multi_alter_table_statements.sql
index f814caf10..10e52cb37 100644
--- a/src/test/regress/sql/multi_alter_table_statements.sql
+++ b/src/test/regress/sql/multi_alter_table_statements.sql
@@ -2,8 +2,9 @@
-- MULTI_ALTER_TABLE_STATEMENTS
--
-ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000;
-
+CREATE SCHEMA multi_alter_table_statements;
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 220000;
-- Check that we can run ALTER TABLE statements on distributed tables.
-- We set the shardid sequence here so that the shardids in this test
@@ -38,6 +39,8 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 221000;
-- Verify that we can add columns
@@ -55,8 +58,10 @@ FROM
JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid)
ORDER BY attnum;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 222000;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
@@ -75,7 +80,7 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
-- Verify that SET NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
-- Drop default so that NULLs will be inserted for this column
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
@@ -90,7 +95,7 @@ END;
-- Verify that DROP NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
-- COPY should succeed now
SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
@@ -102,7 +107,7 @@ SELECT count(*) from lineitem_alter;
SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2;
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2;
@@ -130,19 +135,19 @@ ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS int_column2;
ALTER TABLE IF EXISTS lineitem_alter RENAME COLUMN l_orderkey_renamed TO l_orderkey;
SELECT SUM(l_orderkey) FROM lineitem_alter;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
-- Verify that we can execute commands with multiple subcommands
ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER,
ADD COLUMN int_column2 INTEGER;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER,
ALTER COLUMN int_column1 SET STATISTICS 10;
ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
-- Verify that we cannot execute alter commands on the distribution column
@@ -174,7 +179,7 @@ ALTER TABLE IF EXISTS non_existent_table RENAME COLUMN column1 TO column2;
-- Verify that none of the failed alter table commands took effect on the master
-- node
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
-- verify that non-propagated ddl commands are allowed inside a transaction block
SET citus.enable_ddl_propagation to false;
@@ -198,7 +203,7 @@ CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
ALTER TABLE lineitem_alter ADD COLUMN first integer;
COMMIT;
-SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
+SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='lineitem_alter'::regclass;
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'temp_index_2'::regclass;
@@ -241,8 +246,10 @@ DROP INDEX temp_index_2;
-- Add column on only one worker...
\c - - - :worker_2_port
-ALTER TABLE lineitem_alter_220000 ADD COLUMN first integer;
+ALTER TABLE multi_alter_table_statements.lineitem_alter_220000 ADD COLUMN first integer;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 223000;
-- and try to add it in a multi-statement block, which fails
BEGIN;
@@ -288,7 +295,7 @@ ALTER TABLE single_shard_items REPLICA IDENTITY default;
-- Drop the column from the worker...
\c - - - :worker_2_port
-ALTER TABLE lineitem_alter_220000 DROP COLUMN first;
+ALTER TABLE multi_alter_table_statements.lineitem_alter_220000 DROP COLUMN first;
-- Create table to trigger at-xact-end (deferred) failure
CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED);
@@ -305,6 +312,8 @@ RESET citus.enable_metadata_sync;
CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag();
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 224000;
-- The above trigger will cause failure at transaction end on one placement.
-- Citus always uses 2PC. 2PC should handle this "best" (no divergence)
BEGIN;
@@ -321,6 +330,8 @@ DROP FUNCTION log_ddl_tag();
DROP TABLE ddl_commands;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 225000;
-- Distributed SELECTs may appear after ALTER
BEGIN;
CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
@@ -360,6 +371,8 @@ FROM
JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid)
ORDER BY attnum;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 226000;
-- verify that we can rename distributed tables
SHOW citus.enable_ddl_propagation;
@@ -372,29 +385,37 @@ SELECT relname FROM pg_class WHERE relname = 'lineitem_renamed';
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_renamed%' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 227000;
-- revert it to original name
ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;
-- show rename worked on one worker, too
\c - - - :worker_1_port
-SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
+SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_222001' /* failed copy trails */ ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 228000;
-- verify that we can set and reset storage parameters
ALTER TABLE lineitem_alter SET(fillfactor=40);
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
\c - - - :worker_1_port
-SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
+SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_222001' /* failed copy trails */ ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 229000;
ALTER TABLE lineitem_alter RESET(fillfactor);
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
\c - - - :worker_1_port
-SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
+SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_222001' /* failed copy trails */ ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 230000;
-- verify that we can rename indexes on distributed tables
CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber);
@@ -407,6 +428,8 @@ SELECT relname FROM pg_class WHERE relname = 'idx_lineitem_linenumber';
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'idx_lineitem_linenumber%' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 231000;
-- now get rid of the index
DROP INDEX idx_lineitem_linenumber;
@@ -427,8 +450,10 @@ ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int;
-- verify newly added column is not present in a worker shard
\c - - - :worker_1_port
-SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0;
+SELECT column_only_added_to_master FROM multi_alter_table_statements.lineitem_alter_220000 LIMIT 0;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 232000;
-- ddl propagation flag is reset to default, disable it again
SET citus.enable_ddl_propagation to false;
@@ -458,6 +483,8 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
\c - - - :worker_1_port
SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alter_%';
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 233000;
-- verify alter table and drop sequence in the same transaction does not cause deadlock
SET citus.shard_count TO 4;
@@ -489,7 +516,7 @@ SELECT create_distributed_table('trigger_table', 'id');
-- first set a trigger on a shard
\c - - - :worker_1_port
SET citus.enable_metadata_sync TO OFF;
-CREATE FUNCTION update_value() RETURNS trigger AS $up$
+CREATE OR REPLACE FUNCTION update_value() RETURNS trigger AS $up$
BEGIN
NEW.value := 'trigger enabled';
RETURN NEW;
@@ -498,10 +525,12 @@ $up$ LANGUAGE plpgsql;
RESET citus.enable_metadata_sync;
CREATE TRIGGER update_value
-BEFORE INSERT ON trigger_table_220017
+BEFORE INSERT ON multi_alter_table_statements.trigger_table_233004
FOR EACH ROW EXECUTE PROCEDURE update_value();
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 234000;
INSERT INTO trigger_table VALUES (1, 'trigger disabled');
SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value;
@@ -529,32 +558,36 @@ SET citus.enable_ddl_propagation to true;
CREATE USER alter_table_owner WITH LOGIN;
GRANT USAGE ON SCHEMA public TO alter_table_owner;
+GRANT USAGE ON SCHEMA multi_alter_table_statements TO alter_table_owner;
\c - alter_table_owner - :master_port
-- should not be able to access table without permission
-SELECT count(*) FROM lineitem_alter;
+SELECT count(*) FROM multi_alter_table_statements.lineitem_alter;
-- should not be able to drop the table as non table owner
-DROP TABLE lineitem_alter;
+DROP TABLE multi_alter_table_statements.lineitem_alter;
\c - postgres - :master_port
-ALTER TABLE lineitem_alter OWNER TO alter_table_owner;
+ALTER TABLE multi_alter_table_statements.lineitem_alter OWNER TO alter_table_owner;
\c - alter_table_owner - :master_port
-- should be able to query the table as table owner
-SELECT count(*) FROM lineitem_alter;
+SELECT count(*) FROM multi_alter_table_statements.lineitem_alter;
-- should be able to drop the table as table owner
-DROP TABLE lineitem_alter;
+DROP TABLE multi_alter_table_statements.lineitem_alter;
-- check that nothing's left over on workers, other than the leftover shard created
-- during the unsuccessful COPY
\c - postgres - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 235000;
-- drop the roles created
REVOKE ALL ON SCHEMA PUBLIC FROM alter_table_owner;
+REVOKE ALL ON SCHEMA multi_alter_table_statements FROM alter_table_owner;
DROP ROLE alter_table_owner;
-- Test alter table with drop table in the same transaction
@@ -569,6 +602,8 @@ END;
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%';
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 236000;
-- verify logged info is propagated to workers when distributing the table
CREATE TABLE logged_test(id int);
@@ -577,6 +612,8 @@ SELECT create_distributed_table('logged_test', 'id');
\c - - - :worker_1_port
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 237000;
-- verify SET LOGGED/UNLOGGED works after distributing the table
ALTER TABLE logged_test SET LOGGED;
@@ -584,11 +621,15 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logg
\c - - - :worker_1_port
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 238000;
ALTER TABLE logged_test SET UNLOGGED;
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname;
\c - - - :worker_1_port
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test_' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 239000;
DROP TABLE logged_test;
-- Test WITH options on a normal simple hash-distributed table
@@ -601,6 +642,8 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist';
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist_%' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 240000;
-- verify that we can set and reset index storage parameters
ALTER INDEX hash_dist_pkey SET(fillfactor=40);
@@ -609,6 +652,8 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey_%' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 241000;
ALTER INDEX hash_dist_pkey RESET(fillfactor);
SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
@@ -616,6 +661,8 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey_%' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 242000;
-- verify error message on ALTER INDEX, SET TABLESPACE is unsupported
ALTER INDEX hash_dist_pkey SET TABLESPACE foo;
@@ -629,6 +676,8 @@ SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index';
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index_%' ORDER BY relname;
\c - - - :master_port
+SET search_path TO multi_alter_table_statements, public;
+SET citus.next_shard_id TO 243000;
-- get rid of the index
DROP INDEX another_index;
@@ -645,13 +694,24 @@ ALTER TABLE test_table_1 ADD COLUMN test_col int CHECK (test_col > 3);
CREATE TABLE reference_table(i int UNIQUE);
SELECT create_reference_table('reference_table');
-ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE;
-ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL;
-DROP TABLE reference_table;
+
+ALTER TABLE test_table_1 ADD COLUMN test_col_1 int REFERENCES reference_table(i) ON DELETE CASCADE;
+ALTER TABLE test_table_1 ADD COLUMN test_col_2 int REFERENCES reference_table(i) ON DELETE CASCADE ON UPDATE SET NULL;
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_grouped_fkey_constraints FROM get_grouped_fkey_constraints('multi_alter_table_statements.test_table_1')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+
+BEGIN;
+ SET LOCAL client_min_messages TO WARNING;
+ DROP TABLE reference_table CASCADE;
+COMMIT;
CREATE TABLE referenced_table(i int UNIQUE);
SELECT create_distributed_table('referenced_table', 'i');
-ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES referenced_table(i);
+ALTER TABLE test_table_1 ADD COLUMN test_col_3 int REFERENCES referenced_table(i);
DROP TABLE referenced_table, test_table_1;
-- Check sequence propagate its own dependencies while adding a column
@@ -667,5 +727,7 @@ ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_sch
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass);
SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace);
+SET client_min_messages TO WARNING;
DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
DROP TABLE table_without_sequence;
+DROP SCHEMA multi_alter_table_statements CASCADE;
diff --git a/src/test/regress/sql/multi_complex_count_distinct.sql b/src/test/regress/sql/multi_complex_count_distinct.sql
index 9957d0959..0e06fc0c8 100644
--- a/src/test/regress/sql/multi_complex_count_distinct.sql
+++ b/src/test/regress/sql/multi_complex_count_distinct.sql
@@ -1,7 +1,13 @@
--
-- COMPLEX_COUNT_DISTINCT
--
-
+-- This test file has an alternative output because of the following in PG16:
+-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
+-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
+-- The alternative output can be deleted when we drop support for PG15
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
SET citus.next_shard_id TO 240000;
SET citus.shard_count TO 8;
diff --git a/src/test/regress/sql/multi_create_fdw.sql b/src/test/regress/sql/multi_create_fdw.sql
index ea9333781..cb780a5c2 100644
--- a/src/test/regress/sql/multi_create_fdw.sql
+++ b/src/test/regress/sql/multi_create_fdw.sql
@@ -7,7 +7,12 @@ SET citus.next_shard_id TO 390000;
-- ===================================================================
-- create fake fdw for use in tests
-CREATE FUNCTION fake_fdw_handler()
+SET client_min_messages TO WARNING;
+DROP SERVER IF EXISTS fake_fdw_server CASCADE;
+DROP FOREIGN DATA WRAPPER IF EXISTS fake_fdw CASCADE;
+RESET client_min_messages;
+
+CREATE OR REPLACE FUNCTION fake_fdw_handler()
RETURNS fdw_handler
AS 'citus'
LANGUAGE C STRICT;
diff --git a/src/test/regress/sql/multi_deparse_shard_query.sql b/src/test/regress/sql/multi_deparse_shard_query.sql
index 252f22fb9..faffdf862 100644
--- a/src/test/regress/sql/multi_deparse_shard_query.sql
+++ b/src/test/regress/sql/multi_deparse_shard_query.sql
@@ -8,6 +8,9 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15;
+CREATE SCHEMA multi_deparse_shard_query;
+SET search_path TO multi_deparse_shard_query;
+
SET citus.next_shard_id TO 13100000;
SET citus.shard_replication_factor TO 1;
@@ -304,3 +307,6 @@ SELECT
FROM
raw_events_1;
');
+
+SET client_min_messages TO ERROR;
+DROP SCHEMA multi_deparse_shard_query CASCADE;
diff --git a/src/test/regress/sql/multi_explain.sql b/src/test/regress/sql/multi_explain.sql
index dd4615434..7fa75c8be 100644
--- a/src/test/regress/sql/multi_explain.sql
+++ b/src/test/regress/sql/multi_explain.sql
@@ -1,6 +1,13 @@
--
-- MULTI_EXPLAIN
--
+-- This test file has an alternative output because of the following in PG16:
+-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
+-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
+-- The alternative output can be deleted when we drop support for PG15
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
SET citus.next_shard_id TO 570000;
diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql
index 72d939867..8cbbbc3ed 100644
--- a/src/test/regress/sql/multi_extension.sql
+++ b/src/test/regress/sql/multi_extension.sql
@@ -63,7 +63,9 @@ BEGIN
SELECT p.description previous_object, c.description current_object
FROM current_objects c FULL JOIN prev_objects p
ON p.description = c.description
- WHERE p.description is null OR c.description is null;
+ WHERE (p.description is null OR c.description is null)
+ AND c.description IS DISTINCT FROM 'function any_value(anyelement) anyelement'
+ AND c.description IS DISTINCT FROM 'function any_value_agg(anyelement,anyelement) anyelement';
DROP TABLE prev_objects;
ALTER TABLE current_objects RENAME TO prev_objects;
@@ -456,7 +458,7 @@ DELETE FROM pg_dist_shard WHERE shardid = 1;
CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id);
CREATE TABLE orders_2020_07_01
PARTITION OF e_transactions FOR VALUES IN (1,2,3);
-INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
+INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
SELECT
(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
@@ -591,20 +593,30 @@ SELECT * FROM multi_extension.print_extension_changes();
ALTER EXTENSION citus UPDATE TO '11.3-1';
SELECT * FROM multi_extension.print_extension_changes();
--- Test downgrade to 11.3-1 from 12.0-1
+-- Test downgrade to 11.3-1 from 11.3-2
+ALTER EXTENSION citus UPDATE TO '11.3-2';
+ALTER EXTENSION citus UPDATE TO '11.3-1';
+-- Should be empty result since upgrade+downgrade should be a no-op
+SELECT * FROM multi_extension.print_extension_changes();
+
+-- Snapshot of state at 11.3-2
+ALTER EXTENSION citus UPDATE TO '11.3-2';
+SELECT * FROM multi_extension.print_extension_changes();
+
+-- Test downgrade to 11.3-2 from 12.0-1
ALTER EXTENSION citus UPDATE TO '12.0-1';
CREATE TABLE null_shard_key (x int, y int);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('null_shard_key', null);
--- Show that we cannot downgrade to 11.3-1 becuase the cluster has a
+-- Show that we cannot downgrade to 11.3-2 because the cluster has a
-- distributed table with single-shard.
-ALTER EXTENSION citus UPDATE TO '11.3-1';
+ALTER EXTENSION citus UPDATE TO '11.3-2';
DROP TABLE null_shard_key;
-ALTER EXTENSION citus UPDATE TO '11.3-1';
+ALTER EXTENSION citus UPDATE TO '11.3-2';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
@@ -612,6 +624,26 @@ SELECT * FROM multi_extension.print_extension_changes();
ALTER EXTENSION citus UPDATE TO '12.0-1';
SELECT * FROM multi_extension.print_extension_changes();
+-- Test downgrade to 12.0-1 from 12.1-1
+ALTER EXTENSION citus UPDATE TO '12.1-1';
+ALTER EXTENSION citus UPDATE TO '12.0-1';
+-- Should be empty result since upgrade+downgrade should be a no-op
+SELECT * FROM multi_extension.print_extension_changes();
+
+-- Snapshot of state at 12.1-1
+ALTER EXTENSION citus UPDATE TO '12.1-1';
+SELECT * FROM multi_extension.print_extension_changes();
+
+-- Test downgrade to 12.1-1 from 12.2-1
+ALTER EXTENSION citus UPDATE TO '12.2-1';
+ALTER EXTENSION citus UPDATE TO '12.1-1';
+-- Should be empty result since upgrade+downgrade should be a no-op
+SELECT * FROM multi_extension.print_extension_changes();
+
+-- Snapshot of state at 12.2-1
+ALTER EXTENSION citus UPDATE TO '12.2-1';
+SELECT * FROM multi_extension.print_extension_changes();
+
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
diff --git a/src/test/regress/sql/multi_hash_pruning.sql b/src/test/regress/sql/multi_hash_pruning.sql
index df432ca90..ef6da8638 100644
--- a/src/test/regress/sql/multi_hash_pruning.sql
+++ b/src/test/regress/sql/multi_hash_pruning.sql
@@ -336,12 +336,14 @@ FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
WHERE o_orderkey IN (1, 2)
OR l_orderkey IN (2, 3);
+SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
SELECT count(*)
FROM orders_hash_partitioned
FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
WHERE o_orderkey IN (1, 2)
AND l_orderkey IN (2, 3);
+$Q$);
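+-- coordinator_plan() is a test helper that trims the EXPLAIN output to the
+-- coordinator-level plan, presumably so that per-task plan details, which can
+-- vary across PG versions, do not leak into the expected output.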
SET citus.task_executor_type TO DEFAULT;
diff --git a/src/test/regress/sql/multi_having_pushdown.sql b/src/test/regress/sql/multi_having_pushdown.sql
index 497fd8cc3..48475099d 100644
--- a/src/test/regress/sql/multi_having_pushdown.sql
+++ b/src/test/regress/sql/multi_having_pushdown.sql
@@ -43,7 +43,7 @@ EXPLAIN (COSTS FALSE)
SELECT sum(l_extendedprice * l_discount) as revenue
FROM lineitem_hash, orders_hash
WHERE o_orderkey = l_orderkey
- GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24
+ GROUP BY l_orderkey, l_shipmode HAVING sum(l_quantity) > 24
ORDER BY 1 DESC LIMIT 3;
EXPLAIN (COSTS FALSE)
diff --git a/src/test/regress/sql/multi_insert_select.sql b/src/test/regress/sql/multi_insert_select.sql
index 4d202041f..b10be8424 100644
--- a/src/test/regress/sql/multi_insert_select.sql
+++ b/src/test/regress/sql/multi_insert_select.sql
@@ -2445,9 +2445,9 @@ INSERT INTO append_table SELECT * FROM append_table;
-- verify that CTEs at top level of INSERT SELECT, that can normally be inlined, would not be inlined by INSERT SELECT pushdown planner
-- and handled by pull to coordinator.
SELECT coordinator_plan($$
- EXPLAIN (COSTS FALSE) WITH cte_1 AS (SELECT id FROM dist_table_5 WHERE id = 5)
+ EXPLAIN (COSTS FALSE) WITH cte_1 AS (SELECT id FROM dist_table_5 WHERE id > 5)
INSERT INTO dist_table_5
- SELECT id FROM dist_table_5 JOIN cte_1 USING(id);
+ SELECT id FROM dist_table_5 JOIN cte_1 USING(id) OFFSET 5;
$$);
-- verify that CTEs at top level of SELECT part, would be inlined by Postgres and pushed down by INSERT SELECT planner.
diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql
index 52270409e..de3468415 100644
--- a/src/test/regress/sql/multi_mx_create_table.sql
+++ b/src/test/regress/sql/multi_mx_create_table.sql
@@ -51,7 +51,19 @@ CREATE OPERATOR citus_mx_test_schema.=== (
);
SET search_path TO public;
+
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+
+\if :server_version_ge_16
+-- In PG16, read-only server settings lc_collate and lc_ctype are removed
+-- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
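+-- datlocprovider = 'i' means the database collation provider is ICU, in which
+-- case the locale is stored in daticulocale; for libc databases datcollate
+-- still applies.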
+SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
+\else
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
+\endif
+
CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale);
CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
diff --git a/src/test/regress/sql/multi_mx_hide_shard_names.sql b/src/test/regress/sql/multi_mx_hide_shard_names.sql
index 9d2536973..e5213a41b 100644
--- a/src/test/regress/sql/multi_mx_hide_shard_names.sql
+++ b/src/test/regress/sql/multi_mx_hide_shard_names.sql
@@ -226,14 +226,32 @@ RESET citus.enable_metadata_sync;
-- the shards and indexes do not show up
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
+-- PG16 added one more backend type, B_STANDALONE_BACKEND,
+-- and also alphabetized the backend types, so their numeric values changed
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+
+\if :server_version_ge_16
+SELECT 4 AS client_backend \gset
+SELECT 5 AS bgworker \gset
+SELECT 12 AS walsender \gset
+\else
+SELECT 3 AS client_backend \gset
+SELECT 4 AS bgworker \gset
+SELECT 9 AS walsender \gset
+\endif
+
-- say, we set it to bgworker
-- the shards and indexes do not show up
-SELECT set_backend_type(4);
+SELECT set_backend_type(:bgworker);
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- or, we set it to walsender
-- the shards and indexes do not show up
-SELECT set_backend_type(9);
+SELECT set_backend_type(:walsender);
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- unless the application name starts with citus_shard
@@ -242,7 +260,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
RESET application_name;
-- but, client backends to see the shards
-SELECT set_backend_type(3);
+SELECT set_backend_type(:client_backend);
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
diff --git a/src/test/regress/sql/multi_orderby_limit_pushdown.sql b/src/test/regress/sql/multi_orderby_limit_pushdown.sql
index 821c0130a..7b35d82eb 100644
--- a/src/test/regress/sql/multi_orderby_limit_pushdown.sql
+++ b/src/test/regress/sql/multi_orderby_limit_pushdown.sql
@@ -177,7 +177,7 @@ ORDER BY 2, AVG(ut.value_1), 1 DESC
LIMIT 2;
EXPLAIN (COSTS OFF)
-SELECT ut.user_id, count(DISTINCT ut.value_2)
+SELECT ut.user_id, avg(ut.value_2)
FROM users_table ut, events_table et
WHERE ut.user_id = et.user_id and et.value_2 < 5
GROUP BY ut.user_id
diff --git a/src/test/regress/sql/multi_router_planner_fast_path.sql b/src/test/regress/sql/multi_router_planner_fast_path.sql
index fc301e48f..1fd1f6ce0 100644
--- a/src/test/regress/sql/multi_router_planner_fast_path.sql
+++ b/src/test/regress/sql/multi_router_planner_fast_path.sql
@@ -768,7 +768,7 @@ SELECT * FROM author_articles_id_word_count(1);
-- use fast-path queries
PREPARE insert_sel(int, int) AS
INSERT INTO articles_hash
- SELECT * FROM articles_hash WHERE author_id = $2 AND word_count = $1 OFFSET 0;
+ SELECT max(id), max(author_id), title, word_count FROM articles_hash WHERE author_id = $2 AND word_count = $1 GROUP BY title, word_count;
EXECUTE insert_sel(1,1);
EXECUTE insert_sel(1,1);
diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql
index 7ca60162e..146cf78d4 100644
--- a/src/test/regress/sql/multi_schema_support.sql
+++ b/src/test/regress/sql/multi_schema_support.sql
@@ -293,7 +293,19 @@ SELECT * FROM nation_hash ORDER BY 1,2,3,4;
--test COLLATION with schema
SET search_path TO public;
+
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+
+\if :server_version_ge_16
+-- In PG16, read-only server settings lc_collate and lc_ctype are removed
+-- Relevant PG commit: b0f6c437160db640d4ea3e49398ebc3ba39d1982
+SELECT quote_ident((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) as current_locale \gset
+\else
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
+\endif
+
CREATE COLLATION test_schema_support.english (LOCALE = :current_locale);
\c - - - :master_port
@@ -983,6 +995,219 @@ BEGIN;
ALTER SCHEMA bar RENAME TO foo;
ROLLBACK;
+-- the tests below verify dependency propagation with nested sub-transactions
+-- TEST1
+BEGIN;
+ CREATE SCHEMA sc1;
+ CREATE SEQUENCE sc1.seq;
+ CREATE TABLE sc1.s1(id int default(nextval('sc1.seq')));
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+
+-- TEST2
+CREATE SCHEMA sc1;
+BEGIN;
+ CREATE SEQUENCE sc1.seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('sc1.seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+
+-- TEST3
+SET citus.enable_metadata_sync TO off;
+CREATE SCHEMA sc1;
+SET citus.enable_metadata_sync TO on;
+BEGIN;
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+
+-- TEST4
+BEGIN;
+ SAVEPOINT sp1;
+ CREATE SCHEMA sc1;
+ ROLLBACK TO SAVEPOINT sp1;
+
+ SET LOCAL citus.enable_metadata_sync TO off;
+ CREATE SCHEMA sc1;
+ SET LOCAL citus.enable_metadata_sync TO on;
+
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+
+-- TEST5
+BEGIN;
+ SAVEPOINT sp1;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp1;
+
+ CREATE SEQUENCE seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+DROP SEQUENCE seq1;
+
+-- TEST6
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc1;
+ ROLLBACK TO SAVEPOINT sp2;
+ RELEASE SAVEPOINT sp1;
+
+ SET LOCAL citus.enable_metadata_sync TO off;
+ CREATE SCHEMA sc1;
+ SET LOCAL citus.enable_metadata_sync TO on;
+
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+
+-- TEST7
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp2;
+ RELEASE SAVEPOINT sp1;
+
+ CREATE SEQUENCE seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+DROP SEQUENCE seq1;
+
+-- TEST8
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp2;
+ ROLLBACK TO SAVEPOINT sp1;
+
+ SET LOCAL citus.enable_metadata_sync TO off;
+ CREATE SCHEMA sc1;
+ SET LOCAL citus.enable_metadata_sync TO on;
+
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+
+-- TEST9
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc2;
+ ROLLBACK TO SAVEPOINT sp2;
+
+ SAVEPOINT sp3;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp3;
+ RELEASE SAVEPOINT sp1;
+
+ CREATE SEQUENCE seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+DROP SEQUENCE seq1;
+
+-- TEST10
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc2;
+ RELEASE SAVEPOINT sp2;
+ SAVEPOINT sp3;
+ CREATE SCHEMA sc3;
+ SAVEPOINT sp4;
+ CREATE SCHEMA sc1;
+ ROLLBACK TO SAVEPOINT sp4;
+ RELEASE SAVEPOINT sp3;
+ RELEASE SAVEPOINT sp1;
+
+ SET LOCAL citus.enable_metadata_sync TO off;
+ CREATE SCHEMA sc1;
+ SET LOCAL citus.enable_metadata_sync TO on;
+
+ CREATE TABLE sc1.s1(id int);
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+DROP SCHEMA sc2 CASCADE;
+DROP SCHEMA sc3 CASCADE;
+
+-- TEST11
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc2;
+ RELEASE SAVEPOINT sp2;
+ SAVEPOINT sp3;
+ CREATE SCHEMA sc3;
+ SAVEPOINT sp4;
+ CREATE SCHEMA sc1;
+ RELEASE SAVEPOINT sp4;
+ RELEASE SAVEPOINT sp3;
+ RELEASE SAVEPOINT sp1;
+
+ CREATE SEQUENCE seq1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+DROP SCHEMA sc2 CASCADE;
+DROP SCHEMA sc3 CASCADE;
+DROP SEQUENCE seq1;
+
+-- TEST12
+BEGIN;
+ SAVEPOINT sp1;
+ SAVEPOINT sp2;
+ CREATE SCHEMA sc2;
+ RELEASE SAVEPOINT sp2;
+ SAVEPOINT sp3;
+ CREATE SCHEMA sc3;
+ SAVEPOINT sp4;
+ CREATE SEQUENCE seq1;
+ CREATE SCHEMA sc1;
+ CREATE TABLE sc1.s1(id int default(nextval('seq1')));
+ SELECT create_distributed_table('sc1.s1','id');
+ RELEASE SAVEPOINT sp4;
+ RELEASE SAVEPOINT sp3;
+ RELEASE SAVEPOINT sp1;
+COMMIT;
+DROP SCHEMA sc1 CASCADE;
+DROP SCHEMA sc2 CASCADE;
+DROP SCHEMA sc3 CASCADE;
+DROP SEQUENCE seq1;
+
+-- issue-6614
+CREATE FUNCTION create_schema_test() RETURNS void AS $$
+BEGIN
+ SET citus.create_object_propagation = 'deferred';
+ CREATE SCHEMA test_1;
+ CREATE TABLE test_1.test (
+ id bigserial constraint test_pk primary key,
+ creation_date timestamp constraint test_creation_date_df default timezone('UTC'::text, CURRENT_TIMESTAMP) not null
+ );
+ PERFORM create_reference_table('test_1.test');
+ RETURN;
+END;
+$$ LANGUAGE plpgsql;
+SELECT create_schema_test();
+SELECT result FROM run_command_on_all_nodes($$ SELECT COUNT(*) = 1 FROM pg_dist_partition WHERE logicalrelid = 'test_1.test'::regclass $$);
+DROP FUNCTION create_schema_test;
+DROP SCHEMA test_1 CASCADE;
+
-- Clean up the created schema
SET client_min_messages TO WARNING;
diff --git a/src/test/regress/sql/multi_select_distinct.sql b/src/test/regress/sql/multi_select_distinct.sql
index c3ba20cf1..597076199 100644
--- a/src/test/regress/sql/multi_select_distinct.sql
+++ b/src/test/regress/sql/multi_select_distinct.sql
@@ -303,7 +303,7 @@ SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
EXPLAIN (COSTS FALSE)
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
FROM lineitem_hash_part
- GROUP BY l_orderkey
+ GROUP BY l_orderkey, l_partkey, l_shipmode
ORDER BY 1,2;
-- check the plan if the hash aggreate is disabled. We expect to see sort + unique
@@ -312,7 +312,7 @@ SET enable_hashagg TO off;
EXPLAIN (COSTS FALSE)
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
FROM lineitem_hash_part
- GROUP BY l_orderkey
+ GROUP BY l_orderkey, l_partkey, l_shipmode
ORDER BY 1,2;
SET enable_hashagg TO on;
diff --git a/src/test/regress/sql/multi_sequence_default.sql b/src/test/regress/sql/multi_sequence_default.sql
index b41aba577..422a97df4 100644
--- a/src/test/regress/sql/multi_sequence_default.sql
+++ b/src/test/regress/sql/multi_sequence_default.sql
@@ -65,6 +65,7 @@ CREATE TABLE seq_test_4 (x int, y int);
SELECT create_distributed_table('seq_test_4','x');
CREATE SEQUENCE seq_4;
ALTER TABLE seq_test_4 ADD COLUMN a bigint DEFAULT nextval('seq_4');
+ALTER TABLE seq_test_4 ADD COLUMN IF NOT EXISTS a bigint DEFAULT nextval('seq_4');
DROP SEQUENCE seq_4 CASCADE;
TRUNCATE seq_test_4;
CREATE SEQUENCE seq_4;
@@ -440,7 +441,7 @@ ROLLBACK;
-- Show that existing sequence has been renamed and a new sequence with the same name
-- created for another type
\c - - - :worker_1_port
-SELECT seqrelid::regclass, seqtypid::regtype, seqmax, seqmin FROM pg_sequence WHERE seqrelid::regclass::text like '%sequence_rollback%' ORDER BY 1,2;
+SELECT seqrelid::regclass, seqtypid::regtype, seqmax, seqmin FROM pg_sequence WHERE seqrelid::regclass::text in ('sequence_rollback', '"sequence_rollback(citus_backup_0)"') ORDER BY 1,2;
\c - - - :master_port
diff --git a/src/test/regress/sql/multi_subquery.sql b/src/test/regress/sql/multi_subquery.sql
index 68265606a..e5d8aa17c 100644
--- a/src/test/regress/sql/multi_subquery.sql
+++ b/src/test/regress/sql/multi_subquery.sql
@@ -676,7 +676,7 @@ EXPLAIN (COSTS OFF)
SELECT count(*) FROM keyval1 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
EXPLAIN (COSTS OFF)
-SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
+SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 ORDER BY 1 DESC LIMIT 1);
-- Simple join subquery pushdown
SELECT
diff --git a/src/test/regress/sql/multi_subquery_in_where_reference_clause.sql b/src/test/regress/sql/multi_subquery_in_where_reference_clause.sql
index a3dd9c06e..fc1bb5c17 100644
--- a/src/test/regress/sql/multi_subquery_in_where_reference_clause.sql
+++ b/src/test/regress/sql/multi_subquery_in_where_reference_clause.sql
@@ -132,7 +132,7 @@ SELECT
FROM
users_table RIGHT JOIN users_reference_table USING (user_id)
WHERE
- users_table.value_2 IN
+ users_reference_table.value_2 IN
(SELECT
value_2
FROM
diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql
index ba6d6f17f..f7c97f1b2 100644
--- a/src/test/regress/sql/multi_test_helpers.sql
+++ b/src/test/regress/sql/multi_test_helpers.sql
@@ -180,3 +180,373 @@ BEGIN
EXECUTE 'SELECT COUNT(*) FROM pg_catalog.pg_dist_cleanup' INTO record_count;
END LOOP;
END$$ LANGUAGE plpgsql;
+
+-- Returns the foreign keys where the referencing relation's name starts with
+-- the given prefix.
+--
+-- Foreign keys are grouped by their configurations and then the constraint name,
+-- referencing table, and referenced table for each distinct configuration are
+-- aggregated into arrays.
+CREATE OR REPLACE FUNCTION get_grouped_fkey_constraints(referencing_relname_prefix text)
+RETURNS jsonb AS $func$
+ DECLARE
+ confdelsetcols_column_ref text;
+ get_grouped_fkey_constraints_query text;
+ result jsonb;
+ BEGIN
+ -- Read confdelsetcols as null if no such column exists.
+ -- This can only be the case for PG versions < 15.
+ IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_constraint'::regclass AND attname='confdelsetcols')
+ THEN
+ confdelsetcols_column_ref := '(SELECT array_agg(attname ORDER BY attnum) FROM pg_attribute WHERE attrelid = conrelid AND attnum = ANY(confdelsetcols))';
+ ELSE
+ confdelsetcols_column_ref := '(SELECT null::smallint[])';
+ END IF;
+
+ EXECUTE format(
+ $$
+ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.constraint_names) AS fkeys_with_different_config FROM (
+ SELECT array_agg(constraint_name ORDER BY constraint_oid) AS constraint_names,
+ array_agg(referencing_table::regclass::text ORDER BY constraint_oid) AS referencing_tables,
+ array_agg(referenced_table::regclass::text ORDER BY constraint_oid) AS referenced_tables,
+ referencing_columns, referenced_columns, deferable, deferred, on_update, on_delete, match_type, referencing_columns_set_null_or_default
+ FROM (
+ SELECT
+ oid AS constraint_oid,
+ conname AS constraint_name,
+ conrelid AS referencing_table,
+ (SELECT array_agg(attname ORDER BY attnum) FROM pg_attribute WHERE attrelid = conrelid AND attnum = ANY(conkey)) AS referencing_columns,
+ confrelid AS referenced_table,
+ (SELECT array_agg(attname ORDER BY attnum) FROM pg_attribute WHERE attrelid = confrelid AND attnum = ANY(confkey)) AS referenced_columns,
+ condeferrable AS deferable,
+ condeferred AS deferred,
+ confupdtype AS on_update,
+ confdeltype AS on_delete,
+ confmatchtype AS match_type,
+ %2$s AS referencing_columns_set_null_or_default
+ FROM pg_constraint WHERE starts_with(conrelid::regclass::text, '%1$s') AND contype = 'f'
+ ) q2
+ GROUP BY referencing_columns, referenced_columns, deferable, deferred, on_update, on_delete, match_type, referencing_columns_set_null_or_default
+ ) q1
+ $$,
+ referencing_relname_prefix,
+ confdelsetcols_column_ref
+ ) INTO result;
+ RETURN result;
+ END;
+$func$ LANGUAGE plpgsql;
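+
+-- Illustrative usage (a sketch, kept as a comment so it does not affect test output;
+-- 'my_schema.referencing' is a hypothetical table name prefix):
+-- SELECT get_grouped_fkey_constraints('my_schema.referencing');
+-- Foreign keys that share the same configuration (columns, ON UPDATE/DELETE actions,
+-- match type, deferability) come back as one JSON object whose constraint_names,
+-- referencing_tables and referenced_tables fields are arrays.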
+
+CREATE OR REPLACE FUNCTION get_index_defs(schemaname text, tablename text)
+RETURNS jsonb AS $func$
+ DECLARE
+ result jsonb;
+ indnullsnotdistinct_column_ref text;
+ BEGIN
+    -- Do not use indnullsnotdistinct in the GROUP BY clause if no such column exists.
+ -- This can only be the case for PG versions < 15.
+ IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_index'::regclass AND attname='indnullsnotdistinct')
+ THEN
+ indnullsnotdistinct_column_ref := ',indnullsnotdistinct';
+ ELSE
+ indnullsnotdistinct_column_ref := '';
+ END IF;
+
+ EXECUTE format(
+ $$
+ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.indexnames) AS index_defs FROM (
+ SELECT array_agg(indexname ORDER BY indexrelid) AS indexnames,
+ array_agg(indexdef ORDER BY indexrelid) AS indexdefs
+ FROM pg_indexes
+ JOIN pg_index
+ ON (indexrelid = (schemaname || '.' || indexname)::regclass)
+ WHERE schemaname = '%1$s' AND starts_with(tablename, '%2$s')
+ GROUP BY indnatts, indnkeyatts, indisunique, indisprimary, indisexclusion,
+ indimmediate, indisclustered, indisvalid, indisready, indislive,
+ indisreplident, indkey, indcollation, indclass, indoption, indexprs,
+ indpred %3$s
+ ) q1
+ $$,
+ schemaname, tablename, indnullsnotdistinct_column_ref) INTO result;
+ RETURN result;
+ END;
+$func$ LANGUAGE plpgsql;
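+
+-- Illustrative usage (a sketch, commented out so it does not change test output;
+-- the schema and table names are hypothetical):
+-- SELECT get_index_defs('my_schema', 'my_table');
+-- Indexes whose pg_index attributes match are grouped together, so logically
+-- identical indexes on different shards collapse into a single entry with the
+-- index names and definitions aggregated into arrays.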
+
+CREATE OR REPLACE FUNCTION get_column_defaults(schemaname text, tablename text)
+RETURNS jsonb AS $func$
+ DECLARE
+ result jsonb;
+ BEGIN
+ EXECUTE format(
+ $$
+ SELECT jsonb_agg(to_jsonb(q1.*) ORDER BY q1.column_name) AS column_defs FROM (
+ SELECT column_name, column_default::text, generation_expression::text
+ FROM information_schema.columns
+ WHERE table_schema = '%1$s' AND table_name = '%2$s' AND
+                  (column_default IS NOT NULL OR generation_expression IS NOT NULL)
+ ) q1
+ $$,
+ schemaname, tablename) INTO result;
+ RETURN result;
+ END;
+$func$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION get_column_attrs(relname_prefix text)
+RETURNS jsonb AS $func$
+ DECLARE
+ result jsonb;
+ BEGIN
+ EXECUTE format(
+ $$
+ SELECT to_jsonb(q2.*) FROM (
+ SELECT relnames, jsonb_agg(to_jsonb(q1.*) - 'relnames' ORDER BY q1.column_name) AS column_attrs FROM (
+ SELECT array_agg(attrelid::regclass::text ORDER BY attrelid) AS relnames,
+ attname AS column_name, typname AS type_name, collname AS collation_name, attcompression AS compression_method, attnotnull AS not_null
+ FROM pg_attribute pa
+ LEFT JOIN pg_type pt ON (pa.atttypid = pt.oid)
+ LEFT JOIN pg_collation pc1 ON (pa.attcollation = pc1.oid)
+ JOIN pg_class pc2 ON (pa.attrelid = pc2.oid)
+ WHERE starts_with(attrelid::regclass::text, '%1$s') AND
+ attnum > 0 AND NOT attisdropped AND relkind = 'r'
+ GROUP BY column_name, type_name, collation_name, compression_method, not_null
+ ) q1
+ GROUP BY relnames
+ ) q2
+ $$,
+ relname_prefix) INTO result;
+ RETURN result;
+ END;
+$func$ LANGUAGE plpgsql;
+
+-- Returns true if all shard placements of the given table have the given number of indexes.
+CREATE OR REPLACE FUNCTION verify_index_count_on_shard_placements(
+ qualified_table_name text,
+ n_expected_indexes int)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ v_result boolean;
+BEGIN
+    SELECT n_expected_indexes = ALL(
+        SELECT result::int
+        FROM run_command_on_placements(
+            qualified_table_name,
+            $$SELECT COUNT(*) FROM pg_index WHERE indrelid::regclass = '%s'::regclass$$
+        )
+    ) INTO v_result;
+ RETURN v_result;
+END;
+$func$ LANGUAGE plpgsql;
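+
+-- Illustrative usage (a sketch, commented out; the table name and count are hypothetical):
+-- SELECT verify_index_count_on_shard_placements('my_schema.my_table', 1);
+-- The helper runs the count query on every shard placement via run_command_on_placements
+-- and returns true only if all placements report the expected number of indexes.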
+
+-- Returns names of the foreign keys that shards of the given table are involved in
+-- (as the referencing or the referenced side).
+CREATE OR REPLACE FUNCTION get_fkey_names_on_placements(
+ qualified_table_name text)
+RETURNS TABLE (
+ on_node text,
+ shard_id bigint,
+ fkey_names text[]
+)
+AS $func$
+BEGIN
+ RETURN QUERY SELECT
+ CASE WHEN groupid = 0 THEN 'on_coordinator' ELSE 'on_worker' END AS on_node_col,
+ shardid,
+ (CASE WHEN result = '' THEN '{}' ELSE result END)::text[] AS fkey_names_col
+ FROM run_command_on_placements(
+ qualified_table_name,
+ $$SELECT array_agg(conname ORDER BY conname) FROM pg_constraint WHERE '%s'::regclass IN (conrelid, confrelid) AND contype = 'f'$$
+ )
+ JOIN pg_dist_node USING (nodename, nodeport);
+END;
+$func$ LANGUAGE plpgsql;
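+
+-- Illustrative usage (a sketch, commented out; the table name is hypothetical):
+-- SELECT * FROM get_fkey_names_on_placements('my_schema.my_table') ORDER BY shard_id;
+-- Each row reports whether the placement lives on the coordinator or a worker,
+-- the shard id, and the names of the foreign keys its shard participates in.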
+
+-- Returns true if all shard placements of the given table have the given number of partitions.
+CREATE OR REPLACE FUNCTION verify_partition_count_on_placements(
+ qualified_table_name text,
+ n_expected_partitions int)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ v_result boolean;
+BEGIN
+    SELECT n_expected_partitions = ALL(
+        SELECT result::int
+        FROM run_command_on_placements(
+            qualified_table_name,
+            $$SELECT COUNT(*) FROM pg_inherits WHERE inhparent = '%s'::regclass;$$
+        )
+    ) INTO v_result;
+ RETURN v_result;
+END;
+$func$ LANGUAGE plpgsql;
+
+-- This function checks pg_dist_placement on all nodes and returns true if the following holds:
+-- Shard is placed either on the coordinator or on a primary worker node, as expected.
+-- Given shardid is used for the table's shard placement.
+-- Placement metadata is correct on all nodes.
+CREATE OR REPLACE FUNCTION verify_shard_placement_for_single_shard_table(
+ qualified_table_name text,
+ expected_shard_id bigint,
+ expect_placement_on_coord boolean)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ verify_workers_query text;
+ nodename_nodeport_groupid record;
+ result boolean;
+BEGIN
+ SELECT nodename, nodeport, groupid INTO nodename_nodeport_groupid
+ FROM pg_dist_shard
+ JOIN pg_dist_placement USING (shardid)
+ JOIN pg_dist_node USING (groupid)
+ WHERE noderole = 'primary' AND shouldhaveshards AND isactive AND
+ logicalrelid = qualified_table_name::regclass AND shardid = expected_shard_id;
+
+ IF nodename_nodeport_groupid IS NULL
+ THEN
+ RAISE NOTICE 'Shard placement is not on a primary worker node';
+ RETURN false;
+ END IF;
+
+ IF (nodename_nodeport_groupid.groupid = 0) != expect_placement_on_coord
+ THEN
+ RAISE NOTICE 'Shard placement is on an unexpected node';
+ RETURN false;
+ END IF;
+
+ -- verify that metadata on workers is correct too
+ SELECT format(
+ 'SELECT true = ALL(
+ SELECT result::boolean FROM run_command_on_workers($$
+ SELECT COUNT(*) = 1
+ FROM pg_dist_shard
+ JOIN pg_dist_placement USING (shardid)
+ JOIN pg_dist_node USING (groupid)
+ WHERE logicalrelid = ''%s''::regclass AND
+ shardid = %s AND
+ nodename = ''%s'' AND
+ nodeport = %s AND
+ groupid = %s
+ $$)
+ );',
+ qualified_table_name, expected_shard_id,
+ nodename_nodeport_groupid.nodename,
+ nodename_nodeport_groupid.nodeport,
+ nodename_nodeport_groupid.groupid
+ )
+ INTO verify_workers_query;
+
+ EXECUTE verify_workers_query INTO result;
+ RETURN result;
+END;
+$func$ LANGUAGE plpgsql;
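+
+-- Illustrative usage (a sketch, commented out; the table name, shard id and the
+-- expectation flag are hypothetical):
+-- SELECT verify_shard_placement_for_single_shard_table('my_schema.my_table', 102008, false);
+-- The first two checks run locally against pg_dist_shard/pg_dist_placement; the
+-- final check re-runs the placement lookup on every worker through
+-- run_command_on_workers to make sure the synced metadata agrees.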
+
+-- This function checks pg_dist_placement on all nodes and returns true if the following holds:
+-- Shard placements exist on the coordinator and on all primary worker nodes.
+-- Given shardid is used for the table's shard placements.
+-- Given placementid is used for the coordinator's shard placement.
+-- Placement metadata is correct on all nodes.
+CREATE OR REPLACE FUNCTION verify_shard_placements_for_reference_table(
+ qualified_table_name text,
+ expected_shard_id bigint,
+ expected_coord_placement_id bigint)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ verify_workers_query text;
+ result boolean;
+BEGIN
+ SELECT format(
+ 'SELECT true = ALL(
+ SELECT result::boolean FROM run_command_on_all_nodes($$
+ SELECT
+ (SELECT COUNT(*) FROM pg_dist_node WHERE noderole = ''primary'' AND isactive) =
+ (SELECT COUNT(*)
+ FROM pg_dist_shard
+ JOIN pg_dist_placement USING (shardid)
+ JOIN pg_dist_node USING (groupid)
+ WHERE noderole = ''primary'' AND isactive AND
+ logicalrelid = ''%s''::regclass AND shardid = %s)
+ AND
+ (SELECT COUNT(*) = 1
+ FROM pg_dist_shard
+ JOIN pg_dist_placement USING (shardid)
+ JOIN pg_dist_node USING (groupid)
+ WHERE noderole = ''primary'' AND isactive AND
+ logicalrelid = ''%s''::regclass AND shardid = %s AND
+ placementid = %s AND groupid = 0)
+
+ $$)
+ );',
+ qualified_table_name, expected_shard_id,
+ qualified_table_name, expected_shard_id,
+ expected_coord_placement_id
+ )
+ INTO verify_workers_query;
+
+ EXECUTE verify_workers_query INTO result;
+ RETURN result;
+END;
+$func$ LANGUAGE plpgsql;
+
+-- This function checks pg_dist_partition on all nodes and returns true if the metadata
+-- record for the given single-shard table is correct.
+CREATE OR REPLACE FUNCTION verify_pg_dist_partition_for_single_shard_table(
+ qualified_table_name text)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ verify_workers_query text;
+ result boolean;
+BEGIN
+ SELECT format(
+ 'SELECT true = ALL(
+ SELECT result::boolean FROM run_command_on_all_nodes($$
+ SELECT COUNT(*) = 1
+ FROM pg_dist_partition
+ WHERE logicalrelid = ''%s''::regclass AND
+ partmethod = ''n'' AND
+ partkey IS NULL AND
+ colocationid > 0 AND
+ repmodel = ''s'' AND
+ autoconverted = false
+ $$)
+ );',
+ qualified_table_name)
+ INTO verify_workers_query;
+
+ EXECUTE verify_workers_query INTO result;
+ RETURN result;
+END;
+$func$ LANGUAGE plpgsql;
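+
+-- Illustrative usage (a sketch, commented out; the table name is hypothetical):
+-- SELECT verify_pg_dist_partition_for_single_shard_table('my_schema.my_table');
+-- A single-shard table is expected to have partmethod 'n', no partition key,
+-- a positive colocation id and repmodel 's' on every node.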
+
+-- This function checks pg_dist_partition on all nodes and returns true if the metadata
+-- record for the given reference table is correct.
+CREATE OR REPLACE FUNCTION verify_pg_dist_partition_for_reference_table(
+ qualified_table_name text)
+RETURNS BOOLEAN
+AS $func$
+DECLARE
+ verify_workers_query text;
+ result boolean;
+BEGIN
+ SELECT format(
+ 'SELECT true = ALL(
+ SELECT result::boolean FROM run_command_on_all_nodes($$
+ SELECT COUNT(*) = 1
+ FROM pg_dist_partition
+ WHERE logicalrelid = ''%s''::regclass AND
+ partmethod = ''n'' AND
+ partkey IS NULL AND
+ colocationid > 0 AND
+ repmodel = ''t'' AND
+ autoconverted = false
+ $$)
+ );',
+ qualified_table_name)
+ INTO verify_workers_query;
+
+ EXECUTE verify_workers_query INTO result;
+ RETURN result;
+END;
+$func$ LANGUAGE plpgsql;
diff --git a/src/test/regress/sql/multi_transaction_recovery_multiple_databases.sql b/src/test/regress/sql/multi_transaction_recovery_multiple_databases.sql
new file mode 100644
index 000000000..768cd1628
--- /dev/null
+++ b/src/test/regress/sql/multi_transaction_recovery_multiple_databases.sql
@@ -0,0 +1,286 @@
+ALTER SYSTEM SET citus.recover_2pc_interval TO -1;
+SELECT pg_reload_conf();
+
+SELECT $definition$
+CREATE OR REPLACE FUNCTION test.maintenance_worker()
+ RETURNS pg_stat_activity
+ LANGUAGE plpgsql
+AS $$
+DECLARE
+ activity record;
+BEGIN
+ DO 'BEGIN END'; -- Force maintenance daemon to start
+ -- we don't want to wait forever; loop will exit after 20 seconds
+ FOR i IN 1 .. 200 LOOP
+ PERFORM pg_stat_clear_snapshot();
+ SELECT * INTO activity FROM pg_stat_activity
+ WHERE application_name = 'Citus Maintenance Daemon' AND datname = current_database();
+ IF activity.pid IS NOT NULL THEN
+ RETURN activity;
+ ELSE
+ PERFORM pg_sleep(0.1);
+ END IF ;
+ END LOOP;
+ -- fail if we reach the end of this loop
+ raise 'Waited too long for maintenance daemon to start';
+END;
+$$;
+$definition$ create_function_test_maintenance_worker
+\gset
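+-- The CREATE FUNCTION text above is captured into the psql variable
+-- :create_function_test_maintenance_worker, so the same helper can be
+-- re-created verbatim in each database we connect to below.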
+
+CREATE DATABASE db1;
+
+SELECT oid AS db1_oid
+FROM pg_database
+WHERE datname = 'db1'
+\gset
+
+\c - - - :worker_1_port
+
+CREATE DATABASE db1;
+
+\c - - - :worker_2_port
+
+CREATE DATABASE db1;
+
+\c db1 - - :worker_1_port
+
+CREATE EXTENSION citus;
+
+\c db1 - - :worker_2_port
+
+CREATE EXTENSION citus;
+
+\c db1 - - :master_port
+
+CREATE EXTENSION citus;
+
+
+SELECT citus_add_node('localhost', :worker_1_port);
+SELECT citus_add_node('localhost', :worker_2_port);
+
+SELECT current_database();
+
+CREATE SCHEMA test;
+:create_function_test_maintenance_worker
+
+-- check maintenance daemon is started
+SELECT datname, current_database(),
+ usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
+FROM test.maintenance_worker();
+
+SELECT *
+FROM pg_dist_node;
+
+CREATE DATABASE db2;
+
+SELECT oid AS db2_oid
+FROM pg_database
+WHERE datname = 'db2'
+\gset
+
+\c - - - :worker_1_port
+CREATE DATABASE db2;
+\c - - - :worker_2_port
+CREATE DATABASE db2;
+
+
+\c db2 - - :worker_1_port
+CREATE EXTENSION citus;
+\c db2 - - :worker_2_port
+CREATE EXTENSION citus;
+\c db2 - - :master_port
+CREATE EXTENSION citus;
+
+SELECT citus_add_node('localhost', :worker_1_port);
+SELECT citus_add_node('localhost', :worker_2_port);
+
+SELECT current_database();
+
+CREATE SCHEMA test;
+:create_function_test_maintenance_worker
+
+-- check maintenance daemon is started
+SELECT datname, current_database(),
+ usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus')
+FROM test.maintenance_worker();
+
+SELECT *
+FROM pg_dist_node;
+
+SELECT groupid AS worker_1_group_id
+FROM pg_dist_node
+WHERE nodeport = :worker_1_port
+\gset
+
+SELECT groupid AS worker_2_group_id
+FROM pg_dist_node
+WHERE nodeport = :worker_2_port
+\gset
+
+-- Prepare transactions on first database
+\c db1 - - :worker_1_port
+
+BEGIN;
+CREATE TABLE should_abort
+(
+ value int
+);
+SELECT 'citus_0_1234_0_0_' || :'db1_oid' AS transaction_1_worker_1_db_1_name
+\gset
+PREPARE TRANSACTION :'transaction_1_worker_1_db_1_name';
+
+BEGIN;
+CREATE TABLE should_commit
+(
+ value int
+);
+SELECT 'citus_0_1234_1_0_' || :'db1_oid' AS transaction_2_worker_1_db_1_name
+\gset
+PREPARE TRANSACTION :'transaction_2_worker_1_db_1_name';
+
+\c db1 - - :worker_2_port
+
+BEGIN;
+CREATE TABLE should_abort
+(
+ value int
+);
+SELECT 'citus_0_1234_0_0_' || :'db1_oid' AS transaction_1_worker_2_db_1_name
+\gset
+PREPARE TRANSACTION :'transaction_1_worker_2_db_1_name';
+
+BEGIN;
+CREATE TABLE should_commit
+(
+ value int
+);
+SELECT 'citus_0_1234_1_0_' || :'db1_oid' AS transaction_2_worker_2_db_1_name
+\gset
+PREPARE TRANSACTION :'transaction_2_worker_2_db_1_name';
+
+-- Prepare transactions on second database
+\c db2 - - :worker_1_port
+
+BEGIN;
+CREATE TABLE should_abort
+(
+ value int
+);
+SELECT 'citus_0_1234_3_0_' || :'db2_oid' AS transaction_1_worker_1_db_2_name
+\gset
+PREPARE TRANSACTION :'transaction_1_worker_1_db_2_name';
+
+BEGIN;
+CREATE TABLE should_commit
+(
+ value int
+);
+SELECT 'citus_0_1234_4_0_' || :'db2_oid' AS transaction_2_worker_1_db_2_name
+\gset
+PREPARE TRANSACTION :'transaction_2_worker_1_db_2_name';
+
+\c db2 - - :worker_2_port
+
+BEGIN;
+CREATE TABLE should_abort
+(
+ value int
+);
+SELECT 'citus_0_1234_3_0_' || :'db2_oid' AS transaction_1_worker_2_db_2_name
+\gset
+PREPARE TRANSACTION :'transaction_1_worker_2_db_2_name';
+
+BEGIN;
+CREATE TABLE should_commit
+(
+ value int
+);
+SELECT 'citus_0_1234_4_0_' || :'db2_oid' AS transaction_2_worker_2_db_2_name
+\gset
+PREPARE TRANSACTION :'transaction_2_worker_2_db_2_name';
+
+\c db1 - - :master_port
+
+INSERT INTO pg_dist_transaction
+VALUES (:worker_1_group_id, :'transaction_2_worker_1_db_1_name'),
+ (:worker_2_group_id, :'transaction_2_worker_2_db_1_name');
+INSERT INTO pg_dist_transaction
+VALUES (:worker_1_group_id, 'citus_0_should_be_forgotten_' || :'db1_oid'),
+ (:worker_2_group_id, 'citus_0_should_be_forgotten_' || :'db1_oid');
+
+\c db2 - - :master_port
+
+INSERT INTO pg_dist_transaction
+VALUES (:worker_1_group_id, :'transaction_2_worker_1_db_2_name'),
+ (:worker_2_group_id, :'transaction_2_worker_2_db_2_name');
+INSERT INTO pg_dist_transaction
+VALUES (:worker_1_group_id, 'citus_0_should_be_forgotten_' || :'db2_oid'),
+ (:worker_2_group_id, 'citus_0_should_be_forgotten_' || :'db2_oid');
+
+\c db1 - - :master_port
+
+SELECT count(*) != 0
+FROM pg_dist_transaction;
+
+SELECT recover_prepared_transactions() > 0;
+
+SELECT count(*) = 0
+FROM pg_dist_transaction;
+
+\c db2 - - :master_port
+
+SELECT count(*) != 0
+FROM pg_dist_transaction;
+
+SELECT recover_prepared_transactions() > 0;
+
+SELECT count(*) = 0
+FROM pg_dist_transaction;
+
+\c regression - - :master_port
+
+SELECT count(pg_terminate_backend(pid)) > 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db1' ;
+
+DROP DATABASE db1;
+
+SELECT count(pg_terminate_backend(pid)) > 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db2' ;
+DROP DATABASE db2;
+
+\c - - - :worker_1_port
+
+SELECT count(pg_terminate_backend(pid)) > 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db1' ;
+
+DROP DATABASE db1;
+
+SELECT count(pg_terminate_backend(pid)) > 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db2' ;
+DROP DATABASE db2;
+
+\c - - - :worker_2_port
+
+-- Count of terminated sessions is not important for the test,
+-- it is just to make output predictable
+SELECT count(pg_terminate_backend(pid)) >= 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db1' ;
+
+DROP DATABASE db1;
+
+SELECT count(pg_terminate_backend(pid)) >= 0
+FROM pg_stat_activity
+WHERE pid <> pg_backend_pid()
+ AND datname = 'db2' ;
+DROP DATABASE db2;
diff --git a/src/test/regress/sql/multi_view.sql b/src/test/regress/sql/multi_view.sql
index d80ed5c97..889dde818 100644
--- a/src/test/regress/sql/multi_view.sql
+++ b/src/test/regress/sql/multi_view.sql
@@ -37,7 +37,7 @@ CREATE VIEW priority_lineitem AS SELECT li.* FROM lineitem_hash_part li JOIN pri
SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 LIMIT 5;
-CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR';
+CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part table_name_for_view WHERE l_shipmode = 'AIR';
-- join between view and table
SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey);
diff --git a/src/test/regress/sql/non_colocated_subquery_joins.sql b/src/test/regress/sql/non_colocated_subquery_joins.sql
index bde8f5b0a..a74b8e16e 100644
--- a/src/test/regress/sql/non_colocated_subquery_joins.sql
+++ b/src/test/regress/sql/non_colocated_subquery_joins.sql
@@ -792,7 +792,7 @@ SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('table2','tenant_id');
SELECT create_distributed_table('table1','tenant_id');
-CREATE VIEW table1_view AS SELECT * from table1 where id < 100;
+CREATE VIEW table1_view AS SELECT * from table1 table_name_for_view where id < 100;
-- all of the above queries are non-colocated subquery joins
-- because the views are replaced with subqueries
diff --git a/src/test/regress/sql/pg12.sql b/src/test/regress/sql/pg12.sql
index a86dbbb42..831ce40bb 100644
--- a/src/test/regress/sql/pg12.sql
+++ b/src/test/regress/sql/pg12.sql
@@ -242,11 +242,13 @@ COMMIT;
SELECT DISTINCT y FROM test;
-- non deterministic collations
+SET client_min_messages TO WARNING;
CREATE COLLATION test_pg12.case_insensitive (
provider = icu,
locale = '@colStrength=secondary',
deterministic = false
);
+RESET client_min_messages;
CREATE TABLE col_test (
id int,
diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql
index afac00174..8d3f430ce 100644
--- a/src/test/regress/sql/pg14.sql
+++ b/src/test/regress/sql/pg14.sql
@@ -509,7 +509,9 @@ SELECT create_distributed_table('J2_TBL','i');
-- test join using aliases
SELECT * FROM J1_TBL JOIN J2_TBL USING (i) WHERE J1_TBL.t = 'one' ORDER BY 1,2,3,4; -- ok
SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one' ORDER BY 1,2,3,4; -- ok
+\set VERBOSITY terse
SELECT * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t = 'one' ORDER BY 1,2,3,4; -- error
+\set VERBOSITY default
SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.i = 1 ORDER BY 1,2,3,4; -- ok
SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.t = 'one' ORDER BY 1,2,3,4; -- error
SELECT * FROM (J1_TBL JOIN J2_TBL USING (i) AS x) AS xx WHERE x.i = 1 ORDER BY 1,2,3,4; -- error (XXX could use better hint)
diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql
index a8ac91901..fe60222dd 100644
--- a/src/test/regress/sql/pg15.sql
+++ b/src/test/regress/sql/pg15.sql
@@ -530,7 +530,7 @@ SELECT create_distributed_table('FKTABLE', 'tid');
SELECT create_reference_table('FKTABLE');
-- show that the definition is expected
-SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
+SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY 1;
\c - - - :worker_1_port
@@ -933,6 +933,43 @@ DROP TABLE mx_ddl_table2;
DROP ACCESS METHOD heap2;
SELECT run_command_on_workers($$DROP ACCESS METHOD heap2$$);
+CREATE TABLE referenced (int_col integer PRIMARY KEY);
+CREATE TABLE referencing (text_col text);
+
+SET citus.shard_replication_factor TO 1;
+SELECT create_distributed_table('referenced', null);
+SELECT create_distributed_table('referencing', null);
+RESET citus.shard_replication_factor;
+
+CREATE OR REPLACE FUNCTION my_random(numeric)
+ RETURNS numeric AS
+$$
+BEGIN
+ RETURN 7 * $1;
+END;
+$$
+LANGUAGE plpgsql IMMUTABLE;
+
+ALTER TABLE referencing ADD COLUMN test_2 integer UNIQUE NULLS DISTINCT REFERENCES referenced(int_col);
+ALTER TABLE referencing ADD COLUMN test_3 integer GENERATED ALWAYS AS (text_col::int * my_random(1)) STORED UNIQUE NULLS NOT DISTINCT;
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_grouped_fkey_constraints FROM get_grouped_fkey_constraints('pg15.referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+
+SELECT (groupid = 0) AS is_coordinator, result FROM run_command_on_all_nodes(
+ $$SELECT get_index_defs FROM get_index_defs('pg15', 'referencing')$$
+)
+JOIN pg_dist_node USING (nodeid)
+ORDER BY is_coordinator DESC, result;
+
+set citus.log_remote_commands = true;
+set citus.grep_remote_commands = '%ALTER DATABASE%';
+alter database regression REFRESH COLLATION VERSION;
+set citus.log_remote_commands = false;
+
-- Clean up
\set VERBOSITY terse
SET client_min_messages TO ERROR;
diff --git a/src/test/regress/sql/pg16.sql b/src/test/regress/sql/pg16.sql
new file mode 100644
index 000000000..82e9edf1e
--- /dev/null
+++ b/src/test/regress/sql/pg16.sql
@@ -0,0 +1,660 @@
+--
+-- PG16
+--
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+\else
+\q
+\endif
+
+CREATE SCHEMA pg16;
+SET search_path TO pg16;
+SET citus.next_shard_id TO 950000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1400000;
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 1;
+
+-- test the new vacuum and analyze options
+-- Relevant PG commits:
+-- https://github.com/postgres/postgres/commit/1cbbee03385763b066ae3961fc61f2cd01a0d0d7
+-- https://github.com/postgres/postgres/commit/4211fbd8413b26e0abedbe4338aa7cda2cd469b4
+-- https://github.com/postgres/postgres/commit/a46a7011b27188af526047a111969f257aaf4db8
+
+CREATE TABLE t1 (a int);
+SELECT create_distributed_table('t1','a');
+SET citus.log_remote_commands TO ON;
+
+VACUUM (PROCESS_MAIN FALSE) t1;
+VACUUM (PROCESS_MAIN FALSE, PROCESS_TOAST FALSE) t1;
+VACUUM (PROCESS_MAIN TRUE) t1;
+VACUUM (PROCESS_MAIN FALSE, FULL) t1;
+VACUUM (SKIP_DATABASE_STATS) t1;
+VACUUM (ONLY_DATABASE_STATS) t1;
+VACUUM (BUFFER_USAGE_LIMIT '512 kB') t1;
+VACUUM (BUFFER_USAGE_LIMIT 0) t1;
+VACUUM (BUFFER_USAGE_LIMIT 16777220) t1;
+VACUUM (BUFFER_USAGE_LIMIT -1) t1;
+VACUUM (BUFFER_USAGE_LIMIT 'test') t1;
+ANALYZE (BUFFER_USAGE_LIMIT '512 kB') t1;
+ANALYZE (BUFFER_USAGE_LIMIT 0) t1;
+
+SET citus.log_remote_commands TO OFF;
+
+-- only verifying that it works; not logging the remote
+-- commands because the output can be flaky
+VACUUM (ONLY_DATABASE_STATS);
+
+-- New GENERIC_PLAN option in EXPLAIN
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/3c05284
+
+CREATE TABLE tenk1 (
+ unique1 int4,
+ unique2 int4,
+ thousand int4
+);
+SELECT create_distributed_table('tenk1', 'unique1');
+
+SET citus.log_remote_commands TO on;
+EXPLAIN (GENERIC_PLAN) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
+EXPLAIN (GENERIC_PLAN, ANALYZE) SELECT unique1 FROM tenk1 WHERE thousand = 1000;
+SET citus.log_remote_commands TO off;
+
+-- Proper error when creating statistics without a name on a Citus table
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/624aa2a13bd02dd584bb0995c883b5b93b2152df
+
+CREATE TABLE test_stats (
+ a int,
+ b int
+);
+
+SELECT create_distributed_table('test_stats', 'a');
+
+CREATE STATISTICS (dependencies) ON a, b FROM test_stats;
+CREATE STATISTICS (ndistinct, dependencies) on a, b from test_stats;
+CREATE STATISTICS (ndistinct, dependencies, mcv) on a, b from test_stats;
+
+-- STORAGE option in CREATE is already propagated by Citus
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/784cedd
+CREATE TABLE test_storage (a text, c text STORAGE plain);
+SELECT create_distributed_table('test_storage', 'a', shard_count := 2);
+SELECT result FROM run_command_on_all_nodes
+($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
+ WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
+
+SELECT alter_distributed_table('test_storage', shard_count := 4);
+SELECT result FROM run_command_on_all_nodes
+($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
+ WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
+
+SELECT undistribute_table('test_storage');
+SELECT result FROM run_command_on_all_nodes
+($$ SELECT array_agg(DISTINCT (attname, attstorage)) FROM pg_attribute
+ WHERE attrelid::regclass::text ILIKE 'pg16.test_storage%' AND attnum > 0;$$) ORDER BY 1;
+
+-- New option to change storage to DEFAULT in PG16
+-- ALTER TABLE .. ALTER COLUMN .. SET STORAGE is already
+-- not supported by Citus, so this is also not supported
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/b9424d0
+SELECT create_distributed_table('test_storage', 'a');
+ALTER TABLE test_storage ALTER a SET STORAGE default;
+
+-- New ICU_RULES option added to CREATE DATABASE
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/30a53b7
+
+CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0';
+SELECT result FROM run_command_on_workers
+($$CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0'$$);
+
+CREATE TABLE test_db_table (a text);
+SELECT create_distributed_table('test_db_table', 'a');
+INSERT INTO test_db_table VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green');
+-- icu default rules order
+SELECT * FROM test_db_table ORDER BY a COLLATE "en-x-icu";
+-- regression database's default order
+SELECT * FROM test_db_table ORDER BY a;
+
+-- now see the order in the new database
+\c test_db
+CREATE EXTENSION citus;
+\c - - - :worker_1_port
+CREATE EXTENSION citus;
+\c - - - :worker_2_port
+CREATE EXTENSION citus;
+\c - - - :master_port
+
+SELECT 1 FROM citus_add_node('localhost', :worker_1_port);
+SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
+
+CREATE TABLE test_db_table (a text);
+SELECT create_distributed_table('test_db_table', 'a');
+INSERT INTO test_db_table VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green');
+-- icu default rules order
+SELECT * FROM test_db_table ORDER BY a COLLATE "en-x-icu";
+-- test_db database's default order with ICU_RULES = '&a < g'
+SELECT * FROM test_db_table ORDER BY a;
+
+\c regression
+\c - - - :master_port
+DROP DATABASE test_db;
+SELECT result FROM run_command_on_workers
+($$DROP DATABASE test_db$$);
+SET search_path TO pg16;
+
+-- New rules option added to CREATE COLLATION
+-- Similar to above test with CREATE DATABASE
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/30a53b7
+
+CREATE COLLATION default_rule (provider = icu, locale = '');
+CREATE COLLATION special_rule (provider = icu, locale = '', rules = '&a < g');
+
+CREATE TABLE test_collation_rules (a text);
+SELECT create_distributed_table('test_collation_rules', 'a');
+INSERT INTO test_collation_rules VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green');
+
+SELECT collname, collprovider, colliculocale, collicurules
+FROM pg_collation
+WHERE collname like '%_rule%'
+ORDER BY 1;
+
+SELECT * FROM test_collation_rules ORDER BY a COLLATE default_rule;
+SELECT * FROM test_collation_rules ORDER BY a COLLATE special_rule;
+
+\c - - - :worker_1_port
+SET search_path TO pg16;
+
+SELECT collname, collprovider, colliculocale, collicurules
+FROM pg_collation
+WHERE collname like '%_rule%'
+ORDER BY 1;
+
+SELECT * FROM test_collation_rules ORDER BY a COLLATE default_rule;
+SELECT * FROM test_collation_rules ORDER BY a COLLATE special_rule;
+
+\c - - - :master_port
+SET search_path TO pg16;
+SET citus.next_shard_id TO 951000;
+
+-- Foreign table TRUNCATE trigger
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/3b00a94
+SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
+SET citus.use_citus_managed_tables TO ON;
+CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial);
+INSERT INTO foreign_table_test VALUES (1, 'text_test');
+CREATE EXTENSION postgres_fdw;
+CREATE SERVER foreign_server
+ FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (host 'localhost', port :'master_port', dbname 'regression');
+CREATE USER MAPPING FOR CURRENT_USER
+ SERVER foreign_server
+ OPTIONS (user 'postgres');
+CREATE FOREIGN TABLE foreign_table (
+ id integer NOT NULL,
+ data text,
+ a bigserial
+)
+ SERVER foreign_server
+ OPTIONS (schema_name 'pg16', table_name 'foreign_table_test');
+
+-- verify it's a Citus foreign table
+SELECT partmethod, repmodel FROM pg_dist_partition
+WHERE logicalrelid = 'foreign_table'::regclass ORDER BY logicalrelid;
+
+INSERT INTO foreign_table VALUES (2, 'test_2');
+INSERT INTO foreign_table_test VALUES (3, 'test_3');
+
+CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+ RAISE NOTICE 'trigger_func(%) called: action = %, when = %, level = %',
+ TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
+ RETURN NULL;
+END;$$;
+
+CREATE FUNCTION trigger_func_on_shard() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+ RAISE NOTICE 'trigger_func_on_shard(%) called: action = %, when = %, level = %',
+ TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
+ RETURN NULL;
+END;$$;
+
+CREATE TRIGGER trig_stmt_before BEFORE TRUNCATE ON foreign_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+SET citus.override_table_visibility TO off;
+CREATE TRIGGER trig_stmt_shard_before BEFORE TRUNCATE ON foreign_table_951001
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func_on_shard();
+RESET citus.override_table_visibility;
+
+SELECT * FROM foreign_table ORDER BY 1;
+TRUNCATE foreign_table;
+SELECT * FROM foreign_table ORDER BY 1;
+
+RESET citus.use_citus_managed_tables;
+
+--
+-- COPY FROM ... DEFAULT
+-- Already supported in Citus, adding all PG tests with a distributed table
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/9f8377f
+CREATE TABLE copy_default (
+ id integer PRIMARY KEY,
+ text_value text NOT NULL DEFAULT 'test',
+ ts_value timestamp without time zone NOT NULL DEFAULT '2022-07-05'
+);
+SELECT create_distributed_table('copy_default', 'id');
+
+-- if DEFAULT is not specified, then the marker will be regular data
+COPY copy_default FROM stdin;
+1 value '2022-07-04'
+2 \D '2022-07-05'
+\.
+SELECT * FROM copy_default ORDER BY id;
+TRUNCATE copy_default;
+
+COPY copy_default FROM stdin WITH (format csv);
+1,value,2022-07-04
+2,\D,2022-07-05
+\.
+SELECT * FROM copy_default ORDER BY id;
+TRUNCATE copy_default;
+
+-- DEFAULT cannot be used in binary mode
+COPY copy_default FROM stdin WITH (format binary, default '\D');
+
+-- DEFAULT cannot be new line nor carriage return
+COPY copy_default FROM stdin WITH (default E'\n');
+COPY copy_default FROM stdin WITH (default E'\r');
+
+-- DELIMITER cannot appear in DEFAULT spec
+COPY copy_default FROM stdin WITH (delimiter ';', default 'test;test');
+
+-- CSV quote cannot appear in DEFAULT spec
+COPY copy_default FROM stdin WITH (format csv, quote '"', default 'test"test');
+
+-- NULL and DEFAULT spec must be different
+COPY copy_default FROM stdin WITH (default '\N');
+
+-- cannot use DEFAULT marker in column that has no DEFAULT value
+COPY copy_default FROM stdin WITH (default '\D');
+\D value '2022-07-04'
+2 \D '2022-07-05'
+\.
+
+COPY copy_default FROM stdin WITH (format csv, default '\D');
+\D,value,2022-07-04
+2,\D,2022-07-05
+\.
+
+-- The DEFAULT marker must be unquoted and unescaped or it's not recognized
+COPY copy_default FROM stdin WITH (default '\D');
+1 \D '2022-07-04'
+2 \\D '2022-07-04'
+3 "\D" '2022-07-04'
+\.
+SELECT * FROM copy_default ORDER BY id;
+TRUNCATE copy_default;
+
+COPY copy_default FROM stdin WITH (format csv, default '\D');
+1,\D,2022-07-04
+2,\\D,2022-07-04
+3,"\D",2022-07-04
+\.
+SELECT * FROM copy_default ORDER BY id;
+TRUNCATE copy_default;
+
+-- successful usage of DEFAULT option in COPY
+COPY copy_default FROM stdin WITH (default '\D');
+1 value '2022-07-04'
+2 \D '2022-07-03'
+3 \D \D
+\.
+SELECT * FROM copy_default ORDER BY id;
+TRUNCATE copy_default;
+
+COPY copy_default FROM stdin WITH (format csv, default '\D');
+1,value,2022-07-04
+2,\D,2022-07-03
+3,\D,\D
+\.
+SELECT * FROM copy_default ORDER BY id;
+TRUNCATE copy_default;
+
+\c - - - :worker_1_port
+COPY pg16.copy_default FROM stdin WITH (format csv, default '\D');
+1,value,2022-07-04
+2,\D,2022-07-03
+3,\D,\D
+\.
+SELECT * FROM pg16.copy_default ORDER BY id;
+
+\c - - - :master_port
+TRUNCATE pg16.copy_default;
+
+\c - - - :worker_2_port
+COPY pg16.copy_default FROM stdin WITH (format csv, default '\D');
+1,value,2022-07-04
+2,\D,2022-07-03
+3,\D,\D
+\.
+SELECT * FROM pg16.copy_default ORDER BY id;
+
+\c - - - :master_port
+SET search_path TO pg16;
+SET citus.shard_count TO 1;
+SET citus.shard_replication_factor TO 1;
+
+-- DEFAULT cannot be used in COPY TO
+COPY (select 1 as test) TO stdout WITH (default '\D');
+
+-- Tests for SQL/JSON: JSON_ARRAYAGG and JSON_OBJECTAGG aggregates
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/7081ac4
+SET citus.next_shard_id TO 952000;
+
+CREATE TABLE agg_test(a int, b serial);
+SELECT create_distributed_table('agg_test', 'a');
+INSERT INTO agg_test SELECT i FROM generate_series(1, 5) i;
+
+-- JSON_ARRAYAGG with distribution key
+SELECT JSON_ARRAYAGG(a ORDER BY a),
+JSON_ARRAYAGG(a ORDER BY a RETURNING jsonb)
+FROM agg_test;
+
+-- JSON_ARRAYAGG with other column
+SELECT JSON_ARRAYAGG(b ORDER BY b),
+JSON_ARRAYAGG(b ORDER BY b RETURNING jsonb)
+FROM agg_test;
+
+-- JSON_ARRAYAGG with router query
+SET citus.log_remote_commands TO on;
+SELECT JSON_ARRAYAGG(a ORDER BY a),
+JSON_ARRAYAGG(a ORDER BY a RETURNING jsonb)
+FROM agg_test WHERE a = 2;
+RESET citus.log_remote_commands;
+
+-- JSON_OBJECTAGG with distribution key
+SELECT
+ JSON_OBJECTAGG(a: a),
+ JSON_ARRAYAGG(a ORDER BY a), -- for order
+ JSON_OBJECTAGG(a: a RETURNING jsonb)
+FROM
+ agg_test;
+
+-- JSON_OBJECTAGG with other column
+SELECT
+ JSON_OBJECTAGG(b: b),
+ JSON_ARRAYAGG(b ORDER BY b), -- for order
+ JSON_OBJECTAGG(b: b RETURNING jsonb)
+FROM
+ agg_test;
+
+-- JSON_OBJECTAGG with router query
+SET citus.log_remote_commands TO on;
+SELECT
+ JSON_OBJECTAGG(a: a),
+ JSON_OBJECTAGG(a: a RETURNING jsonb)
+FROM
+ agg_test WHERE a = 3;
+RESET citus.log_remote_commands;
+
+-- Tests for SQL/JSON: support the IS JSON predicate
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/6ee30209
+
+CREATE TABLE test_is_json (id bigserial, js text);
+SELECT create_distributed_table('test_is_json', 'id');
+
+INSERT INTO test_is_json(js) VALUES
+ (NULL),
+ (''),
+ ('123'),
+ ('"aaa "'),
+ ('true'),
+ ('null'),
+ ('[]'),
+ ('[1, "2", {}]'),
+ ('{}'),
+ ('{ "a": 1, "b": null }'),
+ ('{ "a": 1, "a": null }'),
+ ('{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] }'),
+ ('{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] }'),
+ ('aaa'),
+ ('{a:1}'),
+ ('["a",]');
+
+-- run the IS JSON predicate on the worker nodes
+SELECT
+ js,
+ js IS JSON "JSON",
+ js IS NOT JSON "NOT JSON",
+ js IS JSON VALUE "VALUE",
+ js IS JSON OBJECT "OBJECT",
+ js IS JSON ARRAY "ARRAY",
+ js IS JSON SCALAR "SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ test_is_json ORDER BY js;
+
+-- pull the data and run the IS JSON predicate on the coordinator
+WITH pulled_data as (SELECT js FROM test_is_json OFFSET 0)
+SELECT
+ js,
+ js IS JSON "IS JSON",
+ js IS NOT JSON "IS NOT JSON",
+ js IS JSON VALUE "IS VALUE",
+ js IS JSON OBJECT "IS OBJECT",
+ js IS JSON ARRAY "IS ARRAY",
+ js IS JSON SCALAR "IS SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ pulled_data ORDER BY js;
+
+SELECT
+ js,
+ js IS JSON "IS JSON",
+ js IS NOT JSON "IS NOT JSON",
+ js IS JSON VALUE "IS VALUE",
+ js IS JSON OBJECT "IS OBJECT",
+ js IS JSON ARRAY "IS ARRAY",
+ js IS JSON SCALAR "IS SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ (SELECT js::json FROM test_is_json WHERE js IS JSON) foo(js);
+
+SELECT
+ js0,
+ js IS JSON "IS JSON",
+ js IS NOT JSON "IS NOT JSON",
+ js IS JSON VALUE "IS VALUE",
+ js IS JSON OBJECT "IS OBJECT",
+ js IS JSON ARRAY "IS ARRAY",
+ js IS JSON SCALAR "IS SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ (SELECT js, js::bytea FROM test_is_json WHERE js IS JSON) foo(js0, js);
+
+SELECT
+ js,
+ js IS JSON "IS JSON",
+ js IS NOT JSON "IS NOT JSON",
+ js IS JSON VALUE "IS VALUE",
+ js IS JSON OBJECT "IS OBJECT",
+ js IS JSON ARRAY "IS ARRAY",
+ js IS JSON SCALAR "IS SCALAR",
+ js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE",
+ js IS JSON WITH UNIQUE KEYS "WITH UNIQUE"
+FROM
+ (SELECT js::jsonb FROM test_is_json WHERE js IS JSON) foo(js);
+
+-- SYSTEM_USER
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/0823d061
+
+CREATE TABLE table_name_for_view(id int, val_1 text);
+SELECT create_distributed_table('table_name_for_view', 'id');
+INSERT INTO table_name_for_view VALUES (1, 'test');
+
+-- define a view that uses SYSTEM_USER keyword
+CREATE VIEW prop_view_1 AS
+ SELECT *, SYSTEM_USER AS su FROM table_name_for_view;
+SELECT * FROM prop_view_1;
+
+-- check definition with SYSTEM_USER is correctly propagated to workers
+\c - - - :worker_1_port
+SELECT pg_get_viewdef('pg16.prop_view_1', true);
+
+\c - - - :master_port
+SET search_path TO pg16;
+
+-- REINDEX DATABASE/SYSTEM name is optional
+-- We already do not propagate these commands automatically;
+-- testing here with run_command_on_workers
+-- Relevant PG commit: https://github.com/postgres/postgres/commit/2cbc3c1
+
+REINDEX DATABASE;
+SELECT result FROM run_command_on_workers
+($$REINDEX DATABASE$$);
+
+REINDEX SYSTEM;
+SELECT result FROM run_command_on_workers
+($$REINDEX SYSTEM$$);
+
+--
+-- random_normal() to provide normally-distributed random numbers
+-- adding here the same tests as the ones with random() in aggregate_support.sql
+-- Relevant PG commit: https://github.com/postgres/postgres/commit/38d8176
+--
+
+CREATE TABLE dist_table (dist_col int, agg_col numeric);
+SELECT create_distributed_table('dist_table', 'dist_col');
+
+CREATE TABLE ref_table (int_col int);
+SELECT create_reference_table('ref_table');
+
+-- Test the cases where the worker agg exec. returns no tuples.
+
+SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
+FROM (SELECT *, random_normal() FROM dist_table) a;
+
+SELECT PERCENTILE_DISC((2 > random_normal(stddev => 1, mean => 0))::int::numeric / 10)
+ WITHIN GROUP (ORDER BY agg_col)
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+
+-- run the same queries after loading some data
+
+INSERT INTO dist_table VALUES (2, 11.2), (3, NULL), (6, 3.22), (3, 4.23), (5, 5.25),
+ (4, 63.4), (75, NULL), (80, NULL), (96, NULL), (8, 1078), (0, 1.19);
+
+SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col)
+FROM (SELECT *, random_normal() FROM dist_table) a;
+
+SELECT PERCENTILE_DISC((2 > random_normal(stddev => 1, mean => 0))::int::numeric / 10)
+ WITHIN GROUP (ORDER BY agg_col)
+FROM dist_table
+LEFT JOIN ref_table ON TRUE;
+
+--
+-- PG16 added WITH ADMIN FALSE option to GRANT ROLE
+-- WITH ADMIN FALSE is the default; make sure we propagate it correctly in Citus
+-- Relevant PG commit: https://github.com/postgres/postgres/commit/e3ce2de
+--
+
+CREATE ROLE role1;
+CREATE ROLE role2;
+
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%GRANT%';
+-- default admin option is false
+GRANT role1 TO role2;
+REVOKE role1 FROM role2;
+-- should behave same as default
+GRANT role1 TO role2 WITH ADMIN FALSE;
+REVOKE role1 FROM role2;
+-- with admin option and with admin true are the same
+GRANT role1 TO role2 WITH ADMIN OPTION;
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH ADMIN TRUE;
+REVOKE role1 FROM role2;
+
+RESET citus.log_remote_commands;
+RESET citus.grep_remote_commands;
+
+--
+-- PG16 added new options to GRANT ROLE
+-- inherit: https://github.com/postgres/postgres/commit/e3ce2de
+-- set: https://github.com/postgres/postgres/commit/3d14e17
+-- We don't propagate these options in Citus for now
+--
+GRANT role1 TO role2 WITH INHERIT FALSE;
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH INHERIT TRUE;
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH INHERIT OPTION;
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH SET FALSE;
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH SET TRUE;
+REVOKE role1 FROM role2;
+GRANT role1 TO role2 WITH SET OPTION;
+REVOKE role1 FROM role2;
+
+-- connect to worker node
+GRANT role1 TO role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE;
+
+SELECT roleid::regrole::text AS role, member::regrole::text,
+admin_option, inherit_option, set_option FROM pg_auth_members
+WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2;
+
+\c - - - :worker_1_port
+
+SELECT roleid::regrole::text AS role, member::regrole::text,
+admin_option, inherit_option, set_option FROM pg_auth_members
+WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2;
+
+SET citus.enable_ddl_propagation TO off;
+GRANT role1 TO role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE;
+RESET citus.enable_ddl_propagation;
+
+SELECT roleid::regrole::text AS role, member::regrole::text,
+admin_option, inherit_option, set_option FROM pg_auth_members
+WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2;
+
+\c - - - :master_port
+REVOKE role1 FROM role2;
+
+-- test REVOKES as well
+GRANT role1 TO role2;
+REVOKE SET OPTION FOR role1 FROM role2;
+REVOKE INHERIT OPTION FOR role1 FROM role2;
+
+DROP ROLE role1, role2;
+
+-- test that everything works fine for roles that are not propagated
+SET citus.enable_ddl_propagation TO off;
+CREATE ROLE role3;
+CREATE ROLE role4;
+CREATE ROLE role5;
+RESET citus.enable_ddl_propagation;
+-- by default, admin option is false, inherit is true, set is true
+GRANT role3 TO role4;
+GRANT role3 TO role5 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE;
+SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text = 'role3' ORDER BY 1, 2;
+
+DROP ROLE role3, role4, role5;
+
+\set VERBOSITY terse
+SET client_min_messages TO ERROR;
+DROP EXTENSION postgres_fdw CASCADE;
+DROP SCHEMA pg16 CASCADE;
diff --git a/src/test/regress/sql/pgmerge.sql b/src/test/regress/sql/pgmerge.sql
index ab1f4a40d..e1f3c7aab 100644
--- a/src/test/regress/sql/pgmerge.sql
+++ b/src/test/regress/sql/pgmerge.sql
@@ -180,11 +180,13 @@ WHEN NOT MATCHED THEN
-- check if the target can be accessed from source relation subquery; we should
-- not be able to do so
+\set VERBOSITY terse
MERGE INTO target t
USING (SELECT * FROM source WHERE t.tid > sid) s
ON t.tid = s.sid
WHEN NOT MATCHED THEN
INSERT DEFAULT VALUES;
+\set VERBOSITY default
--
-- initial tests
--
@@ -431,6 +433,7 @@ SELECT * FROM target ORDER BY tid;
ROLLBACK;
-- and again with a subtle error: referring to non-existent target row for NOT MATCHED
+\set VERBOSITY terse
MERGE INTO target t
USING source AS s
ON t.tid = s.sid
@@ -520,6 +523,7 @@ WHEN NOT MATCHED AND t.balance = 100 THEN
INSERT (tid) VALUES (s.sid);
SELECT * FROM wq_target;
ROLLBACK;
+\set VERBOSITY default
MERGE INTO wq_target t
USING wq_source s ON t.tid = s.sid
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 8bd2ea923..06bdc39fe 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -273,6 +273,110 @@ CREATE PUBLICATION pubdep FOR TABLES IN SCHEMA deptest;
RESET citus.create_object_propagation;
DROP SCHEMA deptest CASCADE;
+--
+-- PG16 allows publications with schema and table of the same schema.
+-- backpatched to PG15
+-- Relevant PG commit: https://github.com/postgres/postgres/commit/13a185f
+--
+
+CREATE SCHEMA publication2;
+CREATE TABLE publication2.test1 (id int);
+SELECT create_distributed_table('publication2.test1', 'id');
+
+-- should be able to create publication with schema and table of the same
+-- schema
+CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test1;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+
+CREATE TABLE publication.test2 (id int);
+SELECT create_distributed_table('publication.test2', 'id');
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLE publication.test2;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+
+-- should be able to have the publication2 schema and its new table test2 in the testpub_for_tbl_schema publication
+ALTER TABLE test2 SET SCHEMA publication2;
+
+-- should be able to add a table of the same schema to the schema publication
+CREATE TABLE publication2.test3 (x int primary key, y int, "column-1" int);
+SELECT create_distributed_table('publication2.test3', 'x');
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLE publication2.test3;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+
+-- should be able to drop the table
+ALTER PUBLICATION testpub_for_tbl_schema DROP TABLE publication2.test3;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+
+DROP PUBLICATION testpub_for_tbl_schema;
+CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2;
+-- should be able to set publication with schema and table of the same schema
+ALTER PUBLICATION testpub_for_tbl_schema SET TABLES IN SCHEMA publication2, TABLE publication2.test1 WHERE (id < 99);
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+
+-- test that using column list for table is disallowed if any schemas are
+-- part of the publication
+DROP PUBLICATION testpub_for_tbl_schema;
+
+-- failure - cannot use column list and schema together
+CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2, TABLE publication2.test3(y);
+
+-- ok - only publish schema
+CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA publication2;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+
+-- failure - add a table with column list when there is already a schema in the
+-- publication
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLE publication2.test3(y);
+
+-- ok - only publish table with column list
+ALTER PUBLICATION testpub_for_tbl_schema SET TABLE publication2.test3(y);
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+
+-- failure - specify a schema when there is already a column list in the
+-- publication
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLES IN SCHEMA publication2;
+
+-- failure - cannot SET column list and schema together
+ALTER PUBLICATION testpub_for_tbl_schema SET TABLES IN SCHEMA publication2, TABLE publication2.test3(y);
+
+-- ok - drop table
+ALTER PUBLICATION testpub_for_tbl_schema DROP TABLE publication2.test3;
+SELECT DISTINCT c FROM (
+ SELECT unnest(result::text[]) c
+ FROM run_command_on_workers($$
+ SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%testpub_for_tbl_schema%' ORDER BY 1) s$$)
+ ORDER BY c) s;
+
+-- failure - cannot ADD column list and schema together
+ALTER PUBLICATION testpub_for_tbl_schema ADD TABLES IN SCHEMA publication2, TABLE publication2.test3(y);
+
-- make sure we can sync all the publication metadata
SELECT start_metadata_sync_to_all_nodes();
@@ -280,8 +384,10 @@ DROP PUBLICATION pubdep;
DROP PUBLICATION "pub-mix";
DROP PUBLICATION pubtables;
DROP PUBLICATION pubpartitioned;
+DROP PUBLICATION testpub_for_tbl_schema;
SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE;
DROP SCHEMA citus_schema_1 CASCADE;
+DROP SCHEMA publication2 CASCADE;
diff --git a/src/test/regress/sql/query_single_shard_table.sql b/src/test/regress/sql/query_single_shard_table.sql
index c77d5b1dd..96de2705c 100644
--- a/src/test/regress/sql/query_single_shard_table.sql
+++ b/src/test/regress/sql/query_single_shard_table.sql
@@ -892,16 +892,6 @@ SELECT raw_events_first.user_id
FROM raw_events_first INNER JOIN users_ref_table ON raw_events_first.user_id = users_ref_table.user_id
WHERE raw_events_first.value_1 IN (10, 11,12) OR users_ref_table.user_id IN (1,2,3,4);
--- We could relax distributed insert .. select checks to allow pushing
--- down more clauses down to the worker nodes when inserting into a single
--- shard by selecting from a colocated one. We might want to do something
--- like https://github.com/citusdata/citus/pull/6772.
---
--- e.g., insert into null_shard_key_1/citus_local/reference
--- select * from null_shard_key_1/citus_local/reference limit 1
---
--- Below "limit / offset clause" test and some others are examples of this.
-
-- limit / offset clause
INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LIMIT 1;
INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first OFFSET 1;
diff --git a/src/test/regress/sql/recurring_outer_join.sql b/src/test/regress/sql/recurring_outer_join.sql
index 595d734ec..d33309817 100644
--- a/src/test/regress/sql/recurring_outer_join.sql
+++ b/src/test/regress/sql/recurring_outer_join.sql
@@ -612,10 +612,9 @@ USING (a);
-- same test using a view, can be recursively planned
CREATE VIEW my_view_1 AS
-SELECT * FROM dist_1 t2 WHERE EXISTS (
+SELECT * FROM dist_1 table_name_for_view WHERE EXISTS (
SELECT * FROM dist_1 t4
- WHERE t4.a = t2.a
-);
+ WHERE t4.a = table_name_for_view.a);
SELECT COUNT(*) FROM ref_1 t1
LEFT JOIN
diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
index cc5e74cd9..284fdfc66 100644
--- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
+++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql
@@ -192,7 +192,7 @@ DROP VIEW numbers_v, local_table_v;
-- Joins between reference tables and materialized views are allowed to
-- be planned to be executed locally.
--
-CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers WHERE a BETWEEN 1 AND 10;
+CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers table_name_for_view WHERE a BETWEEN 1 AND 10;
REFRESH MATERIALIZED VIEW numbers_v;
SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a ORDER BY 1;
diff --git a/src/test/regress/sql/schema_based_sharding.sql b/src/test/regress/sql/schema_based_sharding.sql
index 2b9bbf516..bd8065ab9 100644
--- a/src/test/regress/sql/schema_based_sharding.sql
+++ b/src/test/regress/sql/schema_based_sharding.sql
@@ -905,7 +905,6 @@ CREATE ROLE test_non_super_user WITH LOGIN;
ALTER ROLE test_non_super_user NOSUPERUSER;
GRANT CREATE ON DATABASE regression TO test_non_super_user;
-SELECT result FROM run_command_on_workers($$GRANT CREATE ON DATABASE regression TO test_non_super_user$$);
GRANT CREATE ON SCHEMA public TO test_non_super_user ;
@@ -997,7 +996,6 @@ $$);
\c - postgres
REVOKE CREATE ON DATABASE regression FROM test_non_super_user;
-SELECT result FROM run_command_on_workers($$REVOKE CREATE ON DATABASE regression FROM test_non_super_user$$);
REVOKE CREATE ON SCHEMA public FROM test_non_super_user;
diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql
index d64fb6826..07efa8617 100644
--- a/src/test/regress/sql/shard_rebalancer.sql
+++ b/src/test/regress/sql/shard_rebalancer.sql
@@ -1427,8 +1427,14 @@ SELECT create_distributed_table('test_rebalance_with_disabled_worker', 'a', colo
SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(30000);
+-- errors out because the shard replication factor exceeds the number of nodes allowed to hold shards
SELECT rebalance_table_shards('test_rebalance_with_disabled_worker');
+-- set replication factor to one, and try again
+SET citus.shard_replication_factor TO 1;
+SELECT rebalance_table_shards('test_rebalance_with_disabled_worker');
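+-- restore the replication factor of 2 that was in effect before this block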
+SET citus.shard_replication_factor TO 2;
+
SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
DROP TABLE test_rebalance_with_disabled_worker;
@@ -1439,7 +1445,7 @@ DROP TABLE IF EXISTS test_with_all_shards_excluded;
CREATE TABLE test_with_all_shards_excluded(a int PRIMARY KEY);
SELECT create_distributed_table('test_with_all_shards_excluded', 'a', colocate_with:='none', shard_count:=4);
-SELECT shardid FROM pg_dist_shard;
+SELECT shardid FROM pg_dist_shard ORDER BY shardid ASC;
SELECT rebalance_table_shards('test_with_all_shards_excluded', excluded_shard_list:='{102073, 102074, 102075, 102076}');
diff --git a/src/test/regress/sql/single_node.sql b/src/test/regress/sql/single_node.sql
index 55f244b16..2bb7c58a3 100644
--- a/src/test/regress/sql/single_node.sql
+++ b/src/test/regress/sql/single_node.sql
@@ -114,8 +114,68 @@ WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND
RESET citus.enable_schema_based_sharding;
+-- Test lazy conversion from Citus local tables to single-shard tables
+-- and reference tables on a single node, where no shard replication
+-- should be needed.
+
+CREATE TABLE ref_table_conversion_test (
+ a int PRIMARY KEY
+);
+SELECT citus_add_local_table_to_metadata('ref_table_conversion_test');
+
+-- save old shardid and placementid
+SELECT get_shard_id_for_distribution_column('single_node.ref_table_conversion_test') AS ref_table_conversion_test_old_shard_id \gset
+SELECT placementid AS ref_table_conversion_test_old_coord_placement_id FROM pg_dist_placement WHERE shardid = :ref_table_conversion_test_old_shard_id \gset
+
+SELECT create_reference_table('ref_table_conversion_test');
+
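+-- the conversion should reuse the old shard and its coordinator placement,
+-- hence we pass the ids saved above to the verification helpers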
+SELECT public.verify_pg_dist_partition_for_reference_table('single_node.ref_table_conversion_test');
+SELECT public.verify_shard_placements_for_reference_table('single_node.ref_table_conversion_test',
+ :ref_table_conversion_test_old_shard_id,
+ :ref_table_conversion_test_old_coord_placement_id);
+
+CREATE TABLE single_shard_conversion_test_1 (
+ int_col_1 int PRIMARY KEY,
+ text_col_1 text UNIQUE,
+ int_col_2 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_1');
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_1') AS single_shard_conversion_test_1_old_shard_id \gset
+
+SELECT create_distributed_table('single_shard_conversion_test_1', null, colocate_with=>'none');
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_1');
+SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_1', :single_shard_conversion_test_1_old_shard_id, true);
+
+CREATE TABLE single_shard_conversion_test_2 (
+ int_col_1 int
+);
+SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_2');
+
+-- save old shardid
+SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_2') AS single_shard_conversion_test_2_old_shard_id \gset
+
+SELECT create_distributed_table('single_shard_conversion_test_2', null, colocate_with=>'none');
+
+SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_2');
+SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_2', :single_shard_conversion_test_2_old_shard_id, true);
+
+-- make sure that they're created in different colocation groups
+SELECT
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'single_node.single_shard_conversion_test_1'::regclass
+)
+!=
+(
+ SELECT colocationid FROM pg_dist_partition
+ WHERE logicalrelid = 'single_node.single_shard_conversion_test_2'::regclass
+);
+
SET client_min_messages TO WARNING;
-DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2;
+DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2, ref_table_conversion_test, single_shard_conversion_test_1, single_shard_conversion_test_2;
DROP SCHEMA tenant_1 CASCADE;
RESET client_min_messages;
diff --git a/src/test/regress/sql/single_node_enterprise.sql b/src/test/regress/sql/single_node_enterprise.sql
index fb6e47b9a..19393ba24 100644
--- a/src/test/regress/sql/single_node_enterprise.sql
+++ b/src/test/regress/sql/single_node_enterprise.sql
@@ -272,6 +272,8 @@ BEGIN;
SELECT count(*) FROM test;
ROLLBACK;
+SET citus.shard_replication_factor TO 1;
+
-- now, lets move all the shards of distributed tables out of the coordinator
-- block writes is much faster for the sake of the test timings we prefer it
SELECT master_drain_node('localhost', :master_port, shard_transfer_mode:='block_writes');
diff --git a/src/test/regress/sql/sqlancer_failures.sql b/src/test/regress/sql/sqlancer_failures.sql
index d003d58be..a1e248bf9 100644
--- a/src/test/regress/sql/sqlancer_failures.sql
+++ b/src/test/regress/sql/sqlancer_failures.sql
@@ -237,18 +237,18 @@ ON (true);
SELECT
COUNT(unsupported_join.*)
FROM
- (distributed_table a
+ ((distributed_table a
LEFT JOIN reference_table b ON (true)
- RIGHT JOIN reference_table c ON (false)) as unsupported_join
+ RIGHT JOIN reference_table c ON (false))
RIGHT JOIN
- (reference_table d JOIN reference_table e ON(true)) ON (true);
+ (reference_table d JOIN reference_table e ON(true)) ON (true)) as unsupported_join;
SELECT
COUNT(unsupported_join.*)
FROM
- (distributed_table a
+ ((distributed_table a
LEFT JOIN (SELECT * FROM reference_table OFFSET 0) b ON (true)
- RIGHT JOIN (SELECT * FROM reference_table OFFSET 0) c ON (false)) as unsupported_join
+ RIGHT JOIN (SELECT * FROM reference_table OFFSET 0) c ON (false))
RIGHT JOIN
(
(SELECT * FROM reference_table OFFSET 0) d
@@ -256,7 +256,7 @@ RIGHT JOIN
(SELECT * FROM reference_table OFFSET 0) e
ON(true)
)
-ON (true);
+ON (true)) as unsupported_join;
EXPLAIN (COSTS OFF) SELECT
unsupported_join.*
diff --git a/src/test/regress/sql/subquery_local_tables.sql b/src/test/regress/sql/subquery_local_tables.sql
index 7f52120f1..65fc6a5b8 100644
--- a/src/test/regress/sql/subquery_local_tables.sql
+++ b/src/test/regress/sql/subquery_local_tables.sql
@@ -95,37 +95,6 @@ FROM
LIMIT 3;
--- subquery in FROM -> FROM -> WHERE -> WHERE should be replaced if
--- it contains onle local tables
--- Later the upper level query is also recursively planned due to LIMIT
-SELECT user_id, array_length(events_table, 1)
-FROM (
- SELECT user_id, array_agg(event ORDER BY time) AS events_table
- FROM (
- SELECT
- u.user_id, e.event_type::text AS event, e.time
- FROM
- users_table AS u,
- events_table AS e
- WHERE u.user_id = e.user_id AND
- u.user_id IN
- (
- SELECT
- user_id
- FROM
- users_table
- WHERE value_2 >= 5
- AND EXISTS (SELECT user_id FROM events_table_local WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1)
- AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id)
- LIMIT 5
- )
- ) t
- GROUP BY user_id
-) q
-ORDER BY 2 DESC, 1;
-
-
-
-- subquery (i.e., subquery_2) in WHERE->FROM should be replaced due to local tables
SELECT
user_id
diff --git a/src/test/regress/sql/undistribute_table.sql b/src/test/regress/sql/undistribute_table.sql
index 1703440c0..737f5a0f9 100644
--- a/src/test/regress/sql/undistribute_table.sql
+++ b/src/test/regress/sql/undistribute_table.sql
@@ -105,8 +105,8 @@ INSERT INTO view_table VALUES (1, 2, 3), (2, 4, 6), (3, 6, 9);
CREATE SCHEMA another_schema;
-CREATE VIEW undis_view1 AS SELECT a, b FROM view_table;
-CREATE VIEW undis_view2 AS SELECT a, c FROM view_table;
+CREATE VIEW undis_view1 AS SELECT a, b FROM view_table table_name_for_view;
+CREATE VIEW undis_view2 AS SELECT a, c FROM view_table table_name_for_view;
CREATE VIEW another_schema.undis_view3 AS SELECT b, c FROM undis_view1 JOIN undis_view2 ON undis_view1.a = undis_view2.a;
SELECT schemaname, viewname, viewowner, definition FROM pg_views WHERE viewname LIKE 'undis\_view%' ORDER BY viewname;
@@ -131,18 +131,6 @@ SELECT create_distributed_table('dist_type_table', 'a');
SELECT undistribute_table('dist_type_table');
--- test CREATE RULE with ON SELECT
-CREATE TABLE rule_table_1 (a INT);
-CREATE TABLE rule_table_2 (a INT);
-SELECT create_distributed_table('rule_table_2', 'a');
-
-CREATE RULE "_RETURN" AS ON SELECT TO rule_table_1 DO INSTEAD SELECT * FROM rule_table_2;
-
--- the CREATE RULE turns rule_table_1 into a view
-ALTER EXTENSION plpgsql ADD VIEW rule_table_1;
-
-SELECT undistribute_table('rule_table_2');
-
-- test CREATE RULE without ON SELECT
CREATE TABLE rule_table_3 (a INT);
CREATE TABLE rule_table_4 (a INT);
@@ -155,7 +143,6 @@ ALTER EXTENSION plpgsql ADD TABLE rule_table_3;
SELECT undistribute_table('rule_table_4');
ALTER EXTENSION plpgsql DROP VIEW extension_view;
-ALTER EXTENSION plpgsql DROP VIEW rule_table_1;
ALTER EXTENSION plpgsql DROP TABLE rule_table_3;
DROP TABLE view_table CASCADE;
diff --git a/src/test/regress/sql/upgrade_basic_after.sql b/src/test/regress/sql/upgrade_basic_after.sql
index f2fc61769..b40501a1e 100644
--- a/src/test/regress/sql/upgrade_basic_after.sql
+++ b/src/test/regress/sql/upgrade_basic_after.sql
@@ -143,3 +143,8 @@ SELECT * FROM t_range ORDER BY id;
ROLLBACK;
+
+-- The partkey Var representation differs between PG16 and older versions.
+-- Sanity check that column_to_column_name() still works properly.
+SELECT column_to_column_name(logicalrelid, partkey)
+FROM pg_dist_partition WHERE partkey IS NOT NULL ORDER BY 1 LIMIT 1;
diff --git a/src/test/regress/sql/upgrade_basic_before.sql b/src/test/regress/sql/upgrade_basic_before.sql
index e223ac965..868483264 100644
--- a/src/test/regress/sql/upgrade_basic_before.sql
+++ b/src/test/regress/sql/upgrade_basic_before.sql
@@ -51,3 +51,8 @@ UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid
6,3
7,4
\.
+
+-- The partkey Var representation differs between PG16 and older versions.
+-- Sanity check that column_to_column_name() still works properly.
+SELECT column_to_column_name(logicalrelid, partkey)
+FROM pg_dist_partition WHERE partkey IS NOT NULL ORDER BY 1 LIMIT 1;
diff --git a/src/test/regress/sql/upgrade_list_citus_objects.sql b/src/test/regress/sql/upgrade_list_citus_objects.sql
index c4932c46a..47fafea05 100644
--- a/src/test/regress/sql/upgrade_list_citus_objects.sql
+++ b/src/test/regress/sql/upgrade_list_citus_objects.sql
@@ -5,4 +5,6 @@ WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass
AND refobjid = e.oid
AND deptype = 'e'
AND e.extname='citus'
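+  -- any_value is a builtin aggregate as of PG16; Citus only creates its own
+  -- compatibility versions on older servers, so filter them out here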
+ AND pg_catalog.pg_describe_object(classid, objid, 0) != 'function any_value(anyelement)'
+ AND pg_catalog.pg_describe_object(classid, objid, 0) != 'function any_value_agg(anyelement,anyelement)'
ORDER BY 1;
diff --git a/src/test/regress/sql/view_propagation.sql b/src/test/regress/sql/view_propagation.sql
index 44bbbf7b0..f0d63da85 100644
--- a/src/test/regress/sql/view_propagation.sql
+++ b/src/test/regress/sql/view_propagation.sql
@@ -207,15 +207,15 @@ UNION ALL
INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id;
-- Aliases are supported
-CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6;
+CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6 table_name_for_view;
-- View options are supported
CREATE VIEW opt_prop_view
WITH(check_option=CASCADED, security_barrier=true)
- AS SELECT * FROM view_table_6;
+ AS SELECT * FROM view_table_6 table_name_for_view;
CREATE VIEW sep_opt_prop_view
- AS SELECT * FROM view_table_6
+ AS SELECT * FROM view_table_6 table_name_for_view
WITH LOCAL CHECK OPTION;
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%' ORDER BY 1;
@@ -273,7 +273,7 @@ CREATE OR REPLACE VIEW view_for_unsup_commands AS SELECT id FROM table_to_test_u
CREATE TABLE alter_view_table(id int, val1 text);
SELECT create_distributed_table('alter_view_table','id');
-CREATE VIEW alter_view_1 AS SELECT * FROM alter_view_table;
+CREATE VIEW alter_view_1 AS SELECT * FROM alter_view_table table_name_for_view;
-- Set/drop default value is not supported by Citus
ALTER VIEW alter_view_1 ALTER COLUMN val1 SET DEFAULT random()::text;